Comeet Jobs API.
Collaborative recruiting platform that embeds complete job data in HTML pages for easy extraction.
Try the API.
Test Jobs, Feed, and Auto-Apply endpoints against https://connect.jobo.world with live request/response examples, then copy ready-to-use curl commands.
What's in every response.
Data fields, real-world applications, and the companies already running on Comeet.
- Team collaboration
- Interview scheduling
- Candidate tracking
- Workflow automation
- Analytics
- Embedded JSON data
- 1. Mid-market job boards
- 2. Tech company tracking
- 3. Startup job aggregation
How to scrape Comeet.
Step-by-step guide to extracting jobs from Comeet-powered career pages—endpoints, authentication, and working code.
# --- Fetch a Comeet careers page and parse the embedded job JSON. ---
# Comeet renders the complete position list into a COMPANY_POSITIONS_DATA
# JavaScript variable, so a single GET yields every job.
import requests
from bs4 import BeautifulSoup
import re
import json

company_slug = "monday"
company_uid = "41.00B"

url = f"https://www.comeet.com/jobs/{company_slug}/{company_uid}"
response = requests.get(url)
response.raise_for_status()
html = response.text
print(f"Fetched page: {len(html)} bytes")

# Extract the embedded JSON data using regex (non-greedy, DOTALL so the
# array may span multiple lines inside the <script> tag).
pattern = r'COMPANY_POSITIONS_DATA\s*=\s*(\[.*?\]);\s*</script>'
match = re.search(pattern, html, re.DOTALL)
if not match:
    raise ValueError("Could not find COMPANY_POSITIONS_DATA in HTML")

jobs_data = json.loads(match.group(1))
print(f"Found {len(jobs_data)} jobs")

for job in jobs_data:
    # Build a markdown-style description from the custom "details" fields.
    # Not every company configures these, so default to an empty list.
    details = job.get("customFields", {}).get("details", [])
    description_parts = [
        f"**{detail.get('name')}**\n{detail.get('value', '')}"
        for detail in details
    ]
    full_description = "\n\n".join(description_parts)

    # Hoist the location dict instead of re-fetching it for each field.
    location = job.get("location", {})
    parsed_job = {
        "id": job.get("uid"),
        "title": job.get("name"),
        "department": job.get("department"),
        "location": location.get("name"),
        "city": location.get("city"),
        "country": location.get("country"),
        "is_remote": location.get("isRemote", False),
        "employment_type": job.get("employmentType"),
        "experience_level": job.get("experienceLevel"),
        "workplace_type": job.get("workplaceType"),
        "url": job.get("urlComeetHostedPage"),
        "description": full_description,
        "updated_at": job.get("timeUpdated"),
    }
    print(f" - {parsed_job['title']} ({parsed_job['location']})")
"""Follow redirect to get full Comeet URL with company UID."""
url = f"https://www.comeet.com/jobs/{short_slug}/"
response = requests.get(url, allow_redirects=False)
if response.status_code in (301, 302, 303, 307, 308):
redirect_url = response.headers.get("Location", "")
# Parse: https://www.comeet.com/jobs/monday/41.00B
parts = redirect_url.rstrip("/").split("/")
if len(parts) >= 2:
return {
"slug": parts[-2],
"uid": parts[-1],
"full_url": redirect_url
}
# No redirect, try fetching directly
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
# Look for COMPANY_DATA in the page
company_pattern = r'COMPANY_DATA\s*=\s*(\{.*?\});'
match = re.search(company_pattern, response.text, re.DOTALL)
if match:
company_data = json.loads(match.group(1))
return {
"slug": company_data.get("slug"),
"uid": company_data.get("companyUid"),
"name": company_data.get("name"),
}
return None
# Example usage
company_info = discover_comeet_company("monday")
print(f"Company: {company_info}")import time
from typing import Optional, List, Dict, Any


def scrape_comeet_jobs(
    company_slug: str,
    company_uid: str,
    delay: float = 1.0,
) -> List[Dict[str, Any]]:
    """Scrape all jobs from a Comeet company with error handling.

    Args:
        company_slug: Company segment of the careers URL (e.g. "monday").
        company_uid: Comeet company UID (e.g. "41.00B").
        delay: Seconds to sleep before the request (simple rate limiting).

    Returns:
        A list of flat job dicts; an empty list on any fetch or parse
        failure (errors are printed, never raised).
    """
    url = f"https://www.comeet.com/jobs/{company_slug}/{company_uid}"
    try:
        time.sleep(delay)  # Rate limiting: be polite to Comeet's servers.
        response = requests.get(url, timeout=30)
        response.raise_for_status()
    except requests.RequestException as e:
        print(f"Error fetching {url}: {e}")
        return []

    # Extract the embedded JSON payload from the page HTML.
    pattern = r'COMPANY_POSITIONS_DATA\s*=\s*(\[.*?\]);\s*</script>'
    match = re.search(pattern, response.text, re.DOTALL)
    if not match:
        print(f"Could not find job data in {url}")
        return []

    try:
        jobs_data = json.loads(match.group(1))
    except json.JSONDecodeError as e:
        print(f"Error parsing JSON from {url}: {e}")
        return []

    # Normalize each raw position into a small, flat record.
    return [
        {
            "id": job.get("uid"),
            "title": job.get("name"),
            "department": job.get("department"),
            "location": job.get("location", {}).get("name", "Unknown"),
            "url": job.get("urlComeetHostedPage"),
        }
        for job in jobs_data
    ]


# Scrape multiple companies
companies = [
    ("monday", "41.00B"),
    ("fiverr", "60.002"),
    ("brightdata", "88.007"),
]
for slug, uid in companies:
    jobs = scrape_comeet_jobs(slug, uid)
    print(f"{slug}: {len(jobs)} jobs")

Visit the company's careers page and check the URL or redirects. Use the discover_comeet_company function to follow redirects from the shorter URL format. Company UIDs follow patterns like '41.00B', '60.002', etc.
Some companies may use custom domains or different page structures. Check for alternative variable names or fall back to HTML scraping using selectors like 'a.positionItem' and 'span.positionName'.
The regex pattern may not match all variations. Try using a more lenient pattern or BeautifulSoup to find script tags containing the data. Also check for escaped characters in the JSON.
Not all companies configure the same custom fields. Always check if customFields.details exists before accessing it. Use .get() with default values to handle missing fields gracefully.
The description fields contain HTML markup. Use BeautifulSoup or html.parser to extract plain text if needed, or keep the HTML for rich display.
Some companies use custom domains like careers.company.com that redirect to Comeet. Follow redirects and extract the final URL to get the company slug and UID.
- 1. Extract embedded JSON from the HTML instead of making individual job requests
- 2. Use a regex to find the COMPANY_POSITIONS_DATA variable for reliable data extraction
- 3. Always handle missing custom fields gracefully with .get() and defaults
- 4. Add delays between requests to be respectful to Comeet's servers
- 5. Cache results — job boards typically update daily at most
- 6. Store the company UID alongside the slug for faster subsequent fetches
One endpoint. All Comeet jobs. No scraping, no sessions, no maintenance.
Get API access:

curl "https://enterprise.jobo.world/api/jobs?sources=comeet" \
  -H "X-Api-Key: YOUR_KEY"

Access Comeet job data today.
One API call. Structured data. No scraping infrastructure to build or maintain — start with the free tier and scale as you grow.