Zoho Recruit Jobs API.
Cloud-based ATS and recruitment software that is part of the Zoho business suite, popular with SMBs for its CRM integration and customization options.
Try the API.
Test Jobs, Feed, and Auto-Apply endpoints against https://connect.jobo.world with live request/response examples, then copy ready-to-use curl commands.
What's in every response.
Data fields, real-world applications, and the companies already running on Zoho Recruit.
- CRM integration
- Multi-app business suite
- SMB-focused
- Customizable career pages
- Workflow automation
- Multi-domain support
- API and HTML access
1. SMB job tracking
2. CRM-integrated recruiting
3. Multi-location hiring
4. Applicant pipeline management
5. India market job sourcing
How to scrape Zoho Recruit.
Step-by-step guide to extracting jobs from Zoho Recruit-powered career pages—endpoints, authentication, and working code.
Step 1: Detect which URL pattern the company uses.

```python
import requests

def detect_zoho_pattern(company: str) -> str | None:
    """Detect which Zoho URL pattern a company uses."""
    patterns = [
        f"https://{company}.zohorecruit.com/jobs/Careers",  # Modern
        f"https://{company}.zohorecruit.in/jobs/Careers",   # India
        f"https://{company}.zohorecruit.com/careers",       # Legacy
    ]
    for pattern in patterns:
        try:
            resp = requests.head(pattern, timeout=10, allow_redirects=True)
            if resp.status_code == 200:
                return pattern
        except requests.RequestException:
            continue
    return None

# Usage
url = detect_zoho_pattern("algoleap")
print(f"Detected URL: {url}")
```
Step 2: Fetch the career page HTML.

```python
import requests

def fetch_zoho_career_page(url: str) -> str:
    """Fetch HTML from a Zoho Recruit career page."""
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    }
    response = requests.get(url, headers=headers, timeout=15)
    response.raise_for_status()
    return response.text

# Usage
html = fetch_zoho_career_page("https://algoleap.zohorecruit.com/jobs/Careers")
print(f"Fetched {len(html)} bytes")
```
Step 3: Parse job listings out of the HTML.

```python
from bs4 import BeautifulSoup

def parse_zoho_jobs(html: str) -> list[dict]:
    """Extract job listings from Zoho career page HTML."""
    soup = BeautifulSoup(html, 'html.parser')
    jobs = []
    # Zoho-specific selectors from investigation
    job_elements = soup.select('.ziabot-job-listing-row, .cw-job-listing-container > div')
    for el in job_elements:
        link = el.select_one('h3 > a, a[href*="/jobs/Careers/"]')
        if not link:
            continue
        location_el = el.select_one('.cw-job-location')
        job_type_el = el.select_one('.cw-job-type')
        jobs.append({
            'title': link.get_text(strip=True),
            'url': link.get('href'),
            'location': location_el.get_text(strip=True) if location_el else None,
            'job_type': job_type_el.get_text(strip=True) if job_type_el else None,
        })
    return jobs

# Usage
jobs = parse_zoho_jobs(html)
print(f"Found {len(jobs)} jobs")
for job in jobs[:3]:
    print(f"  - {job['title']} ({job['location']})")
```
Step 4: Parse job IDs and slugs from job URLs.

```python
import re

def parse_zoho_job_url(url: str) -> dict | None:
    """Extract job ID and slug from a Zoho job URL."""
    # Pattern: /jobs/Careers/{jobId}/{job-title}
    match = re.search(r'/jobs/Careers/(\d+)/([^/?]+)', url)
    if not match:
        return None
    job_id = match.group(1)
    slug = match.group(2)
    return {
        'job_id': job_id,
        'slug': slug,
        'title': slug.replace('-', ' ').title(),
    }

def build_job_detail_url(base_url: str, job_id: str, slug: str) -> str:
    """Construct the full URL for a job detail page."""
    return f"{base_url}/{job_id}/{slug}?source=CareerSite"

# Usage
url = "https://algoleap.zohorecruit.com/jobs/Careers/660523000015041023/senior-software-engineer"
parsed = parse_zoho_job_url(url)
print(f"Job ID: {parsed['job_id']}, Title: {parsed['title']}")
```
Step 5: Fetch details for an individual job.

```python
import requests
from bs4 import BeautifulSoup

def fetch_job_details(job_url: str) -> dict:
    """Fetch detailed information from a Zoho job detail page."""
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    }
    response = requests.get(job_url, headers=headers, timeout=15)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')
    # Extract from detail page selectors
    title_el = soup.select_one('h1')
    desc_el = soup.select_one('.cw-job-description, #spandesc')
    location_el = soup.select_one('.cw-job-detail-meta .cw-detail-value')
    return {
        'title': title_el.get_text(strip=True) if title_el else None,
        'description': desc_el.get_text(strip=True) if desc_el else None,
        'description_html': str(desc_el) if desc_el else None,
        'location': location_el.get_text(strip=True) if location_el else None,
        'url': job_url,
    }

# Usage
details = fetch_job_details("https://algoleap.zohorecruit.com/jobs/Careers/660523000015041023/senior-software-engineer?source=CareerSite")
print(f"Title: {details['title']}")
```
Step 6: Prefer JSON-LD structured data when the page provides it.

```python
import json
from bs4 import BeautifulSoup

def extract_json_ld(html: str) -> list[dict]:
    """Extract JobPosting JSON-LD data from the page."""
    soup = BeautifulSoup(html, 'html.parser')
    jobs = []
    for script in soup.find_all('script', type='application/ld+json'):
        try:
            data = json.loads(script.string)
            # Handle both single object and array formats
            items = data if isinstance(data, list) else [data]
            for item in items:
                if item.get('@type') == 'JobPosting':
                    location = item.get('jobLocation', {})
                    address = location.get('address', {}) if isinstance(location, dict) else {}
                    jobs.append({
                        'title': item.get('title'),
                        'description': item.get('description'),
                        'location': address.get('addressLocality'),
                        'date_posted': item.get('datePosted'),
                        'employment_type': item.get('employmentType'),
                        'url': item.get('url'),
                    })
        except (json.JSONDecodeError, AttributeError, TypeError):
            continue
    return jobs

# Usage
json_jobs = extract_json_ld(html)
print(f"Found {len(json_jobs)} jobs via JSON-LD")
```

Zoho has at least three URL patterns: modern /jobs/Careers, legacy /careers with an API, and custom paths. Always detect which pattern a company uses before scraping.
Unlike other ATS platforms, Zoho doesn't provide a single endpoint to get all jobs. You must scrape HTML pages or use the PortalDetail.na API for individual job details on legacy sites.
The PortalDetail.na API requires a session-specific 'digest' parameter that changes per visit. You must first visit the careers page to extract this token before making API calls.
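As a rough sketch of that flow, the snippet below loads the legacy careers page in a session, pulls a digest-looking token out of the page source, and passes it to PortalDetail.na. The extraction regex, the endpoint path, and the parameter names (`digest`, `jobId`) are assumptions based on the description above, not a documented contract; verify them against a live legacy page before relying on this.

```python
import re
import requests

def fetch_legacy_job_detail(subdomain: str, job_id: str) -> str | None:
    """Hypothetical sketch of the legacy PortalDetail.na flow described above."""
    session = requests.Session()
    # First visit the careers page so the session picks up its cookies,
    # then pull the per-visit digest token out of the page source.
    page = session.get(f"https://{subdomain}.zohorecruit.com/careers", timeout=15)
    page.raise_for_status()
    # Assumed: the digest appears as digest=... or digest: "..." in the markup.
    match = re.search(r'digest["\']?\s*[:=]\s*["\']?([^"\'&\s]+)', page.text)
    if not match:
        return None
    # Assumed endpoint path and parameter names, per the description above.
    resp = session.get(
        f"https://{subdomain}.zohorecruit.com/recruit/PortalDetail.na",
        params={"digest": match.group(1), "jobId": job_id},
        timeout=15,
    )
    resp.raise_for_status()
    return resp.text  # response format varies; may be HTML rather than JSON
```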
Zoho allows companies to customize career page appearance. Build parsers with multiple fallback CSS selectors and prioritize JSON-LD data when available.
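A minimal sketch of that strategy, chaining the helpers defined in the steps above: try JSON-LD first and fall back to CSS selectors only when no structured data is present.

```python
def scrape_company_jobs(company: str) -> list[dict]:
    """Scrape one company, preferring JSON-LD over HTML selectors."""
    url = detect_zoho_pattern(company)
    if url is None:
        return []
    html = fetch_zoho_career_page(url)
    jobs = extract_json_ld(html)      # structured data first
    if not jobs:
        jobs = parse_zoho_jobs(html)  # CSS-selector fallback
    return jobs
```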
Indian companies use .zohorecruit.in instead of .zohorecruit.com. Try both domain patterns if one fails.
Job IDs can be different lengths (e.g., 660523000015041023 vs 248544000244520654). Use regex pattern /jobs/Careers/(\d+) to extract IDs reliably.
1. Detect URL pattern type before scraping (modern vs legacy)
2. Check for JSON-LD structured data before HTML parsing
3. Use multiple CSS selector fallbacks for robustness
4. Implement 1-2 second delays between requests
5. Support both .zohorecruit.com and .zohorecruit.in domains
6. Extract job IDs from URLs using regex for deduplication
7. Cache results to minimize repeated requests to the same company (see the sketch after this list)
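To make practices 4 and 7 concrete, here is a small sketch that adds a per-request delay and an in-memory cache, reusing the hypothetical scrape_company_jobs helper above; swap the dict for persistent storage in production.

```python
import time

_cache: dict[str, list[dict]] = {}

def polite_scrape(companies: list[str], delay: float = 1.5) -> dict[str, list[dict]]:
    """Scrape several companies with delays between requests and cached results."""
    results: dict[str, list[dict]] = {}
    for company in companies:
        if company in _cache:              # practice 7: skip repeat requests
            results[company] = _cache[company]
            continue
        jobs = scrape_company_jobs(company)
        _cache[company] = jobs
        results[company] = jobs
        time.sleep(delay)                  # practice 4: 1-2 second delay
    return results
```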
One endpoint. All Zoho Recruit jobs. No scraping, no sessions, no maintenance.
Get API access:

```bash
curl "https://enterprise.jobo.world/api/jobs?sources=zoho%20recruit" \
  -H "X-Api-Key: YOUR_KEY"
```

Access Zoho Recruit job data today.
One API call. Structured data. No scraping infrastructure to build or maintain — start with the free tier and scale as you grow.