iSolved Jobs API.
Human capital management platform with integrated ATS for mid-market and enterprise companies.
Try the API.
Test Jobs, Feed, and Auto-Apply endpoints against https://connect.jobo.world with live request/response examples, then copy ready-to-use curl commands.
What's in every response.
Data fields, real-world applications, and the companies already running on iSolved.
- HCM integration
- Rich job metadata
- Salary transparency
- Workplace type info
- Benefits management
- Compliance tracking
- Mid-market company tracking
- HR platform job extraction
- Compensation data collection
- Multi-tenant job discovery
How to scrape iSolved.
Step-by-step guide to extracting jobs from iSolved-powered career pages—endpoints, authentication, and working code.
import requests
import re
subdomain = "bectech"
careers_url = f"https://{subdomain}.isolvedhire.com/jobs/"
response = requests.get(careers_url)
html = response.text
# Extract domain_id (site ID) from page source
match = re.search(r'"domain_id"\s*:\s*"(\d+)"', html)
if match:
    site_id = match.group(1)
    print(f"Found site ID: {site_id}")
else:
    print("Could not find site ID")

# Next, call the listings endpoint with the extracted site ID
import requests
import json
from urllib.parse import quote
subdomain = "bectech"
site_id = "2332"
# Build the listings URL with getParams
get_params = json.dumps({"isInternal": 0}) # 0 = external jobs only
encoded_params = quote(get_params)
listings_url = f"https://{subdomain}.isolvedhire.com/core/jobs/{site_id}"
params = {"getParams": encoded_params}
headers = {
    "Accept": "application/json, text/plain, */*",
    "Referer": f"https://{subdomain}.isolvedhire.com/jobs/"
}
response = requests.get(listings_url, params=params, headers=headers)
data = response.json()
jobs = data.get("data", {}).get("jobs", [])
print(f"Found {len(jobs)} jobs")
for job in jobs[:3]:  # Show first 3 jobs
    print(f" - {job['title']} | {job.get('jobLocation')} | {job.get('employmentType')}")

# Parse salary and other structured fields from each listing
for job in jobs:
    # Parse salary information
    salary_info = None
    if job.get("minSalary") and job.get("maxSalary"):
        salary_info = {
            "min": int(job["minSalary"]),
            "max": int(job["maxSalary"]),
            "pay_type": job.get("payType", "Annual"),
            "period": job.get("payTypeFrame", "per year")
        }
    job_data = {
        "id": job["id"],
        "title": job["title"],
        "location": job.get("jobLocation"),
        "city": job.get("city"),
        "state": job.get("stateName"),
        "country": job.get("iso3"),
        "department": job.get("orgTitle"),
        "classification": job.get("classification"),
        "employment_type": job.get("employmentType"),
        "workplace_type": job.get("workplaceType"),
        "salary": salary_info,
        "url": job.get("jobUrl"),
        "start_date": job.get("startDateRef"),
        "end_date": job.get("endDateRef"),
    }
    print(job_data)

# Fetch full details (including the description) for a single job
import requests
subdomain = "bectech"
site_id = "2332"
job_id = "1417382"
details_url = f"https://{subdomain}.isolvedhire.com/core/jobs/{site_id}/{job_id}/job-details"
headers = {
    "Accept": "application/json, text/plain, */*",
    "Referer": f"https://{subdomain}.isolvedhire.com/jobs/{job_id}"
}
response = requests.get(details_url, headers=headers)
data = response.json()
if data.get("success"):
    job_details = data.get("data", {})
    print({
        "id": job_details.get("id"),
        "title": job_details.get("title"),
        "city": job_details.get("city"),
        "description_plain": job_details.get("advertisingDescription", "")[:200],
        "description_html": job_details.get("advertisingDescriptionHtml", "")[:200],
        "benefits": job_details.get("benefits"),
        "zip_code": job_details.get("jobBoardZip"),
    })

# Discover every company on iSolved via the public sitemap index
import requests
import xml.etree.ElementTree as ET
sitemap_index_url = "https://feeds.isolvedhire.com/site_map_index.xml"
response = requests.get(sitemap_index_url)
root = ET.fromstring(response.content)
# Extract company subdomains from sitemap
companies = []
ns = {"sm": "http://www.sitemaps.org/schemas/sitemap/0.9"}
for sitemap in root.findall("sm:sitemap", ns):
    loc = sitemap.find("sm:loc", ns)
    if loc is not None:
        url = loc.text
        # Extract subdomain from URL like: https://company.isolvedhire.com/job_site_map.xml
        if "isolvedhire.com" in url:
            subdomain = url.split("//")[1].split(".")[0]
            companies.append(subdomain)

print(f"Found {len(companies)} iSolved companies")
print(f"Examples: {companies[:5]}")

# Put it all together: a scraper with rate limiting and error handling
import requests
import time
from urllib.parse import quote
import json
def fetch_all_jobs(subdomain: str, site_id: str, delay: float = 0.2) -> list:
    """Fetch all jobs with rate limiting and error handling."""
    base_url = f"https://{subdomain}.isolvedhire.com"
    get_params = quote(json.dumps({"isInternal": 0}))
    listings_url = f"{base_url}/core/jobs/{site_id}"
    headers = {"Accept": "application/json"}
    try:
        # Fetch listings
        response = requests.get(
            listings_url,
            params={"getParams": get_params},
            headers=headers,
            timeout=15
        )
        response.raise_for_status()
        jobs = response.json().get("data", {}).get("jobs", [])
        # Fetch details for each job
        full_jobs = []
        for job in jobs:
            job_id = job["id"]
            details_url = f"{base_url}/core/jobs/{site_id}/{job_id}/job-details"
            try:
                details_resp = requests.get(details_url, headers=headers, timeout=10)
                details_resp.raise_for_status()
                details = details_resp.json().get("data", {})
                full_jobs.append({**job, "description": details.get("advertisingDescriptionHtml")})
            except requests.RequestException as e:
                print(f"Error fetching job {job_id}: {e}")
            time.sleep(delay)  # Rate limit
        return full_jobs
    except requests.RequestException as e:
        print(f"Error fetching listings: {e}")
        return []

# Usage
jobs = fetch_all_jobs("bectech", "2332")
print(f"Retrieved {len(jobs)} complete job listings")

The domain_id may appear in different locations depending on the page version. Try multiple regex patterns, or pull the site ID out of the API URLs visible in the browser's network requests.
The getParams query parameter must be URL-encoded JSON. Use json.dumps() then urllib.parse.quote() to properly format the parameter.
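For reference, the round trip looks like this; the printed string is what actually travels in the query string:

import json
from urllib.parse import quote

raw = json.dumps({"isInternal": 0})  # '{"isInternal": 0}'
encoded = quote(raw)                 # percent-encode for the URL
print(encoded)                       # %7B%22isInternal%22%3A%200%7D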
Descriptions require a separate API call per job to the /job-details endpoint. The listings API only returns metadata, not full descriptions.
Job IDs are scoped by site ID. Always use the combination of site_id + job_id to uniquely identify jobs across different iSolved tenants.
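A composite key is a simple way to enforce that when aggregating multiple tenants. The first tuple below reuses the IDs from the walkthrough; the second tenant is made up for illustration:

def job_key(site_id: str, job_id: str) -> str:
    # job_id alone can collide across tenants, so key on both parts.
    return f"{site_id}:{job_id}"

listings = [
    ("2332", "1417382"),  # site_id and job_id from the examples above
    ("9999", "1417382"),  # same job_id under a hypothetical second tenant
]
unique_keys = {job_key(s, j) for s, j in listings}
print(unique_keys)  # {'2332:1417382', '9999:1417382'} -- no collision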
iSolved rate limits are undocumented. Add delays (200-500ms) between requests and use timeout parameters. Implement retry logic with exponential backoff.
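Here is a sketch of that backoff idea, since fetch_all_jobs above only sleeps between requests and never retries. The attempt count and base delay are arbitrary starting points, not documented limits:

import time
import requests

def get_with_backoff(url: str, retries: int = 3, base_delay: float = 0.5, **kwargs) -> requests.Response:
    """GET with exponential backoff: waits 0.5s, then 1s, then 2s."""
    for attempt in range(retries):
        try:
            response = requests.get(url, timeout=15, **kwargs)
            response.raise_for_status()
            return response
        except requests.RequestException:
            if attempt == retries - 1:
                raise  # Out of attempts; let the caller handle it
            time.sleep(base_delay * 2 ** attempt)

Dropping this in for the bare requests.get calls in fetch_all_jobs leaves the rest of the scraper unchanged.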
Some companies disable optional fields like benefits or video. Handle empty strings and null values gracefully in your parsing logic.
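A small normalizer handles both cases uniformly. The sample record here is made up, with field names taken from the responses above:

def clean_field(value):
    """Collapse None and empty/whitespace-only strings to None."""
    if isinstance(value, str) and not value.strip():
        return None
    return value

record = {"benefits": "", "video": None, "title": "Example role"}  # made-up sample
normalized = {key: clean_field(val) for key, val in record.items()}
print(normalized)  # {'benefits': None, 'video': None, 'title': 'Example role'}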
1. Cache the site ID after extraction to avoid repeated page fetches (see the sketch after this list)
2. Use the sitemap index to discover all iSolved companies systematically
3. Fetch listings first for metadata, then fetch details only for jobs you need
4. Add 200-500ms delays between requests to respect rate limits
5. Handle missing optional fields gracefully (benefits, video, description variants)
6. Use the advertisingDescriptionHtml field for rich HTML job descriptions
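For the first tip, here is a minimal sketch of a JSON-file cache, assuming a local file is acceptable; the file name and cache shape are arbitrary choices:

import json
import re
from pathlib import Path

import requests

CACHE_FILE = Path("isolved_site_ids.json")  # arbitrary cache location

def get_site_id(subdomain: str) -> str | None:
    """Return a cached site ID, scraping the careers page only on a miss."""
    cache = json.loads(CACHE_FILE.read_text()) if CACHE_FILE.exists() else {}
    if subdomain in cache:
        return cache[subdomain]
    html = requests.get(f"https://{subdomain}.isolvedhire.com/jobs/", timeout=15).text
    match = re.search(r'"domain_id"\s*:\s*"(\d+)"', html)
    if not match:
        return None
    cache[subdomain] = match.group(1)
    CACHE_FILE.write_text(json.dumps(cache))
    return cache[subdomain]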
One endpoint. All iSolved jobs. No scraping, no sessions, no maintenance.
Get API access:

curl "https://enterprise.jobo.world/api/jobs?sources=isolved" \
  -H "X-Api-Key: YOUR_KEY"

Access iSolved job data today.
One API call. Structured data. No scraping infrastructure to build or maintain — start with the free tier and scale as you grow.