ApplicantPro Jobs API.
An applicant tracking system designed for small and medium businesses, now branded as isolved Talent Acquisition, with a public JSON API.
Try the API.
Test Jobs, Feed, and Auto-Apply endpoints against https://connect.jobo.world with live request/response examples, then copy ready-to-use curl commands.
What's in every response.
Data fields, real-world applications, and the companies already running on ApplicantPro.
- SMB focus
- Easy setup
- Clean job data
- Company branding
- Application management
- Public JSON API
- No authentication required
- Comprehensive salary data
- HTML and plain text descriptions
1. SMB job aggregation
2. Small business recruiting
3. Mid-market talent acquisition
4. Multi-company job discovery
How to scrape ApplicantPro.
Step-by-step guide to extracting jobs from ApplicantPro-powered career pages—endpoints, authentication, and working code.
import requests
import re

def get_site_id(subdomain: str) -> str | None:
    url = f"https://{subdomain}.applicantpro.com/jobs/"
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    # Extract domain_id from embedded JavaScript
    match = re.search(r'"domain_id"\s*:\s*"(\d+)"', response.text)
    if match:
        return match.group(1)
    return None

site_id = get_site_id("harvardbioscience")
print(f"Site ID: {site_id}")  # Output: Site ID: 11099

import requests
import json
from urllib.parse import quote
subdomain = "harvardbioscience"
site_id = "11099"

get_params = {
    "isInternal": 0,
    "showLocation": 1,
    "showEmploymentType": 1,
    "chatToApplyButton": "0"
}

# URL-encode the JSON params
encoded_params = quote(json.dumps(get_params))
listings_url = f"https://{subdomain}.applicantpro.com/core/jobs/{site_id}?getParams={encoded_params}"

headers = {
    "Accept": "application/json",
    "Referer": f"https://{subdomain}.applicantpro.com/jobs/"
}

response = requests.get(listings_url, headers=headers, timeout=10)
data = response.json()
jobs = data.get("data", {}).get("jobs", [])
job_count = data.get("data", {}).get("jobCount", 0)
print(f"Found {len(jobs)} jobs (API reports {job_count} total)")

for job in jobs:
    print({
        "id": job.get("id"),
        "title": job.get("title"),
        "location": job.get("jobLocation"),
        "city": job.get("city"),
        "state": job.get("stateName"),
        "country": job.get("iso3"),
        "department": job.get("orgTitle"),
        "classification": job.get("classification"),
        "employment_type": job.get("employmentType"),
        "workplace_type": job.get("workplaceType"),
        "pay_type": job.get("payType"),
        "pay_details": job.get("payDetails"),
        "min_salary": job.get("minSalary"),
        "max_salary": job.get("maxSalary"),
        "job_url": job.get("jobUrl"),
        "posted_date": job.get("startDateRef"),
        "expiry_date": job.get("endDateRef"),
    })

import time
def get_job_details(subdomain: str, site_id: str, job_id: int) -> dict:
    url = f"https://{subdomain}.applicantpro.com/core/jobs/{site_id}/{job_id}/job-details"
    headers = {
        "Accept": "application/json",
        "Referer": f"https://{subdomain}.applicantpro.com/jobs/{job_id}"
    }
    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()
    return response.json().get("data", {})

# Fetch details for first job with rate limiting
if jobs:
    details = get_job_details(subdomain, site_id, jobs[0]["id"])
    print({
        "id": details.get("id"),
        "title": details.get("title"),
        "city": details.get("city"),
        "description_html": details.get("advertisingDescriptionHtml", "")[:200],
        "description_plain": details.get("advertisingDescription", "")[:200],
        "benefits": details.get("benefits"),
        "zip_code": details.get("jobBoardZip"),
        "pay_details": details.get("payDetails"),
    })
    time.sleep(0.5)  # Be respectful with rate limiting

def safe_extract(subdomain: str) -> list[dict]:
    try:
        site_id = get_site_id(subdomain)
        if not site_id:
            print(f"Could not find site ID for {subdomain}")
            return []
        get_params = {"isInternal": 0, "showLocation": 1}
        encoded_params = quote(json.dumps(get_params))
        url = f"https://{subdomain}.applicantpro.com/core/jobs/{site_id}?getParams={encoded_params}"
        response = requests.get(url, headers={"Accept": "application/json"}, timeout=10)
        response.raise_for_status()
        data = response.json()
        if not data.get("success"):
            print(f"API returned error for {subdomain}")
            return []
        return data.get("data", {}).get("jobs", [])
    except requests.RequestException as e:
        print(f"Request failed for {subdomain}: {e}")
        return []

jobs = safe_extract("harvardbioscience")

import requests
import xml.etree.ElementTree as ET
# Global sitemap index lists all ApplicantPro companies
sitemap_index_url = "https://feeds.applicantpro.com/site_map_index.xml"
response = requests.get(sitemap_index_url, timeout=10)
root = ET.fromstring(response.content)

# Extract company sitemap URLs
namespaces = {"ns": "http://www.sitemaps.org/schemas/sitemap/0.9"}
company_sitemaps = []
for sitemap in root.findall("ns:sitemap", namespaces):
    loc = sitemap.find("ns:loc", namespaces)
    if loc is not None:
        company_sitemaps.append(loc.text)

print(f"Found {len(company_sitemaps)} company sitemaps")

# Parse individual company sitemap for job URLs
def parse_company_sitemap(sitemap_url: str) -> list[str]:
    response = requests.get(sitemap_url, timeout=10)
    root = ET.fromstring(response.content)
    job_urls = []
    for url in root.findall("ns:url", namespaces):
        loc = url.find("ns:loc", namespaces)
        if loc is not None and "/jobs/" in loc.text:
            job_urls.append(loc.text)
    return job_urls

# Example: Get jobs from first company sitemap
if company_sitemaps:
    jobs = parse_company_sitemap(company_sitemaps[0])
    print(f"Found {len(jobs)} job URLs in first sitemap")

Troubleshooting.

The page structure may have changed. Try alternative regex patterns, or look for the domain_id in script tags within the courierCurrentRouteData object.
The listings API does not include descriptions. You must make a separate API call to the job-details endpoint for each job to get the full description and benefits.
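As a rough sketch, a throttled loop can hydrate each listing with its description. It assumes the get_job_details helper, subdomain, and site_id from the examples above, and that listings holds the jobs array returned by the listings endpoint:

import time

# Sketch: enrich every listing with its full description, throttled.
# Assumes get_job_details(), subdomain, and site_id are defined as above.
def fetch_all_details(listings: list[dict]) -> list[dict]:
    detailed = []
    for job in listings:
        details = get_job_details(subdomain, site_id, job["id"])
        job["description_html"] = details.get("advertisingDescriptionHtml", "")
        job["benefits"] = details.get("benefits")
        detailed.append(job)
        time.sleep(0.5)  # throttle between detail requests
    return detailed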
Some companies use custom domains that redirect to ApplicantPro. Follow redirects and extract the actual subdomain from the final URL before extracting the site ID.
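A minimal sketch of that resolution step, assuming standard HTTP redirect behavior (the careers.example.com domain below is hypothetical):

import requests
from urllib.parse import urlparse

def resolve_subdomain(custom_url: str) -> str | None:
    # Follow redirects to the final ApplicantPro URL
    response = requests.get(custom_url, timeout=10, allow_redirects=True)
    host = urlparse(response.url).hostname or ""
    # e.g. "acme.applicantpro.com" -> "acme"
    if host.endswith(".applicantpro.com"):
        return host.split(".")[0]
    return None

# Usage (hypothetical custom domain):
# subdomain = resolve_subdomain("https://careers.example.com")
# site_id = get_site_id(subdomain) if subdomain else None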
Some companies may have no active postings. Check the jobCount field in the response and handle empty arrays gracefully in your code.
Dates appear in different formats across endpoints (e.g., 'Jan 23, 2026' vs '23-Jan-2026'). Use flexible date parsing libraries like dateutil to handle both formats.
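For instance, python-dateutil (a third-party package) parses both formats without per-endpoint format strings:

from dateutil import parser  # pip install python-dateutil

# Both date formats seen across endpoints parse to the same value
print(parser.parse("Jan 23, 2026").date())  # 2026-01-23
print(parser.parse("23-Jan-2026").date())   # 2026-01-23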
While there are no official limits documented, unthrottled requests may trigger temporary blocks. Add delays of 0.5-1 second between detail requests for large batches.
Salary fields (minSalary, maxSalary) may be empty strings. Always check for truthy values before parsing. Pay details are often in payDetails as text rather than structured data.
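A small helper along these lines guards against empty strings before converting (the sample values below are hypothetical):

def parse_salary(value: str | None) -> float | None:
    # minSalary/maxSalary may be "" or missing; treat both as "no data"
    if not value:
        return None
    try:
        return float(value)
    except ValueError:
        return None

# Hypothetical sample values for illustration
job = {"minSalary": "", "maxSalary": "85000.00"}
print(parse_salary(job.get("minSalary")), parse_salary(job.get("maxSalary")))  # None 85000.0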
Best practices.

1. Cache the site ID per subdomain to avoid repeated page fetches (see the sketch after this list)
2. Use advertisingDescriptionHtml over plain text for better formatting
3. Add 0.5-1 second delays between detail requests to avoid rate limiting
4. Handle both date formats returned by different endpoints (e.g., 'Jan 23, 2026' vs '23-Jan-2026')
5. Use the jobUrl field from the API response when available instead of constructing URLs
6. Fetch benefits information from the details endpoint; it is not available in listings
7. Include Referer headers matching the job board URL for better compatibility
8. Use the global sitemap index at feeds.applicantpro.com for company discovery
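One way to cache site IDs, sketched with functools from the standard library and the get_site_id helper defined earlier:

from functools import lru_cache

@lru_cache(maxsize=None)
def cached_site_id(subdomain: str) -> str | None:
    # First call fetches the jobs page; repeat calls hit the in-process cache
    return get_site_id(subdomain)  # helper defined earlier in this guide

cached_site_id("harvardbioscience")  # network fetch
cached_site_id("harvardbioscience")  # served from cache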
One endpoint. All ApplicantPro jobs. No scraping, no sessions, no maintenance.
Get API access.

curl "https://enterprise.jobo.world/api/jobs?sources=applicantpro" \
  -H "X-Api-Key: YOUR_KEY"

Access ApplicantPro job data today.
One API call. Structured data. No scraping infrastructure to build or maintain — start with the free tier and scale as you grow.
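The same call from Python, as a sketch mirroring the curl command above (YOUR_KEY is a placeholder for your API key):

import requests

response = requests.get(
    "https://enterprise.jobo.world/api/jobs",
    params={"sources": "applicantpro"},
    headers={"X-Api-Key": "YOUR_KEY"},  # placeholder key from the curl example
    timeout=10,
)
response.raise_for_status()
print(response.json())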