CSOD Jobs API.
Cornerstone OnDemand recruiting platform used by enterprise organizations across regions. Also known as Cornerstone Recruiting.
Try the API.
Test Jobs, Feed, and Auto-Apply endpoints against https://connect.jobo.world with live request/response examples, then copy ready-to-use curl commands.
What's in every response.
Data fields, real-world applications, and the companies already running on CSOD.
- Enterprise recruitment workflows
- Regional API deployments
- Job taxonomy support
- Integrated talent suite
- Structured posting data
- Full descriptions in API
1. Enterprise job monitoring
2. Multi-region talent sourcing
3. Large company career tracking
4. Global recruitment analysis
How to scrape CSOD.
Step-by-step guide to extracting jobs from CSOD-powered career pages—endpoints, authentication, and working code.
import re
from urllib.parse import urlparse, parse_qs
def parse_csod_url(url: str) -> dict:
    """
    Parse a CSOD career-site URL into its company slug and site ID.

    Expected format:
        https://{company}.csod.com/ux/ats/careersite/{siteId}/home?c={company}

    Returns a dict with "company", "site_id" (defaults to 1 when the path
    has no /careersite/<n>/ segment), and the company's "base_url".
    """
    parsed = urlparse(url)
    # Company slug is the first label of the hostname (e.g. "henkel").
    company = parsed.hostname.split('.')[0]
    # Site ID lives in the path. Note the escaped \d+ — the original
    # pattern r'/careersite/(d+)' matched a literal "d" and therefore
    # never captured the numeric ID, always falling back to 1.
    match = re.search(r'/careersite/(\d+)', parsed.path)
    site_id = int(match.group(1)) if match else 1
    # The ?c= query parameter, when present, overrides the subdomain.
    query_params = parse_qs(parsed.query)
    company = query_params.get('c', [company])[0]
    return {
        "company": company,
        "site_id": site_id,
        "base_url": f"https://{company}.csod.com",
    }
# Example usage: pull the company slug and site ID out of a career-site URL.
example_url = "https://henkel.csod.com/ux/ats/careersite/1/home?c=henkel"
config = parse_csod_url(example_url)
print(f"Company: {config['company']}, Site ID: {config['site_id']}")

import requests
import re
def extract_csod_context(company: str) -> dict:
    """Extract the JWT token and regional API endpoint from a CSOD career site.

    Fetches the public career-site home page and scrapes the values the page
    inlines into its JavaScript (`csod.context.token` and
    `csod.context.endpoints.cloud`).

    Raises ValueError when either value cannot be found in the page.
    """
    url = f"https://{company}.csod.com/ux/ats/careersite/1/home?c={company}"
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    html = response.text
    # The token value may be wrapped in either quote style, hence [\"'].
    # (The original pattern r'...["']...' closed the single-quoted string
    # literal at the embedded apostrophe — a SyntaxError as written.)
    token_match = re.search(r"csod\.context\.token\s*=\s*[\"']([^\"']+)[\"']", html)
    token = token_match.group(1) if token_match else None
    # Regional endpoint (e.g. uk.api.csod.com) — always scraped, never hardcoded.
    cloud_match = re.search(r"csod\.context\.endpoints\.cloud\s*=\s*[\"']([^\"']+)[\"']", html)
    cloud_endpoint = cloud_match.group(1).rstrip('/') if cloud_match else None
    if not token or not cloud_endpoint:
        raise ValueError("Failed to extract CSOD context from page")
    return {
        "token": token,
        "cloud_endpoint": cloud_endpoint,
        "company": company,
    }
# Example usage: scrape the token and regional endpoint for one company.
context = extract_csod_context("henkel")
print(f"Token length: {len(context['token'])}")
print(f"API endpoint: {context['cloud_endpoint']}")

import requests
def fetch_csod_jobs(context: dict, page: int = 1, page_size: int = 25) -> dict:
    """Query one page of the CSOD external job-search API.

    Uses the bearer token and regional cloud endpoint previously scraped
    from the career-site page. Returns the decoded JSON response body.
    """
    site = f"https://{context['company']}.csod.com"
    request_headers = {
        "Authorization": f"Bearer {context['token']}",
        "Content-Type": "application/json",
        # Origin/Referer must match the career-site domain.
        "Origin": site,
        "Referer": f"{site}/",
        "Csod-Accept-Language": "en-US",
    }
    # Empty filter lists / search text mean "all jobs".
    search_body = {
        "careerSiteId": 1,
        "careerSitePageId": 1,
        "pageNumber": page,
        "pageSize": page_size,
        "cultureId": 1,
        "searchText": "",
        "cultureName": "en-US",
        "states": [],
        "countryCodes": [],
        "cities": [],
        "placeID": "",
        "radius": None,
        "postingsWithinDays": None,
        "customFieldCheckboxKeys": [],
        "customFieldDropdowns": [],
        "customFieldRadios": [],
    }
    resp = requests.post(
        f"{context['cloud_endpoint']}/rec-job-search/external/jobs",
        json=search_body,
        headers=request_headers,
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json()
# Example usage: fetch the first page of jobs.
data = fetch_csod_jobs(context, page=1, page_size=25)
print(f"Total jobs: {data['data']['totalCount']}")
print(f"Jobs on this page: {len(data['data']['requisitions'])}")

def parse_csod_job(requisition: dict, company: str, site_id: int = 1) -> dict:
    """Parse a CSOD requisition into a standardized job object.

    All fields are read with .get() so a malformed requisition yields None
    values instead of raising; the original indexed requisitionId directly
    when building the URL, which raised KeyError only on that one field.
    """
    req_id = requisition.get("requisitionId")
    return {
        "id": req_id,
        "title": requisition.get("displayJobTitle"),
        "description": requisition.get("externalDescription", ""),
        "locations": [
            {
                "city": loc.get("city"),
                "state": loc.get("state"),
                "country": loc.get("country"),
            }
            for loc in requisition.get("locations", [])
        ],
        "posted_date": requisition.get("postingEffectiveDate"),
        "expiration_date": requisition.get("postingExpirationDate"),
        "url": f"https://{company}.csod.com/ux/ats/careersite/{site_id}/home/requisition/{req_id}?c={company}",
    }
# Parse every requisition on the fetched page into standardized job dicts.
jobs = [parse_csod_job(req, context["company"]) for req in data["data"]["requisitions"]]

# Preview the first three results.
for job in jobs[:3]:
    first_city = job['locations'][0]['city'] if job['locations'] else 'Remote'
    print(f"- {job['title']} ({first_city})")
    print(f" Description length: {len(job['description'])} chars")

import time
def fetch_all_csod_jobs(context: dict, page_size: int = 25, delay: float = 1.5) -> list:
    """Walk every results page of the CSOD job search and return parsed jobs.

    Stops when a page comes back empty or when the running total reaches
    the API-reported totalCount. Sleeps `delay` seconds between pages and
    transparently refreshes the token on a 401 response.
    """
    collected = []
    page = 1
    total_count = None
    while True:
        print(f"Fetching page {page}...")
        try:
            data = fetch_csod_jobs(context, page=page, page_size=page_size)
        except requests.HTTPError as e:
            if e.response.status_code != 401:
                raise
            # Tokens last roughly an hour; re-scrape the page and retry once.
            print("Token expired, refreshing...")
            context = extract_csod_context(context["company"])
            data = fetch_csod_jobs(context, page=page, page_size=page_size)
        requisitions = data.get("data", {}).get("requisitions", [])
        if not requisitions:
            break
        collected.extend(
            parse_csod_job(req, context["company"]) for req in requisitions
        )
        total_count = data["data"].get("totalCount", 0)
        print(f" Page {page}: {len(requisitions)} jobs (total: {len(collected)}/{total_count})")
        # All done once we've accumulated everything the API says exists.
        if len(collected) >= total_count:
            break
        page += 1
        time.sleep(delay)  # Rate limiting
    return collected
# Fetch every job across all pages for this company.
all_jobs = fetch_all_csod_jobs(context)
print(f"Total jobs fetched: {len(all_jobs)}")

CSOD tokens expire after approximately 1 hour. Implement token refresh logic that re-fetches the career site page and extracts a new token when the API returns 401 errors.
The cloud endpoint varies by company region (uk.api.csod.com, eu-fra.api.csod.com, etc.). Always extract it from csod.context.endpoints.cloud in the page JavaScript rather than hardcoding.
API requests must include correct Origin and Referer headers matching the career site domain. Server-side requests avoid CORS issues entirely and are recommended for production scraping.
Some companies use legacy CSOD URLs without /ux/ats/careersite/ which redirect to login pages. Only modern UX URLs are publicly accessible. Check the URL format before attempting to scrape.
Site ID and Culture ID are company-specific. Extract these from the URL or page context. Default to siteId=1 and cultureId=1 (en-US) if unknown, but verify the site ID for multi-portal companies.
The API may occasionally return 504 errors during high load. Implement retry logic with exponential backoff (start with 5s delay, double on each retry) for robust error handling.
1. Extract the API endpoint from csod.context.endpoints.cloud — never hardcode regional URLs.
2. Cache tokens per company and refresh them when expired (tokens last ~1 hour).
3. Add 1–2 second delays between requests to respect rate limits.
4. Include correct Origin and Referer headers to avoid CORS issues.
5. Handle 401 errors gracefully by refreshing the token and retrying.
6. Use server-side requests to avoid browser CORS restrictions entirely.
One endpoint. All CSOD jobs. No scraping, no sessions, no maintenance.
Get API access:
curl "https://enterprise.jobo.world/api/jobs?sources=csod" \
  -H "X-Api-Key: YOUR_KEY"

Access CSOD job data today.
One API call. Structured data. No scraping infrastructure to build or maintain — start with the free tier and scale as you grow.