Workday Jobs API.
Enterprise-grade HCM platform used by Fortune 500 companies for recruiting and talent management.
Try the API.
Test Jobs, Feed, and Auto-Apply endpoints against https://connect.jobo.world with live request/response examples, then copy ready-to-use curl commands.
What's in every response.
Data fields, real-world applications, and the companies already running on Workday.
- CXS JSON API access
- Full HTML job descriptions
- Multi-location support
- Pagination handling
- No authentication required
- Sitemap-based discovery
- Multiple URL format support
- 01 Enterprise job aggregation
- 02 Fortune 500 company tracking
- 03 Large employer monitoring
- 04 Global talent market analysis
How to scrape Workday.
Step-by-step guide to extracting jobs from Workday-powered career pages—endpoints, authentication, and working code.
import re
def parse_workday_url(url: str) -> dict:
    """Extract the Workday tenant and site name from a careers-page URL.

    Supports both public Workday URL formats:
      * https://{tenant}.wd{N}.myworkdayjobs.com/{locale}/{site}
      * https://jobs.myworkdaysite.com/recruiting/{tenant}/{site}

    Returns a dict with "tenant", "site" and "base_url" keys, or None when
    the URL matches neither format.
    """
    # Pattern: https://{tenant}.wd{N}.myworkdayjobs.com/{locale}/{site}
    # The locale segment (e.g. en-US) is optional.
    # FIX: the original pattern used 'wdd+' (literal "wd" + one or more "d"),
    # which can never match server names like "wd3"; the data-center number
    # needs \d+. Dots are now escaped so "." matches only a literal dot.
    match = re.match(
        r'https://([^.]+)\.wd\d+\.myworkdayjobs\.com/(?:[a-z]{2}-[A-Z]{2}/)?([^/]+)',
        url,
    )
    if match:
        return {
            "tenant": match.group(1),
            "site": match.group(2),
            # Drop any trailing /job/... path so base_url is the board root.
            "base_url": url.rstrip('/').split('/job/')[0],
        }
    # Handle the jobs.myworkdaysite.com/recruiting/{tenant}/{site} format.
    match = re.match(
        r'https://jobs\.myworkdaysite\.com/recruiting/([^/]+)/([^/]+)',
        url,
    )
    if match:
        return {
            "tenant": match.group(1),
            "site": match.group(2),
            "base_url": url.split('/job/')[0],
        }
    return None
# Example usage: parse a real Workday careers URL into its components.
careers_url = "https://kainos.wd3.myworkdayjobs.com/en-US/kainos"
config = parse_workday_url(careers_url)
print(config)  # {'tenant': 'kainos', 'site': 'kainos', 'base_url': '...'}

import requests
def fetch_jobs_listings(tenant: str, site: str, wd_server: str = "wd3", limit: int = 20, offset: int = 0, timeout: float = 30.0) -> dict:
    """Fetch one page of job listings from the Workday CXS API.

    The listings endpoint requires a POST with a JSON body (appliedFacets,
    limit, offset, searchText) even when all of them are empty.

    Parameters:
        tenant: subdomain before .wd{N}.myworkdayjobs.com.
        site: career-site name from the URL path.
        wd_server: Workday data-center prefix (wd1, wd3, wd5, ...).
        limit/offset: page size and starting index for pagination.
        timeout: per-request timeout in seconds (added so a stalled
            server cannot hang the caller indefinitely).

    Returns the decoded JSON response; raises requests.HTTPError on
    non-2xx status via raise_for_status().
    """
    url = f"https://{tenant}.{wd_server}.myworkdayjobs.com/wday/cxs/{tenant}/{site}/jobs"
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Accept-Language": "en-US",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
        # Referer mirrors the public board URL; some tenants check it.
        "Referer": f"https://{tenant}.{wd_server}.myworkdayjobs.com/en-US/{site}",
    }
    payload = {
        "appliedFacets": {},
        "limit": limit,
        "offset": offset,
        "searchText": ""
    }
    response = requests.post(url, json=payload, headers=headers, timeout=timeout)
    response.raise_for_status()
    return response.json()
# Fetch first page of jobs
data = fetch_jobs_listings("kainos", "kainos", wd_server="wd3", limit=20, offset=0)
print(f"Total jobs: {data.get('total', 0)}")
print(f"Jobs in this page: {len(data.get('jobPostings', []))}")


def parse_listings(data: dict) -> list:
    """Flatten the Workday listings response into simple job dicts."""
    parsed = []
    for posting in data.get("jobPostings", []):
        # The requisition ID lives in bulletFields; Workday uses several
        # prefixes (JR_, REQ, R-), so take the first matching field.
        req_id = next(
            (field for field in posting.get("bulletFields", [])
             if field.startswith(("JR_", "REQ", "R-"))),
            None,
        )
        parsed.append({
            "title": posting.get("title"),
            "external_path": posting.get("externalPath"),
            "locations": posting.get("locationsText"),
            "posted_on": posting.get("postedOn"),
            "requisition_id": req_id,
            "url": posting.get("externalUrl"),
        })
    return parsed


# Parse and display jobs
jobs = parse_listings(data)
for job in jobs[:3]:
    print(f"- {job['title']} ({job['requisition_id']}) at {job['locations']}")

import time
def fetch_all_jobs(tenant: str, site: str, wd_server: str = "wd3", batch_size: int = 20) -> list:
    """Page through the listings endpoint and collect every job posting.

    Stops when a page comes back empty or when offset + batch_size reaches
    the reported total, sleeping between requests to stay polite.
    """
    collected = []
    offset = 0
    while True:
        page = fetch_jobs_listings(tenant, site, wd_server, limit=batch_size, offset=offset)
        batch = page.get("jobPostings", [])
        total = page.get("total", 0)
        if not batch:
            break
        collected.extend(batch)
        print(f"Fetched {len(collected)} of {total} jobs...")
        # Done once this page covered the remainder of the reported total.
        reached_end = offset + batch_size >= total
        if reached_end:
            break
        offset += batch_size
        time.sleep(1.5)  # Be respectful to the API
    return collected


# Fetch all jobs for a company
all_jobs = fetch_all_jobs("kainos", "kainos", "wd3")
print(f"Total jobs fetched: {len(all_jobs)}")


def fetch_job_details(tenant: str, site: str, external_path: str, wd_server: str = "wd3", timeout: float = 30.0) -> dict:
    """Fetch full details (including the HTML description) for a single job.

    The listings endpoint returns only summary fields; this GET endpoint
    (/wday/cxs/{tenant}/{site}/job/{externalPath}) returns the complete
    posting.

    Parameters:
        tenant/site: board identifiers, as returned by parse_workday_url.
        external_path: the posting's externalPath from the listings response.
        wd_server: Workday data-center prefix (wd1, wd3, wd5, ...).
        timeout: per-request timeout in seconds (added so a stalled
            server cannot hang the caller indefinitely).

    Raises requests.HTTPError on non-2xx status.
    """
    url = f"https://{tenant}.{wd_server}.myworkdayjobs.com/wday/cxs/{tenant}/{site}/job/{external_path}"
    headers = {
        "Accept": "application/json",
        "Accept-Encoding": "gzip, deflate, br",
        "Referer": f"https://{tenant}.{wd_server}.myworkdayjobs.com/en-US/{site}",
    }
    response = requests.get(url, headers=headers, timeout=timeout)
    response.raise_for_status()
    return response.json()


# Fetch details for a specific job
job_listings = fetch_jobs_listings("kainos", "kainos", limit=1)
if job_listings.get("jobPostings"):
    first_job = job_listings["jobPostings"][0]
    details = fetch_job_details("kainos", "kainos", first_job["externalPath"])
    print(f"Title: {details.get('title')}")
    print(f"Location: {details.get('location')}")
    print(f"Additional Locations: {details.get('additionalLocations', [])}")
    print(f"Time Type: {details.get('timeType')}")
    print(f"Job Req ID: {details.get('jobReqId')}")
    print(f"Start Date: {details.get('startDate')}")
    print(f"Description length: {len(details.get('jobDescription', ''))} chars")

import requests
from xml.etree import ElementTree
def fetch_sitemap(tenant: str, site: str, wd_server: str = "wd3", timeout: float = 30.0) -> list:
    """Return all job-posting URLs listed in the board's sitemap.

    Note: siteMap.xml has a capital S. Some tenants do not publish a
    sitemap at all; in that case raise_for_status() raises
    requests.HTTPError and the caller should fall back to API pagination.

    Parameters:
        tenant/site: board identifiers, as returned by parse_workday_url.
        wd_server: Workday data-center prefix (wd1, wd3, wd5, ...).
        timeout: per-request timeout in seconds (added so a stalled
            server cannot hang the caller indefinitely).
    """
    url = f"https://{tenant}.{wd_server}.myworkdayjobs.com/en-US/{site}/siteMap.xml"
    headers = {
        "Accept": "application/xml",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
    }
    response = requests.get(url, headers=headers, timeout=timeout)
    response.raise_for_status()
    # Parse XML; <loc> entries live under the standard sitemap namespace.
    root = ElementTree.fromstring(response.content)
    namespace = {'ns': 'http://www.sitemaps.org/schemas/sitemap/0.9'}
    job_urls = []
    for url_elem in root.findall('.//ns:url/ns:loc', namespace):
        loc = url_elem.text
        # Keep only posting URLs; the sitemap also lists landing pages.
        if '/job/' in loc:
            job_urls.append(loc)
    return job_urls
# Get all job URLs from sitemap
# NOTE(review): raises requests.HTTPError when the tenant has no sitemap
# enabled — presumably callers should fall back to API pagination; confirm.
job_urls = fetch_sitemap("kainos", "kainos", "wd3")
print(f"Found {len(job_urls)} job URLs in sitemap")
for url in job_urls[:3]:
    print(f" - {url}")

The API URL must follow the pattern /wday/cxs/{tenant}/{site}/jobs. Ensure you extract both tenant (from hostname) and site (from path) correctly. The tenant is the subdomain before .wd{N}.myworkdayjobs.com.
The listings endpoint requires a POST request with a JSON body, not GET. Include appliedFacets, limit, offset, and searchText in the request body even if empty.
The listings API returns only basic job info (title, location, path). You must fetch individual job details using the GET endpoint /wday/cxs/{tenant}/{site}/job/{externalPath} to get full HTML descriptions.
Companies use different Workday data centers (wd1, wd3, wd5, etc.). Always check the actual URL from the company's careers page. Do not assume wd3 works for all companies.
Use offset-based pagination correctly. Stop when jobPostings array is empty OR when offset + limit >= total. Some companies may have fewer jobs than the total indicates due to filtering.
Workday uses multiple requisition ID formats: JR_12345, JR_12345-1, REQ12345, R-00066362. Parse bulletFields array to find the ID, don't assume a specific format.
The sitemap filename is siteMap.xml with capital S, not sitemap.xml. Also, some companies may not have a sitemap enabled. Fall back to API pagination if the sitemap is unavailable.
Some companies use jobs.myworkdaysite.com/recruiting/{company}/{site} instead of the wd{N}.myworkdayjobs.com format. Your URL parser should handle both formats.
- 1. Use POST for the listings endpoint, GET for job details
- 2. No authentication or CSRF token required for public job boards
- 3. Add 1.5-2 second delays between requests to avoid rate limiting
- 4. Fetch job details separately for full HTML descriptions
- 5. Extract tenant and site from URLs programmatically to handle variations
- 6. Use the sitemap at /siteMap.xml (capital S) for efficient job discovery
- 7. Handle multiple Workday server numbers (wd1, wd2, wd3, wd4, wd5)
- 8. Include an Accept-Encoding: gzip header for compressed responses
One endpoint. All Workday jobs. No scraping, no sessions, no maintenance.
Get API access:

curl "https://enterprise.jobo.world/api/jobs?sources=workday" \
  -H "X-Api-Key: YOUR_KEY"

Access Workday job data today.
One API call. Structured data. No scraping infrastructure to build or maintain — start with the free tier and scale as you grow.