UltiPro (UKG) Jobs API.
UKG's enterprise HR and talent management platform with hybrid API/HTML job board architecture.
Try the API.
Test Jobs, Feed, and Auto-Apply endpoints against https://connect.jobo.world with live request/response examples, then copy ready-to-use curl commands.
What's in every response.
Data fields, real-world applications, and the companies already running on UltiPro (UKG).
- HCM integration
- Payroll
- Talent management
- Analytics
- Compliance
- Multi-location support
1. Enterprise job aggregation
2. Healthcare recruiting
3. Manufacturing talent sourcing
4. Multi-company job discovery
How to scrape UltiPro (UKG).
Step-by-step guide to extracting jobs from UltiPro (UKG)-powered career pages—endpoints, authentication, and working code.
import re
# UltiPro URL patterns:
# https://recruiting.ultipro.com/{COMPANY_CODE}/JobBoard/{BOARD_ID}/
# https://recruiting2.ultipro.com/{COMPANY_CODE}/JobBoard/{BOARD_ID}/
company_url = "https://recruiting.ultipro.com/HOP1003HOPN/JobBoard/b3a1c5d7-6c5e-46f6-8478-cd649884f0ef"
# Extract company code
company_code_match = re.search(r'recruiting\d*\.ultipro\.(?:com|ca)/([^/]+)', company_url)
company_code = company_code_match.group(1) if company_code_match else None
# Extract board ID
board_id_match = re.search(r'JobBoard/([^/]+)', company_url)
board_id = board_id_match.group(1) if board_id_match else None
print(f"Company Code: {company_code}") # HOP1003HOPN
print(f"Board ID: {board_id}") # UUIDimport requests
api_url = f"https://recruiting.ultipro.com/{company_code}/JobBoard/{board_id}/JobBoardView/LoadSearchResults"
headers = {
    "Content-Type": "application/json; charset=UTF-8",
    "X-Requested-With": "XMLHttpRequest",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
}
payload = {
    "opportunitySearch": {
        "Top": 50,
        "Skip": 0,
        "QueryString": "",
        "OrderBy": [{
            "Value": "postedDateDesc",
            "PropertyName": "PostedDate",
            "Ascending": False
        }],
        "Filters": [
            {"t": "TermsSearchFilterDto", "fieldName": 4, "extra": None, "values": []},
            {"t": "TermsSearchFilterDto", "fieldName": 5, "extra": None, "values": []},
            {"t": "TermsSearchFilterDto", "fieldName": 6, "extra": None, "values": []},
            {"t": "TermsSearchFilterDto", "fieldName": 37, "extra": None, "values": []}
        ]
    },
    "matchCriteria": {
        "PreferredJobs": [],
        "Educations": [],
        "LicenseAndCertifications": [],
        "Skills": [],
        "hasNoLicenses": False,
        "SkippedSkills": []
    }
}
response = requests.post(api_url, json=payload, headers=headers)
data = response.json()
print(f"Found {data.get('totalCount', 0)} total jobs")jobs = []
for job in data.get("opportunities", []):
    location_info = None
    if job.get("Locations") and len(job["Locations"]) > 0:
        addr = job["Locations"][0].get("Address", {})
        location_info = {
            "city": addr.get("City"),
            "state": addr.get("State", {}).get("Code"),
            "postal_code": addr.get("PostalCode"),
            "country": addr.get("Country", {}).get("Code"),
        }
    # JobLocationType: 1=On-site, 2=Hybrid, 3=Remote
    location_type_map = {1: "On-site", 2: "Hybrid", 3: "Remote"}
    jobs.append({
        "id": job.get("Id"),
        "title": job.get("Title"),
        "requisition_number": job.get("RequisitionNumber"),
        "full_time": job.get("FullTime"),
        "job_category": job.get("JobCategoryName"),
        "location": location_info,
        "posted_date": job.get("PostedDate"),
        # Guard against a null BriefDescription before slicing
        "brief_description": (job.get("BriefDescription") or "")[:200],
        "location_type": location_type_map.get(job.get("JobLocationType"), "Unknown"),
    })

print(f"Parsed {len(jobs)} jobs")

import time
all_jobs = []
skip = 0
page_size = 50
while True:
    payload["opportunitySearch"]["Skip"] = skip
    payload["opportunitySearch"]["Top"] = page_size
    response = requests.post(api_url, json=payload, headers=headers)
    data = response.json()
    opportunities = data.get("opportunities", [])
    if not opportunities:
        break
    all_jobs.extend(opportunities)
    total_count = data.get("totalCount", 0)
    print(f"Fetched {len(opportunities)} jobs (total: {len(all_jobs)}/{total_count})")
    if len(all_jobs) >= total_count:
        break
    skip += page_size
    time.sleep(1)  # Rate limiting delay

print(f"Retrieved {len(all_jobs)} total jobs")

from bs4 import BeautifulSoup
def fetch_job_details(company_code: str, board_id: str, job_id: str) -> dict:
    detail_url = (
        f"https://recruiting.ultipro.com/{company_code}/JobBoard/{board_id}"
        f"/OpportunityDetail?opportunityId={job_id}"
    )
    response = requests.get(detail_url)
    soup = BeautifulSoup(response.text, "html.parser")
    # Find the full description - selectors may vary by company
    description_selectors = [
        ".job-description",
        ".description",
        ".opportunity-description",
        "#job-description",
        "[class*='description']",
    ]
    full_description = None
    for selector in description_selectors:
        element = soup.select_one(selector)
        if element:
            full_description = element.get_text(strip=True)
            break
    # Extract requirements if available
    requirements = None
    req_element = soup.select_one(".requirements, .qualifications")
    if req_element:
        requirements = req_element.get_text(strip=True)
    return {
        "full_description": full_description,
        "requirements": requirements,
        "html_length": len(response.text),
    }

# Example usage
if all_jobs:
    job_id = all_jobs[0].get("Id")
    details = fetch_job_details(company_code, board_id, job_id)
    # full_description may be None, so fall back to an empty string
    print(f"Description length: {len(details.get('full_description') or '')}")

import re
def parse_ultipro_url(url: str) -> dict:
    """Extract all components from an UltiPro job board URL."""
    patterns = {
        "base_url": r'(https://recruiting\d*\.ultipro\.(?:com|ca))',
        "company_code": r'recruiting\d*\.ultipro\.(?:com|ca)/([^/]+)',
        "board_id": r'JobBoard/([^/]+)',
    }
    result = {"original_url": url}
    # Extract base URL (includes subdomain)
    base_match = re.search(patterns["base_url"], url)
    result["base_url"] = base_match.group(1) if base_match else None
    # Extract company code
    code_match = re.search(patterns["company_code"], url)
    result["company_code"] = code_match.group(1) if code_match else None
    # Extract board ID
    board_match = re.search(patterns["board_id"], url)
    result["board_id"] = board_match.group(1) if board_match else None
    return result

# Test with different subdomains
urls = [
    "https://recruiting.ultipro.com/HOP1003HOPN/JobBoard/b3a1c5d7-6c5e-46f6-8478-cd649884f0ef",
    "https://recruiting2.ultipro.com/HEN1009HPCC/JobBoard/abc123",
    "https://recruiting.ultipro.ca/BFL5000BFLCA/JobBoard/xyz789",
]
for url in urls:
    parsed = parse_ultipro_url(url)
    print(f"Base: {parsed['base_url']}, Code: {parsed['company_code']}")

Each UltiPro job board has a unique UUID board ID. This must be extracted from the company's careers page URL. Store board IDs for each company you scrape.
The LoadSearchResults API returns truncated BriefDescription fields. For full job descriptions, you must fetch individual OpportunityDetail HTML pages. Plan for extra HTTP requests.
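If you only need full text for some postings, a cheap filter on the brief description keeps the extra request count down. The sketch below reuses jobs, fetch_job_details, company_code, and board_id from the steps above; the keyword filter is an example heuristic, not part of the UltiPro API.

# Fetch expensive detail pages only for jobs that look relevant
keywords = ("engineer", "developer")

detailed_jobs = []
for job in jobs:
    brief = (job.get("brief_description") or "").lower()
    if any(k in brief for k in keywords):
        details = fetch_job_details(company_code, board_id, job["id"])
        detailed_jobs.append({**job, **details})

print(f"Fetched full descriptions for {len(detailed_jobs)} of {len(jobs)} jobs")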
UltiPro uses recruiting.ultipro.com, recruiting2.ultipro.com, and recruiting.ultipro.ca. Detect the correct subdomain from the company's actual URL to avoid 404 errors.
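To avoid hard-coding one subdomain, a possible approach is to derive the LoadSearchResults URL from whatever base URL the company's own board URL uses. This sketch reuses the parse_ultipro_url helper defined above.

def build_search_url(board_url: str):
    """Construct the LoadSearchResults URL from a company's board URL,
    preserving whichever subdomain (recruiting, recruiting2, .ca) it uses."""
    parsed = parse_ultipro_url(board_url)
    if not (parsed["base_url"] and parsed["company_code"] and parsed["board_id"]):
        return None
    return (
        f"{parsed['base_url']}/{parsed['company_code']}"
        f"/JobBoard/{parsed['board_id']}/JobBoardView/LoadSearchResults"
    )

print(build_search_url("https://recruiting2.ultipro.com/HEN1009HPCC/JobBoard/abc123"))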
Some UltiPro instances require an X-RequestVerificationToken header. If you receive 403 errors, extract the token from cookies or meta tags on the initial page load.
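A hedged sketch of that recovery path: load the board's landing page in a session, then look for a token in cookies or a meta tag. The cookie and meta names below follow common ASP.NET conventions and are assumptions, not documented UltiPro behavior; inspect the actual page to confirm them.

import requests
from bs4 import BeautifulSoup

def make_session_with_token(company_code: str, board_id: str):
    """Return a requests.Session plus extra headers carrying a verification
    token, if one can be found on the landing page (names are guesses)."""
    session = requests.Session()
    landing = session.get(
        f"https://recruiting.ultipro.com/{company_code}/JobBoard/{board_id}/"
    )
    token = session.cookies.get("__RequestVerificationToken")
    if not token:
        soup = BeautifulSoup(landing.text, "html.parser")
        meta = soup.find("meta", attrs={"name": "__RequestVerificationToken"})
        token = meta.get("content") if meta else None
    extra_headers = {"X-RequestVerificationToken": token} if token else {}
    return session, extra_headers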
Some companies may have zero active postings. Handle the empty response gracefully and check totalCount before attempting pagination.
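A small guard along those lines, reusing api_url, payload, and headers from the earlier steps:

# Probe once and bail out cleanly when a board has no active postings
probe = requests.post(api_url, json=payload, headers=headers).json()
if probe.get("totalCount", 0) == 0:
    print("No active postings on this board; skipping pagination")
else:
    print(f"{probe.get('totalCount')} postings available; safe to paginate")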
Implement delays between requests (1-2 seconds) and use exponential backoff for error responses. The API does not document official rate limits.
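One way to wire that in is a retry wrapper around the POST call. This is a sketch, not an official client; the retry count and delays are arbitrary choices.

import time
import requests

def post_with_backoff(url: str, payload: dict, headers: dict,
                      max_retries: int = 5, base_delay: float = 1.0):
    """POST with exponential backoff on errors; the delay doubles per retry."""
    for attempt in range(max_retries):
        try:
            response = requests.post(url, json=payload, headers=headers, timeout=30)
            if response.status_code == 200:
                return response.json()
        except requests.RequestException:
            pass  # network error: fall through to the backoff sleep
        time.sleep(base_delay * (2 ** attempt))
    raise RuntimeError(f"Giving up on {url} after {max_retries} attempts")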
1. Extract the company code and board ID from the actual company URL before making API calls
2. Use the LoadSearchResults API for efficient bulk job discovery
3. Add 1-2 second delays between requests to avoid potential rate limiting
4. Fetch full job descriptions from HTML only when needed (not for all jobs)
5. Handle multiple subdomains (recruiting, recruiting2, recruiting.ultipro.ca)
6. Store extracted board IDs to avoid re-discovery on subsequent scrapes
One endpoint. All UltiPro (UKG) jobs. No scraping, no sessions, no maintenance.
Get API access

curl "https://enterprise.jobo.world/api/jobs?sources=ultipro (ukg)" \
  -H "X-Api-Key: YOUR_KEY"

Access UltiPro (UKG) job data today.
One API call. Structured data. No scraping infrastructure to build or maintain — start with the free tier and scale as you grow.