Taleo Jobs API.
Oracle's enterprise talent management cloud used by large corporations worldwide for recruiting and talent acquisition.
Try the API.
Test Jobs, Feed, and Auto-Apply endpoints against https://connect.jobo.world with live request/response examples, then copy ready-to-use curl commands.
What's in every response.
Data fields, real-world applications, and the companies already running on Taleo.
- Enterprise coverage
- Global companies
- REST API for listings
- HTML details parsing
- Multi-section support
- Filtering capabilities
- 01 Enterprise job aggregation
- 02 Global company tracking
- 03 Large-scale talent sourcing
- 04 Multi-region job monitoring
How to scrape Taleo.
Step-by-step guide to extracting jobs from Taleo-powered career pages—endpoints, authentication, and working code.
import requests

# --- Company configuration -------------------------------------------------
# Every Taleo customer runs on its own subdomain (<company>.taleo.net).
# The career-section name and the portal id are instance-specific: find the
# portal id in the browser's network tab on the jobsearch.ftl page (it is
# the "portal" query parameter of the searchjobs XHR).
company = "hdr"
section = "ex"  # Common sections: ex, 1, 2, 10000
portal_id = "101430233"  # Found in network requests

base_url = f"https://{company}.taleo.net"
search_url = f"{base_url}/careersection/rest/jobboard/searchjobs?lang=en&portal={portal_id}"

print(f"Taleo instance: {base_url}")
print(f"Career section: {section}")
# Search endpoint: POST JSON body; the portal id travels as a query parameter.
url = f"https://{company}.taleo.net/careersection/rest/jobboard/searchjobs?lang=en&portal={portal_id}"

# Taleo rejects requests that don't look like the in-page XHR, so mimic it.
headers = {
    "Content-Type": "application/json",
    "X-Requested-With": "XMLHttpRequest",
    "Accept": "application/json, text/javascript, */*; q=0.01",
    "Referer": f"https://{company}.taleo.net/careersection/{section}/jobsearch.ftl",
}

# Empty KEYWORD/LOCATION/CATEGORY filters return every posting.
payload = {
    "multilineEnabled": False,
    "sortingSelection": {
        "sortBySelectionParam": "1",
        "ascendingSortingOrder": "false",
    },
    "fieldData": {
        "fields": {
            "KEYWORD": "",
            "LOCATION": "",
            "CATEGORY": "",
        },
        "valid": True,
    },
    "pageNo": 1,
}

# timeout + raise_for_status: fail fast on a dead instance or HTTP error
# instead of trying to parse an error page as JSON (matches safe_fetch_jobs).
response = requests.post(url, json=payload, headers=headers, timeout=30)
response.raise_for_status()
data = response.json()

jobs = data.get("requisitionList", [])
total_count = data.get("pagingData", {}).get("totalCount", 0)
print(f"Found {total_count} total jobs, fetched {len(jobs)} on page 1")

import json  # used below to decode the JSON-encoded location column
# Each requisition row packs its display fields into a "column" list whose
# order mirrors the job board's configured columns: title, location, date.
for job in jobs:
    cols = job.get("column", [])
    n_cols = len(cols)
    contest_no = job.get("contestNo")  # public identifier — use this for URLs

    parsed_job = {
        "job_id": job.get("jobId"),
        "contest_no": contest_no,
        "title": cols[0] if n_cols > 0 else None,
        # location arrives as a JSON-encoded array string, e.g. '["US-NE-Omaha"]'
        "location": json.loads(cols[1]) if n_cols > 1 else [],
        "posted_date": cols[2] if n_cols > 2 else None,
        "is_hot_job": job.get("hotJob", False),
        "detail_url": f"https://{company}.taleo.net/careersection/{section}/jobdetail.ftl?job={contest_no}",
    }
    print(f"{parsed_job['title']} - {parsed_job['location']}")
import time

# Walk every result page until the server returns an empty page or we have
# collected totalCount jobs, whichever comes first.
all_jobs = []
page_no = 1
page_size = 25  # Taleo's default page size (informational)

while True:
    payload["pageNo"] = page_no
    response = requests.post(url, json=payload, headers=headers)
    data = response.json()

    jobs = data.get("requisitionList", [])
    if not jobs:
        # Empty page: we walked past the end of the result set.
        break
    all_jobs.extend(jobs)

    total_count = data.get("pagingData", {}).get("totalCount", 0)
    print(f"Page {page_no}: {len(jobs)} jobs (total: {len(all_jobs)}/{total_count})")

    if len(all_jobs) >= total_count:
        break  # everything collected

    page_no += 1
    time.sleep(0.5)  # Rate limiting: be polite between pages

print(f"Total jobs collected: {len(all_jobs)}")
from bs4 import BeautifulSoup


def fetch_job_details(contest_no: str) -> dict:
    """Fetch a jobdetail.ftl page and scrape title/location/description.

    The searchjobs API only returns listing columns; full descriptions exist
    only in the HTML detail page. Selectors vary between Taleo instances —
    verify them against each target site.
    """
    detail_url = f"https://{company}.taleo.net/careersection/{section}/jobdetail.ftl?job={contest_no}"
    # timeout keeps a hung instance from blocking the whole crawl
    response = requests.get(detail_url, timeout=30)
    soup = BeautifulSoup(response.text, "html.parser")

    # Fall back through common markup variants used by different instances.
    title_elem = soup.find("h1") or soup.find(class_="title")
    location_elem = soup.find(class_="location")
    description_elem = soup.find(class_="jobdescription") or soup.find(id="jobdescription")

    return {
        "contest_no": contest_no,
        "title": title_elem.get_text(strip=True) if title_elem else None,
        "location": location_elem.get_text(strip=True) if location_elem else None,
        "description": description_elem.get_text(strip=True) if description_elem else None,
        "url": detail_url,
    }


# Fetch details for the first collected job
if all_jobs:
    first_job = all_jobs[0]
    details = fetch_job_details(first_job["contestNo"])
    print(f"Title: {details['title']}")
def safe_fetch_jobs(company: str, section: str, portal_id: str) -> list:
    """Fetch page 1 of a company's Taleo job board, returning [] on any failure.

    Handles timeouts, HTTP errors, malformed JSON, and Taleo's
    'Career Section Unavailable' placeholder page.
    """
    url = f"https://{company}.taleo.net/careersection/rest/jobboard/searchjobs"
    params = {"lang": "en", "portal": portal_id}
    # Copy the shared payload and pin pageNo to 1: the pagination loop above
    # mutates the global payload's pageNo, so reusing it directly would
    # silently fetch whatever page was requested last.
    body = {**payload, "pageNo": 1}
    try:
        response = requests.post(url, params=params, json=body, headers=headers, timeout=30)
        response.raise_for_status()
        # Dead/renamed sections answer 200 with a placeholder page.
        if "Career Section Unavailable" in response.text:
            print(f"Career section '{section}' unavailable for {company}")
            return []
        data = response.json()
        return data.get("requisitionList", [])
    except requests.exceptions.Timeout:
        print(f"Timeout fetching jobs for {company}")
        return []
    except requests.exceptions.RequestException as e:
        print(f"Error fetching jobs for {company}: {e}")
        return []
    except ValueError as e:  # response.json() raises a ValueError subclass
        print(f"JSON parse error for {company}: {e}")
        return []


# Test with known working instance
jobs = safe_fetch_jobs("hdr", "ex", "101430233")
print(f"Fetched {len(jobs)} jobs")

Taleo instances may be offline or use different section names. Try common sections (ex, 1, 2, 10000) and verify the portal ID matches the company. Some instances use tbe.taleo.net instead of taleo.net.
The portal ID is required for API calls. Inspect network requests on the jobsearch.ftl page to find the portal parameter in the searchjobs API call. It's typically a 9-12 digit number.
The API returns both jobId (internal) and contestNo (public). Always use contestNo when constructing jobdetail.ftl URLs, as this is the public-facing job identifier.
The searchjobs API only returns basic job info. You must fetch jobdetail.ftl pages and parse HTML to get full descriptions. There is no pure API method for descriptions.
Taleo instances may rate limit aggressive requests. Add 500ms-1s delays between requests and implement retry logic with exponential backoff for timeout errors.
Different Taleo versions and company customizations affect HTML structure. Test your CSS selectors against each target instance and use fallback selectors when possible.
The column[1] field contains location data as a JSON array string. Use json.loads() to parse it into a proper list before processing.
1. Use the REST API for efficient job discovery instead of HTML scraping for listings
2. Always extract the portal ID from network requests before making API calls
3. Use contestNo (not jobId) when building job detail URLs
4. Parse location data with json.loads() since it's returned as a JSON string
5. Add 500ms delays between requests to avoid rate limiting
6. Handle 'Career Section Unavailable' errors gracefully with fallback sections
One endpoint. All Taleo jobs. No scraping, no sessions, no maintenance.
Get API access:

curl "https://enterprise.jobo.world/api/jobs?sources=taleo" \
  -H "X-Api-Key: YOUR_KEY"

Access Taleo job data today.
One API call. Structured data. No scraping infrastructure to build or maintain — start with the free tier and scale as you grow.