BambooHR Jobs API.
HR software for small and medium businesses with integrated applicant tracking.
Try the API.
Test Jobs, Feed, and Auto-Apply endpoints against https://connect.jobo.world with live request/response examples, then copy ready-to-use curl commands.
What's in every response.
Data fields, real-world applications, and the companies already running on BambooHR.
- SMB coverage
- Clean JSON API
- Company info endpoint
- Location type mapping
- Rich job metadata
- Experience levels
1. SMB job monitoring
2. HR tech company tracking
3. Mid-market talent sourcing
4. Company validation workflows
How to scrape BambooHR.
Step-by-step guide to extracting jobs from BambooHR-powered career pages—endpoints, authentication, and working code.
import requests
def validate_company(subdomain: str) -> dict | None:
    """Validate that a BambooHR company subdomain exists.

    Args:
        subdomain: The company's BambooHR subdomain (e.g. "cortina").

    Returns:
        The company-info "result" payload as a dict, or None when the
        request fails, the server returns an error status, or the
        response body is not valid JSON.
    """
    url = f"https://{subdomain}.bamboohr.com/careers/company-info"
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        data = response.json()
    except (requests.RequestException, ValueError):
        # ValueError covers malformed JSON bodies on requests versions
        # where .json() raises it instead of a RequestException subclass.
        return None
    return data.get("result")
# Example usage
company = validate_company("cortina")
if not company:
    print("Invalid company subdomain")
else:
    print(f"Company: {company['name']}")

import requests
def fetch_job_listings(subdomain: str) -> list[dict]:
    """Fetch all job listings from a BambooHR company."""
    listing_url = f"https://{subdomain}.bamboohr.com/careers/list"
    resp = requests.get(listing_url, timeout=10)
    resp.raise_for_status()
    payload = resp.json()
    # The listing index reports its own size under meta.totalCount.
    total_count = payload.get("meta", {}).get("totalCount", 0)
    print(f"Found {total_count} jobs for {subdomain}")
    return payload.get("result", [])
# Example usage
jobs = fetch_job_listings("cortina")
for job in jobs[:3]:
    print(f"- {job['jobOpeningName']} ({job['departmentLabel']})")


def map_workplace_type(location_type: str | None) -> str:
    """Map BambooHR location type codes to readable values."""
    # BambooHR encodes workplace as string digits: 0/1/2.
    if location_type == "0":
        return "On-site"
    if location_type == "1":
        return "Remote"
    if location_type == "2":
        return "Hybrid"
    return "Not Specified"
# Parse job listings with workplace type
for job in jobs:
    loc = job.get("atsLocation", {})
    city = loc.get("city", "N/A")
    state = loc.get("state", "N/A")
    workplace = map_workplace_type(job.get("locationType"))
    print(f"{job['jobOpeningName']}: {workplace} - {city}, {state}")

import requests
import time
def fetch_job_details(subdomain: str, job_id: str) -> dict:
    """Fetch detailed information for a specific job."""
    detail_url = f"https://{subdomain}.bamboohr.com/careers/{job_id}/detail"
    resp = requests.get(detail_url, timeout=10)
    resp.raise_for_status()
    body = resp.json()
    # The detail payload nests the job under result.jobOpening.
    return body.get("result", {}).get("jobOpening", {})
# Fetch details for all jobs (with rate limiting)
for job in jobs[:5]:  # Limit to first 5 for example
    details = fetch_job_details("cortina", job["id"])
    summary = {
        "title": details.get("jobOpeningName"),
        "posted": details.get("datePosted"),
        "experience": details.get("minimumExperience"),
        "compensation": details.get("compensation"),
    }
    print(summary)
    time.sleep(1)  # Be respectful with rate limiting

import requests
import time
def scrape_bamboohr_jobs(subdomain: str) -> list[dict]:
    """Complete BambooHR job scraper with error handling.

    Validates the company subdomain, fetches the listing index, then
    pulls each job's detail page and assembles one flat dict per job.

    Args:
        subdomain: The company's BambooHR subdomain (e.g. "cortina").

    Returns:
        A list of job dicts; empty when the company is invalid or the
        listing request fails. A job whose detail request fails is
        logged and skipped rather than aborting the whole run.
    """
    base_url = f"https://{subdomain}.bamboohr.com"
    # A single Session reuses the TCP/TLS connection across the many
    # sequential requests below instead of reconnecting per call.
    session = requests.Session()

    # Validate company exists. ValueError covers non-JSON bodies on
    # requests versions where .json() raises it directly.
    try:
        info_resp = session.get(f"{base_url}/careers/company-info", timeout=10)
        info_resp.raise_for_status()
        company_name = info_resp.json().get("result", {}).get("name", subdomain)
    except (requests.RequestException, ValueError):
        print(f"Company '{subdomain}' not found or invalid")
        return []

    # Fetch job listings
    try:
        list_resp = session.get(f"{base_url}/careers/list", timeout=10)
        list_resp.raise_for_status()
        listings = list_resp.json().get("result", [])
    except (requests.RequestException, ValueError) as e:
        print(f"Error fetching listings: {e}")
        return []

    # Fetch details for each job; posted date, description, and
    # compensation only appear on the per-job detail endpoint.
    results = []
    for job in listings:
        try:
            detail_url = f"{base_url}/careers/{job['id']}/detail"
            detail_resp = session.get(detail_url, timeout=10)
            detail_resp.raise_for_status()
            details = detail_resp.json().get("result", {}).get("jobOpening", {})
            results.append({
                "id": job["id"],
                "title": job["jobOpeningName"],
                "company": company_name,
                "department": job.get("departmentLabel"),
                "location": job.get("atsLocation", {}),
                "workplace_type": map_workplace_type(job.get("locationType")),
                "employment_type": job.get("employmentStatusLabel"),
                "description": details.get("description"),
                "posted_date": details.get("datePosted"),
                "experience": details.get("minimumExperience"),
                "compensation": details.get("compensation"),
                "url": details.get("jobOpeningShareUrl"),
            })
            time.sleep(0.5)  # Rate limiting
        except (requests.RequestException, ValueError) as e:
            print(f"Error fetching job {job['id']}: {e}")
    return results
# Run the scraper
jobs = scrape_bamboohr_jobs("cortina")
print(f"Scraped {len(jobs)} complete job listings")Not all companies use BambooHR's career page feature, or they may use a different subdomain format. Always validate with the /careers/company-info endpoint before scraping.
BambooHR returns both 'location' and 'atsLocation' objects with slightly different data. Prefer 'atsLocation' as it typically has more complete country and state information.
The /careers/list endpoint does not include the datePosted field. You must fetch job details from /careers/{id}/detail to get the posted date.
Not all companies expose salary information, and the format varies (string vs object). Always check for null values and handle both formats gracefully.
BambooHR does not publish rate limits. Implement 1-2 second delays between requests and use exponential backoff on 429 errors.
For remote positions, the 'location' object may have null values. Always check 'atsLocation' as a fallback, and use locationType '1' to identify remote jobs.
1. Always validate company subdomains with the company-info endpoint before scraping
2. Use atsLocation over location for more complete address data
3. Implement rate limiting with 1-2 second delays between requests
4. Fetch job details separately to get posted dates and full descriptions
5. Cache results - job boards typically update daily or weekly
6. Handle null compensation gracefully - not all companies expose salary data
One endpoint. All BambooHR jobs. No scraping, no sessions, no maintenance.
Get API access:

curl "https://enterprise.jobo.world/api/jobs?sources=bamboohr" \
  -H "X-Api-Key: YOUR_KEY"

Access BambooHR job data today.
One API call. Structured data. No scraping infrastructure to build or maintain — start with the free tier and scale as you grow.