Lever Jobs API.
Modern recruiting software used by fast-growing companies. Quality job data with detailed role information.
Try the API.
Test Jobs, Feed, and Auto-Apply endpoints against https://connect.jobo.world with live request/response examples, then copy ready-to-use curl commands.
What's in every response.
Data fields, real-world applications, and the companies already running on Lever.
- Comprehensive job details
- Team & department data
- Work type classification
- Benefits information
- Application requirements
- 01: Growth company tracking
- 02: Tech talent sourcing
- 03: Compensation research
How to scrape Lever.
Step-by-step guide to extracting jobs from Lever-powered career pages—endpoints, authentication, and working code.
import re

# Extract the company slug from a Lever job-board URL.
# URL pattern: https://jobs.lever.co/{companySlug}
# Examples:
# - https://jobs.lever.co/netflix -> slug: "netflix"
# - https://jobs.lever.co/spotify -> slug: "spotify"
url = "https://jobs.lever.co/netflix"
# Capture everything after the host, stopping at a path, query, or fragment.
match = re.search(r"jobs\.lever\.co/([^/?#]+)", url)
if match:
    company_slug = match.group(1)
    print(f"Company slug: {company_slug}")  # "netflix"
else:
    print("Invalid Lever URL")

import requests
def get_lever_jobs(company_slug: str) -> list:
    """Fetch all public job postings from a Lever company job board.

    Args:
        company_slug: The slug from jobs.lever.co/{companySlug}.

    Returns:
        A list of posting dicts as returned by the Lever postings API.

    Raises:
        ValueError: If the company does not exist or has disabled the API.
        requests.HTTPError: For any other non-2xx response.
    """
    url = f"https://api.lever.co/v0/postings/{company_slug}?mode=json"
    response = requests.get(url, timeout=10)
    # Lever returns 404 both for unknown slugs and for boards with the
    # public API disabled; surface that as a clearer error for callers.
    if response.status_code == 404:
        raise ValueError(f'Company "{company_slug}" not found or API disabled')
    response.raise_for_status()
    return response.json()

# Usage
jobs = get_lever_jobs("netflix")
print(f"Found {len(jobs)} jobs")

from datetime import datetime
def parse_lever_job(job: dict) -> dict:
    """Parse a raw Lever job posting into a flat, structured dict.

    Args:
        job: One posting object from the Lever postings API.

    Returns:
        Dict with normalized keys (id, title, location, salary, ...).
        ``salary`` is None when the posting exposes no salaryRange.
    """
    categories = job.get("categories", {})
    salary = job.get("salaryRange")
    # Lever timestamps are epoch milliseconds. Use `or 0` (not a .get
    # default) so an explicit null createdAt doesn't raise a TypeError.
    created_ms = job.get("createdAt") or 0
    return {
        "id": job.get("id"),
        "title": job.get("text"),
        "location": categories.get("location"),
        "all_locations": categories.get("allLocations", [categories.get("location")]),
        "department": categories.get("department"),
        "team": categories.get("team"),
        "employment_type": categories.get("commitment"),
        "workplace_type": job.get("workplaceType"),  # remote, hybrid, onsite
        "description_plain": job.get("descriptionPlain") or job.get("description"),
        "description_html": job.get("description"),
        "description_body": job.get("descriptionBody"),
        "opening": job.get("opening"),
        "benefits": job.get("additional"),
        "lists": job.get("lists", []),  # Structured requirements/responsibilities
        "salary": {
            "min": salary.get("min"),
            "max": salary.get("max"),
            "currency": salary.get("currency"),
            "interval": salary.get("interval"),
        } if salary else None,
        "apply_url": job.get("applyUrl"),
        "view_url": job.get("hostedUrl"),
        "posted_at": datetime.fromtimestamp(created_ms / 1000),
        "country": job.get("country"),
    }
# Fetch every live posting and normalize each one into the flat schema.
jobs = get_lever_jobs("netflix")
parsed_jobs = list(map(parse_lever_job, jobs))
print(parsed_jobs[0])

def build_full_description(job: dict) -> str:
    """Build a complete HTML description from all Lever content sections.

    Args:
        job: Raw Lever posting dict.

    Returns:
        HTML string combining the opening, description body, structured
        lists, and the "additional" (benefits) section, separated by
        blank lines. Empty string if the posting has none of them.
    """
    parts = []
    # Add opening/intro section
    if job.get("opening"):
        parts.append(job["opening"])
    # Add main description body
    if job.get("descriptionBody"):
        parts.append(job["descriptionBody"])
    # Add structured lists (requirements, responsibilities): each list
    # carries a heading ("text") and HTML content ("content").
    for lst in job.get("lists", []):
        if lst.get("text"):
            parts.append(f"<h3>{lst['text']}</h3>")
        if lst.get("content"):
            parts.append(lst["content"])
    # Add benefits/additional info
    if job.get("additional"):
        parts.append("<h3>Benefits & Perks</h3>")
        parts.append(job["additional"])
    return "\n\n".join(parts)
# Build full description for each job
for job in jobs:
    full_html = build_full_description(job)
    print(f"{job.get('text')}: {len(full_html)} chars")

import time
import requests

def scrape_multiple_companies(slugs: list, delay_seconds: float = 1.0) -> dict:
    """Scrape jobs from multiple Lever companies with rate limiting.

    Args:
        slugs: Company slugs to fetch (jobs.lever.co/{slug}).
        delay_seconds: Pause between requests; Lever's robots.txt asks
            for a 1-second crawl delay.

    Returns:
        Mapping of slug -> {"jobs": list, "error": str | None}. Failed
        companies get an empty job list plus an error label, so one bad
        slug never aborts the whole run.
    """
    results = {}
    for slug in slugs:
        try:
            url = f"https://api.lever.co/v0/postings/{slug}?mode=json"
            response = requests.get(url, timeout=10)
            if response.status_code == 404:
                print(f"[SKIP] {slug}: Company not found")
                results[slug] = {"jobs": [], "error": "not_found"}
            else:
                response.raise_for_status()
                jobs = response.json()
                results[slug] = {"jobs": jobs, "error": None}
                print(f"[OK] {slug}: {len(jobs)} jobs")
        except requests.RequestException as e:
            print(f"[ERROR] {slug}: {e}")
            results[slug] = {"jobs": [], "error": str(e)}
        # Respect robots.txt crawl-delay of 1 second
        time.sleep(delay_seconds)
    return results

# Scrape multiple companies
companies = ["netflix", "spotify", "twitch", "lyft", "coinbase"]
results = scrape_multiple_companies(companies, delay_seconds=1.0)
total_jobs = sum(len(r["jobs"]) for r in results.values())
print(f"\nTotal jobs found: {total_jobs}")

The company may not exist on Lever, may have changed their slug, or may have disabled the public API. Check the actual jobs.lever.co URL and verify the company still uses Lever.
Add 1+ second delays between requests as specified in robots.txt. The unofficial rate limit is ~100 requests per minute. Implement exponential backoff for repeated errors.
Not all companies expose salary information. The salaryRange field may be null or undefined. Handle missing salary data gracefully in your code.
Each company formats their job descriptions differently. Use descriptionPlain for consistent plain text, or implement HTML sanitization for the description field.
The company may have no open positions, or all jobs may be internal/hidden. Some companies set jobs to unlisted status. Verify by checking the actual job board in a browser.
1. Use the public JSON API endpoint for reliable data extraction
2. Extract the company slug from the URL pattern: jobs.lever.co/{companySlug}
3. Respect the 1-second crawl-delay specified in robots.txt
4. Use the descriptionPlain field for consistent text without HTML
5. Combine opening, descriptionBody, lists, and additional for complete descriptions
6. Cache API responses — job boards typically update daily
One endpoint. All Lever jobs. No scraping, no sessions, no maintenance.
Get API access:

curl "https://enterprise.jobo.world/api/jobs?sources=lever" \
  -H "X-Api-Key: YOUR_KEY"

Access Lever job data today.
One API call. Structured data. No scraping infrastructure to build or maintain — start with the free tier and scale as you grow.