Workable Jobs API.
All-in-one recruiting software used by growing companies worldwide.
Try the API.
Test Jobs, Feed, and Auto-Apply endpoints against https://connect.jobo.world with live request/response examples, then copy ready-to-use curl commands.
What's in every response.
Data fields, real-world applications, and the companies already running on Workable.
- Growth company focus
- Global coverage
- Structured data
- Team info
- Location flexibility
- Rich metadata
- 01. SMB job aggregation
- 02. Global talent sourcing
- 03. Industry-specific tracking
How to scrape Workable.
Step-by-step guide to extracting jobs from Workable-powered career pages—endpoints, authentication, and working code.
import re
from urllib.parse import urlparse
def extract_account_slug(url: str) -> str | None:
"""Extract Workable account slug from various URL formats."""
parsed = urlparse(url)
hostname = parsed.hostname or ""
path = parsed.path
# Primary format: apply.workable.com/{slug}
if hostname == "apply.workable.com":
match = re.match(r"^/([^/]+)", path)
return match.group(1) if match else None
# Subdomain format: {slug}.workable.com
if hostname.endswith(".workable.com") and hostname != "jobs.workable.com":
return hostname.replace(".workable.com", "")
# Jobs board format: jobs.workable.com/company/{id}
if hostname == "jobs.workable.com":
match = re.search(r"/company/([^/]+)", path)
return match.group(1) if match else None
return None
# Test with different URL formats
slug = extract_account_slug("https://apply.workable.com/2070health/")
print(f"Account slug: {slug}")  # "2070health"

# Extraction artifact fixed: these imports were fused onto the line above,
# which left `import requests` trapped inside a comment.
import requests
import time
def fetch_workable_listings(account_slug: str, max_pages: int = 10) -> list[dict]:
    """Fetch every job listing for *account_slug* from the v3 jobs API.

    Pagination is cursor-based: each response may carry a ``nextPage``
    token that is echoed back verbatim on the following request. Stops
    after *max_pages* pages, on an empty result page, or when no token
    is returned.
    """
    endpoint = f"https://apply.workable.com/api/v3/accounts/{account_slug}/jobs"
    request_headers = {
        "Content-Type": "application/json",
        "Accept": "application/json",
        "Origin": "https://apply.workable.com",
        "Referer": f"https://apply.workable.com/{account_slug}/",
    }

    collected: list[dict] = []
    cursor = None
    page = 0
    while page < max_pages:
        page += 1
        body = {
            "query": "",
            "token": cursor,
            "department": [],
            "location": [],
            "workplace": [],
            "worktype": [],
        }
        resp = requests.post(endpoint, json=body, headers=request_headers, timeout=30)
        resp.raise_for_status()
        payload = resp.json()

        batch = payload.get("results", [])
        if not batch:
            break
        collected.extend(batch)
        print(f"Page {page}: fetched {len(batch)} jobs (total: {len(collected)})")

        # A missing/empty token means this was the last page.
        cursor = payload.get("nextPage")
        if not cursor:
            break
        time.sleep(0.2)  # polite pacing between page requests
    return collected
# Fetch listings for a company
jobs = fetch_workable_listings("2070health")
print(f"Total jobs found: {len(jobs)}")

# Extraction artifact fixed: `import requests` was fused onto the end of
# the print() call above, which is a syntax error.
import requests
import time
def fetch_job_details(account_slug: str, shortcode: str) -> dict:
    """Return the full v2 API record for a single job posting.

    Raises requests.HTTPError on non-2xx responses.
    """
    endpoint = (
        f"https://apply.workable.com/api/v2/accounts/{account_slug}/jobs/{shortcode}"
    )
    resp = requests.get(
        endpoint,
        headers={
            "Accept": "application/json",
            "Referer": f"https://apply.workable.com/{account_slug}/j/{shortcode}/",
        },
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json()
def fetch_all_job_details(account_slug: str, listings: list[dict]) -> list[dict]:
    """Fetch and normalize the detail record for every job in *listings*.

    Jobs whose detail request fails with a requests error are reported and
    skipped; the remaining jobs are still fetched.
    """
    total = len(listings)
    collected: list[dict] = []
    for index, listing in enumerate(listings):
        code = listing["shortcode"]
        try:
            record = fetch_job_details(account_slug, code)
        except requests.RequestException as exc:
            print(f"[{index+1}/{total}] Failed to fetch {code}: {exc}")
        else:
            # Description, requirements, and benefits arrive as separate
            # HTML sections; join the non-empty ones into one document.
            sections = [
                record.get("description", ""),
                record.get("requirements", ""),
                record.get("benefits", ""),
            ]
            combined = "\n".join(part for part in sections if part)
            collected.append({
                "id": record["id"],
                "shortcode": record["shortcode"],
                "title": record["title"],
                "description": combined,
                "department": record.get("department", []),
                "location": record.get("location", {}),
                "workplace": record.get("workplace"),
                "remote": record.get("remote", False),
                "employment_type": record.get("type"),
                "published": record.get("published"),
                "url": f"https://apply.workable.com/{account_slug}/j/{code}/"
            })
            print(f"[{index+1}/{total}] Fetched: {record['title']}")
        time.sleep(0.3)  # keep the request rate gentle
    return collected
# Fetch all details
all_details = fetch_all_job_details("2070health", jobs[:5])  # First 5 jobs
print(f"Fetched details for {len(all_details)} jobs")

# Extraction artifact fixed: `import requests` was fused onto the end of
# the print() call above, which is a syntax error.
import requests
def fetch_account_info(account_slug: str) -> dict:
    """Fetch the company/account profile from the v1 accounts API.

    ``full=true`` asks the API for the expanded profile record.
    """
    endpoint = f"https://apply.workable.com/api/v1/accounts/{account_slug}"
    resp = requests.get(
        endpoint,
        params={"full": "true"},
        headers={
            "Accept": "application/json",
            "Referer": f"https://apply.workable.com/{account_slug}/",
        },
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json()
# Get company info
account = fetch_account_info("2070health")
print(f"Company: {account.get('name')}")
print(f"Website: {account.get('url')}")
print(f"Logo: {account.get('logo')}")

# Extraction artifact fixed: `import requests` was fused onto the end of
# the print() call above, which is a syntax error.
import requests
import time
def scrape_workable_jobs(account_slug: str) -> dict:
    """Complete Workable scraper with error handling.

    Returns a dict with keys:
      - "account": company profile dict, or None if the lookup failed
      - "jobs":    list of full job-detail dicts
      - "errors":  human-readable strings for every failure encountered
    """
    result = {
        "account": None,
        "jobs": [],
        "errors": []
    }
    try:
        # Get account info; a 404 means the slug itself is wrong, so stop early.
        result["account"] = fetch_account_info(account_slug)
    except requests.HTTPError as e:
        if e.response is not None and e.response.status_code == 404:
            result["errors"].append("Account not found - verify the slug")
            return result
        result["errors"].append(f"Account API error: {e}")
    try:
        # Fetch listings with retry and exponential backoff on 429s.
        # Bug fix: `listings` was previously unbound when every retry
        # failed, producing a confusing NameError in the broad handler
        # below instead of the real HTTP error.
        listings = []
        max_retries = 3
        for attempt in range(max_retries):
            try:
                listings = fetch_workable_listings(account_slug)
                break
            except requests.HTTPError as e:
                status = e.response.status_code if e.response is not None else None
                if status == 429 and attempt < max_retries - 1:
                    wait = 2 ** attempt
                    print(f"Rate limited, waiting {wait}s...")
                    time.sleep(wait)
                else:
                    # Non-rate-limit error, or retries exhausted: surface it.
                    raise
        # Fetch details with graceful handling: one bad job must not
        # abort the rest of the run.
        for job in listings:
            try:
                detail = fetch_job_details(account_slug, job["shortcode"])
                result["jobs"].append(detail)
                time.sleep(0.3)
            except requests.HTTPError as e:
                result["errors"].append(f"Job {job['shortcode']}: {e}")
    except Exception as e:
        result["errors"].append(f"Scraping failed: {e}")
    return result
# Run the scraper
result = scrape_workable_jobs("2070health")
print(f"Scraped {len(result['jobs'])} jobs with {len(result['errors'])} errors")Verify the account exists by checking the apply.workable.com URL in a browser. Some companies use custom domains. Try different URL formats (subdomain.workable.com vs apply.workable.com/slug).
Some companies have no open positions or hide jobs behind filters. Try with empty filter arrays first. Check if the company has published jobs by visiting their job board directly.
Add 200-300ms delays between requests. Workable doesn't publish official rate limits, but aggressive scraping will be throttled. Implement exponential backoff for 429 errors.
Not all companies fill in all sections. Handle null/empty fields gracefully. The description field is most reliably present, while requirements and benefits may be empty strings.
The nextPage token is base64-encoded and should be passed exactly as received. If pagination fails, start fresh with token=null. Set a maximum page limit to prevent infinite loops.
The jobs.workable.com format uses viewId instead of shortcode. Extract the account slug from the company page URL or use the API to resolve the internal ID mapping.
- 1. Use v3 API for listings and v2 API for job details
- 2. Add 200-300ms delay between requests to avoid rate limiting
- 3. Combine description, requirements, and benefits fields for complete job content
- 4. Handle empty filter arrays - do not pre-fill department/location filters
- 5. Implement exponential backoff for 429 rate limit errors
- 6. Cache results - job boards typically update daily, not hourly
One endpoint. All Workable jobs. No scraping, no sessions, no maintenance.
Get API access:
curl "https://enterprise.jobo.world/api/jobs?sources=workable" \
  -H "X-Api-Key: YOUR_KEY"
Access Workable job data today.
One API call. Structured data. No scraping infrastructure to build or maintain — start with the free tier and scale as you grow.