Ashby Jobs API.
All-in-one recruiting platform popular with modern tech companies and startups.
Try the API.
Test Jobs, Feed, and Auto-Apply endpoints against https://connect.jobo.world with live request/response examples, then copy ready-to-use curl commands.
What's in every response.
Data fields, real-world applications, and the companies already running on Ashby.
- Modern tech companies
- Startup ecosystem
- Detailed job specs
- Team information
- Compensation data
1. Startup tracking
2. Tech talent sourcing
3. Early-stage company monitoring
How to scrape Ashby.
Step-by-step guide to extracting jobs from Ashby-powered career pages—endpoints, authentication, and working code.
import requests

# Fetch every posting for one company from Ashby's public posting API.
company_slug = "limble"
url = f"https://api.ashbyhq.com/posting-api/job-board/{company_slug}"
params = {"includeCompensation": "true"}  # opt in to salary fields

response = requests.get(url, params=params, timeout=10)
response.raise_for_status()
data = response.json()

# All jobs returned in a single response - no pagination needed.
# Drop unlisted/draft postings; isListed defaults to True when absent.
jobs = [j for j in data.get("jobs", []) if j.get("isListed", True)]
print(f"Found {len(jobs)} active jobs for {company_slug}")

def _preview(text: str, limit: int = 200) -> str:
    """Truncate *text* to *limit* characters, adding an ellipsis only when truncated."""
    return text if len(text) <= limit else text[:limit] + "..."

for job in jobs:
    # Extract address details if available ("address" may be absent or null).
    address = (job.get("address") or {}).get("postalAddress") or {}
    job_data = {
        "id": job["id"],  # UUID format
        "title": job["title"],
        "department": job.get("department"),
        "team": job.get("team"),
        "location": job.get("location"),
        "city": address.get("addressLocality"),
        "country": address.get("addressCountry"),
        "is_remote": job.get("isRemote", False),
        "employment_type": job.get("employmentType"),
        "job_url": job.get("jobUrl"),
        "apply_url": job.get("applyUrl"),
        "published_at": job.get("publishedAt"),
        # "compensation" can be null even with includeCompensation=true.
        "salary": (job.get("compensation") or {}).get("compensationTierSummary"),
        "description_html": _preview(job.get("descriptionHtml", "")),
        "description_plain": _preview(job.get("descriptionPlain", "")),
    }
    print(f"{job_data['title']} - {job_data['location']}")
def validate_company(slug: str) -> dict | None:
    """Check whether *slug* is a real Ashby company.

    Posts to Ashby's public (non-user) GraphQL endpoint and returns the
    organization record, or None when the slug is unknown.
    """
    url = "https://jobs.ashbyhq.com/api/non-user-graphql"
    # The "op" query parameter must match operationName exactly.
    params = {"op": "ApiOrganizationFromHostedJobsPageName"}
    payload = {
        "operationName": "ApiOrganizationFromHostedJobsPageName",
        "variables": {
            "organizationHostedJobsPageName": slug,
            "searchContext": "JobBoard",
        },
        "query": """query ApiOrganizationFromHostedJobsPageName(
    $organizationHostedJobsPageName: String!,
    $searchContext: String
) {
    organization(
        organizationHostedJobsPageName: $organizationHostedJobsPageName
        searchContext: $searchContext
    ) { name publicWebsite hostedJobsPageSlug allowJobPostIndexing }
}""",
    }
    resp = requests.post(url, json=payload, params=params, timeout=10)
    resp.raise_for_status()  # surface HTTP errors instead of an opaque KeyError below
    return resp.json()["data"]["organization"]  # None if invalid

# Test validation
org = validate_company("limble")
if org:
    print(f"Company: {org['name']}, Website: {org['publicWebsite']}")
else:
    print("Invalid company slug")

def parse_all_locations(job: dict) -> list:
    """Return every advertised location for *job*.

    Collects the primary location plus any entries in secondaryLocations;
    each item records the location string and whether it is remote.
    """
    locations = []
    # Primary location
    if job.get("location"):
        locations.append({
            "type": "primary",
            "location": job["location"],
            "remote": job.get("isRemote", False),
        })
    # Secondary locations (key may be absent or null)
    for loc in job.get("secondaryLocations") or []:
        locations.append({
            "type": "secondary",
            "location": loc.get("location"),
            "remote": loc.get("isRemote", False),
        })
    return locations

# Process jobs with all location options
for job in jobs:
    all_locations = parse_all_locations(job)
    print(f"{job['title']}: {len(all_locations)} location(s)")

import time  # used by the rate-limited batch fetcher that follows
import time
import requests

def fetch_jobs_batch(slugs: list, delay: float = 0.6) -> dict:
    """Fetch active (listed) jobs for several Ashby company slugs.

    Pauses *delay* seconds between requests (0.6 s stays near the unofficial
    ~100 requests/minute limit). A company whose request fails maps to an
    empty list rather than aborting the whole batch.
    """
    results = {}
    for index, slug in enumerate(slugs):
        if index:
            # Rate-limit pause between requests — no pointless sleep after the last one.
            time.sleep(delay)
        url = f"https://api.ashbyhq.com/posting-api/job-board/{slug}"
        try:
            resp = requests.get(url, params={"includeCompensation": "true"}, timeout=10)
            resp.raise_for_status()
            data = resp.json()
            active_jobs = [j for j in data.get("jobs", []) if j.get("isListed", True)]
            results[slug] = active_jobs
            print(f"{slug}: {len(active_jobs)} jobs")
        except requests.RequestException as e:
            print(f"Error fetching {slug}: {e}")
            results[slug] = []
    return results

companies = ["limble", "ramp", "notion", "linear"]
all_jobs = fetch_jobs_batch(companies)
print(f"Total: {sum(len(j) for j in all_jobs.values())} jobs")
import time
import requests

def scrape_ashby_company(slug: str) -> list[dict]:
    """Validate *slug*, then return its active (listed) Ashby job postings.

    Returns [] when the slug is not a known Ashby company. Raises
    requests.HTTPError on transport-level failures.
    """
    # Validate company first via the public GraphQL endpoint.
    validate_url = "https://jobs.ashbyhq.com/api/non-user-graphql"
    validate_resp = requests.post(
        validate_url,
        json={
            "operationName": "ApiOrganizationFromHostedJobsPageName",
            "variables": {"organizationHostedJobsPageName": slug, "searchContext": "JobBoard"},
            "query": "query ApiOrganizationFromHostedJobsPageName($organizationHostedJobsPageName: String!, $searchContext: String) { organization(organizationHostedJobsPageName: $organizationHostedJobsPageName, searchContext: $searchContext) { name } }"
        },
        params={"op": "ApiOrganizationFromHostedJobsPageName"},
        timeout=10,
    )
    validate_resp.raise_for_status()  # fail loudly on HTTP errors before indexing JSON
    if not validate_resp.json()["data"]["organization"]:
        print(f"Invalid company: {slug}")
        return []
    # Fetch jobs via the REST posting API, keeping only listed postings.
    url = f"https://api.ashbyhq.com/posting-api/job-board/{slug}"
    resp = requests.get(url, params={"includeCompensation": "true"}, timeout=10)
    resp.raise_for_status()
    return [j for j in resp.json().get("jobs", []) if j.get("isListed", True)]

jobs = scrape_ashby_company("limble")
print(f"Scraped {len(jobs)} jobs")
The REST API returns {"jobs": []} for invalid companies. Use the GraphQL validation endpoint to verify slugs before scraping. Check whether the company uses a custom domain instead of jobs.ashbyhq.com.
Not all companies expose salary information. Always include includeCompensation=true parameter. Check both compensationTierSummary and scrapeableCompensationSalarySummary fields, and handle null values gracefully.
Add delays between requests (500-600ms). The API has unofficial rate limits around 100 requests per minute. Implement exponential backoff for 429 errors.
Jobs with isListed=false are unlisted or draft jobs. Always filter these out: [j for j in jobs if j.get('isListed', True)] to avoid capturing non-public positions.
The GraphQL endpoint returns {"data": {"organization": null}} for invalid slugs. Ensure the operationName matches the 'op' query parameter exactly. Use the REST posting-api for simpler integration.
Ashby does not provide a sitemap.xml. Use manual databases, Google dorks (site:jobs.ashbyhq.com), or link scanning from known company career pages to discover new companies.
Ashby does not version their API (apiVersion: "1"). Monitor for field changes and use defensive coding with .get() methods. The schema may change without notice.
1. Use the REST posting-api endpoint instead of GraphQL - it returns complete job data in one call without pagination
2. Always include the includeCompensation=true parameter to get salary information when available
3. Filter jobs by isListed=true to exclude draft and unlisted positions
4. Rate limit to approximately 100 requests per minute (600ms delay between requests)
5. Use the GraphQL validation endpoint to verify company slugs before bulk scraping operations
6. Check the secondaryLocations array for jobs with multiple work location options
7. Cache results - job boards typically update daily, so avoid real-time scraping
One endpoint. All Ashby jobs. No scraping, no sessions, no maintenance.
Get API access:
curl "https://enterprise.jobo.world/api/jobs?sources=ashby" \
  -H "X-Api-Key: YOUR_KEY"
Access Ashby job data today.
One API call. Structured data. No scraping infrastructure to build or maintain — start with the free tier and scale as you grow.