Jobvite Jobs API.
End-to-end talent acquisition suite used by mid-market and enterprise companies.
Try the API.
Test Jobs, Feed, and Auto-Apply endpoints against https://connect.jobo.world with live request/response examples, then copy ready-to-use curl commands.
What's in every response.
Data fields, real-world applications, and the companies already running on Jobvite.
- Mid-market coverage
- Detailed listings
- Company data
- Requirements info
- Application details
1. Enterprise job monitoring
2. Mid-market company tracking
3. Competitive talent intelligence
4. Career page aggregation
How to scrape Jobvite.
Step-by-step guide to extracting jobs from Jobvite-powered career pages—endpoints, authentication, and working code.
import re
import time

import requests
from bs4 import BeautifulSoup

# The company slug identifies the tenant on jobs.jobvite.com.
company_slug = "nutanix"
url = f"https://jobs.jobvite.com/{company_slug}"

response = requests.get(url, timeout=10)
response.raise_for_status()
soup = BeautifulSoup(response.text, "html.parser")
print(f"Page loaded: {len(response.text)} bytes")

# Job IDs are 9-character lowercase alphanumeric strings, so one compiled
# pattern serves both as the find_all() href filter and the ID extractor.
job_id_pattern = re.compile(r"/job/([a-z0-9]{9})")

# Find all job links on the page.
job_links = soup.find_all("a", href=job_id_pattern)

jobs = []
for link in job_links:
    href = link.get("href", "")
    # Extract job ID from a URL like "/nutanix/job/ow6lzfwg".
    match = job_id_pattern.search(href)
    if match:
        jobs.append({
            "id": match.group(1),
            "title": link.get_text(strip=True),
            "url": f"https://jobs.jobvite.com{href}",
        })

print(f"Found {len(jobs)} jobs")
for job in jobs[:5]:
    print(f" - {job['title']} ({job['id']})")
def get_all_jobs(company_slug: str) -> list:
base_url = f"https://jobs.jobvite.com/{company_slug}/search"
all_jobs = []
page = 0
while True:
url = f"{base_url}/?p={page}"
response = requests.get(url, timeout=10)
soup = BeautifulSoup(response.text, "html.parser")
# Find job links on this page
job_links = soup.find_all("a", href=re.compile(r"/job/[a-z0-9]{9}"))
if not job_links:
break
for link in job_links:
href = link.get("href", "")
match = re.search(r"/job/([a-z0-9]{9})", href)
if match:
all_jobs.append({
"id": match.group(1),
"title": link.get_text(strip=True),
"url": f"https://jobs.jobvite.com{href}"
})
print(f"Page {page}: found {len(job_links)} jobs")
page += 1
time.sleep(0.5) # Be respectful
return all_jobsdef get_job_details(company_slug: str, job_id: str) -> dict:
url = f"https://jobs.jobvite.com/{company_slug}/job/{job_id}"
response = requests.get(url, timeout=10)
soup = BeautifulSoup(response.text, "html.parser")
# Extract structured data from known CSS classes
title_elem = soup.find("h2", class_="jv-header")
meta_elem = soup.find("p", class_="jv-job-detail-meta")
desc_elem = soup.find("div", class_="jv-job-detail-description")
job = {
"id": job_id,
"url": url,
"title": title_elem.get_text(strip=True) if title_elem else None,
"description_html": str(desc_elem) if desc_elem else None,
}
# Parse metadata (category, location, req number)
if meta_elem:
meta_text = meta_elem.get_text(separator="|", strip=True)
parts = [p.strip() for p in meta_text.split("|") if p.strip()]
if len(parts) >= 1:
job["category"] = parts[0]
if len(parts) >= 2:
job["location"] = parts[1]
if len(parts) >= 3:
job["requisition_number"] = parts[2]
return job
# Example usage
job = get_job_details("nutanix", "ow6lzfwg")
print(f"Title: {job['title']}")
print(f"Location: {job.get('location')}")def get_facets(company_slug: str, location: str = None) -> dict:
url = f"https://jobs.jobvite.com/{company_slug}/search/facets"
params = {"nl": 1}
if location:
params["l"] = location
response = requests.get(url, params=params, timeout=10)
response.raise_for_status()
data = response.json()
return data.get("facets", {})
# Get all available filters
facets = get_facets("nutanix")
print("Available locations:", len(facets.get("locations", [])))
print("Available categories:", len(facets.get("categories", [])))
print("Available departments:", len(facets.get("departments", [])))Jobvite uses server-side HTML rendering only. You must parse HTML responses using BeautifulSoup or similar libraries. The /search/facets endpoint provides filter options but not job data.
Some companies use custom domains (e.g., careers.company.com) instead of jobs.jobvite.com. Check the actual career page URL and adapt your scraper accordingly. The HTML structure is usually consistent.
Featured jobs are included in the main listing but appear first. Deduplicate by job ID if you need a clean list, or track featured status separately.
Job IDs are always 9-character lowercase alphanumeric strings. Ensure your regex pattern matches only lowercase: [a-z0-9]{9}
Categories with many jobs show a 'Show More' link that navigates to the search page. Use the paginated search endpoint (/search/?p={page}) to get all jobs in a category.
1. Use the paginated search endpoint for companies with 50+ jobs
2. Add delays between requests (500ms-1s) to be respectful
3. Cache job listings - they typically update daily at most
4. Parse the job detail meta section for category, location, and requisition number
5. Use the facets API to discover available filter options before scraping
6. Handle custom domains by detecting the URL structure before parsing
One endpoint. All Jobvite jobs. No scraping, no sessions, no maintenance.
Get API access:

curl "https://enterprise.jobo.world/api/jobs?sources=jobvite" \
  -H "X-Api-Key: YOUR_KEY"

Access Jobvite job data today.
One API call. Structured data. No scraping infrastructure to build or maintain — start with the free tier and scale as you grow.