GoHire Jobs API.
Simple and affordable applicant tracking system designed for small businesses and growing companies.
Try the API.
Test Jobs, Feed, and Auto-Apply endpoints against https://connect.jobo.world with live request/response examples, then copy ready-to-use curl commands.
What's in every response.
Data fields, real-world applications, and the companies already running on GoHire.
- SMB-focused
- Easy-to-use interface
- Affordable pricing
- Job board posting
- Application tracking
- Career page builder
1. Small business hiring
2. Startup recruiting
3. Multi-location job posting
4. Candidate pipeline management
How to scrape GoHire.
Step-by-step guide to extracting jobs from GoHire-powered career pages—endpoints, authentication, and working code.
import requests
from bs4 import BeautifulSoup
import re

company_slug = "dexerto-de5jlhjo"
url = f"https://jobs.gohire.io/{company_slug}"
response = requests.get(url, timeout=10)
soup = BeautifulSoup(response.text, 'html.parser')

print(f"Fetched listings page: {len(response.text)} bytes")

# Extract job IDs from anchor tags
job_ids = []
for link in soup.select('a.gohire-job'):
    href = link.get('href', '')
    # Job IDs are the numeric portion at the end of URLs like /job-slug-12345/
    match = re.search(r'/(\d{5,})/?$', href)
    if match:
        job_ids.append(match.group(1))

# Remove duplicates while preserving order
job_ids = list(dict.fromkeys(job_ids))
print(f"Found {len(job_ids)} job IDs: {job_ids[:5]}...")

import requests
def fetch_job_details(job_id: str) -> dict | None:
    url = "https://api.gohire.io/getJobId"
    # API expects form-data, not JSON
    data = {"jobId": job_id}
    response = requests.post(
        url,
        data=data,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        timeout=10
    )
    result = response.json()
    # API returns an array - get first element
    return result[0] if result else None

# Fetch details for first job
job = fetch_job_details(job_ids[0])
if job:
    print(f"Title: {job.get('jobTitle')}")
    print(f"Company: {job.get('companyName')}")

import base64
def transform_job(job: dict) -> dict:
    # Decode the base64 apply URL if present
    apply_url = None
    if job.get('applyUrl'):
        try:
            apply_url = base64.b64decode(job['applyUrl']).decode('utf-8')
        except Exception:
            apply_url = job.get('applyUrl')

    # Build salary string if available
    salary = None
    if job.get('fromSalary') and job.get('toSalary'):
        currency = job.get('currencySymbol', '')
        salary = f"{currency}{job['fromSalary']} - {currency}{job['toSalary']} {job.get('salaryUnit', '')}"

    return {
        "id": job.get("jobId"),
        "title": job.get("jobTitle"),
        "company": job.get("companyName"),
        "location": f"{job.get('jobCounty', '')}, {job.get('countryName', '')}",
        "type": job.get("jobType"),
        "salary": salary,
        "description_html": job.get("jobDescr"),
        "apply_url": apply_url,
        "posted_timestamp": job.get("jobCreatedOn"),
        "client_hash": job.get("clientHash"),
    }

transformed = transform_job(job)
print(f"Transformed: {transformed['title']} at {transformed['company']}")

import time
from typing import List, Dict

def scrape_gohire_company(company_slug: str) -> List[Dict]:
    jobs = []

    # Step 1: Fetch listings page
    try:
        url = f"https://jobs.gohire.io/{company_slug}"
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
    except requests.RequestException as e:
        print(f"Error fetching listings: {e}")
        return jobs

    # Step 2: Extract job IDs
    job_ids = []
    for link in soup.select('a.gohire-job'):
        href = link.get('href', '')
        match = re.search(r'/(\d{5,})/?$', href)
        if match:
            job_ids.append(match.group(1))
    job_ids = list(dict.fromkeys(job_ids))

    # Step 3: Fetch each job's details
    for i, job_id in enumerate(job_ids):
        try:
            time.sleep(0.5)  # Conservative rate limiting
            job = fetch_job_details(job_id)
            if job:
                jobs.append(transform_job(job))
                print(f"[{i+1}/{len(job_ids)}] Fetched: {job.get('jobTitle')}")
        except Exception as e:
            print(f"Error fetching job {job_id}: {e}")

    return jobs

# Run the scraper
all_jobs = scrape_gohire_company("dexerto-de5jlhjo")
print(f"\nTotal jobs scraped: {len(all_jobs)}")

import requests
def discover_gohire_companies() -> dict:
    """Fetch sitemap and extract unique company slugs with their job counts."""
    url = "https://jobs.gohire.io/sitemap.txt"
    response = requests.get(url, timeout=30)
    response.raise_for_status()

    companies = {}
    for line in response.text.strip().split('\n'):
        # URLs look like: https://jobs.gohire.io/company-slug/job-title-12345/
        parts = line.strip().split('/')
        if len(parts) >= 5:
            company_slug = parts[3]  # company-slug
            if company_slug not in companies:
                companies[company_slug] = {"slug": company_slug, "job_count": 0}
            companies[company_slug]["job_count"] += 1
    return companies

# Discover all GoHire companies
companies = discover_gohire_companies()
print(f"Found {len(companies)} companies")
for slug, data in list(companies.items())[:5]:
    print(f" {slug}: {data['job_count']} jobs")

The getJobId endpoint always returns an array, even for a single job. Always access the first element with result[0] to get the job data.
Use Content-Type: application/x-www-form-urlencoded and pass the data as form fields, not as a JSON body: use requests.post(url, data={'jobId': id}), not json={'jobId': id}.
Job IDs must be extracted from the HTML listings page by parsing anchor tags with class 'gohire-job' and extracting the numeric ID from URLs.
GoHire has no public directory. Find slugs by checking company careers pages or use the sitemap at jobs.gohire.io/sitemap.txt to discover company URLs.
Job detail pages contain JSON-LD structured data as a fallback. Parse the <script type='application/ld+json'> tag to extract job information when the API is unavailable.
The applyUrl field contains base64-encoded data. Decode it using base64.b64decode() before using it as a URL.
Not all jobs have salary information. Always check for null/empty values before accessing fromSalary, toSalary, and other optional fields.
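The JSON-LD fallback mentioned above can be handled with a few lines of BeautifulSoup. This is a minimal sketch, assuming standard schema.org JobPosting markup; the field names below (title, hiringOrganization, datePosted) are the generic schema.org properties, not confirmed GoHire output, so verify them against a real job page.

import json
import requests
from bs4 import BeautifulSoup

def fetch_job_from_jsonld(job_page_url: str) -> dict | None:
    """Fallback: parse schema.org JobPosting JSON-LD from a job detail page."""
    response = requests.get(job_page_url, timeout=10)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')

    for tag in soup.find_all('script', type='application/ld+json'):
        try:
            data = json.loads(tag.string or '')
        except json.JSONDecodeError:
            continue
        # Standard schema.org JobPosting fields; actual GoHire markup may differ
        if isinstance(data, dict) and data.get('@type') == 'JobPosting':
            return {
                "title": data.get("title"),
                "company": (data.get("hiringOrganization") or {}).get("name"),
                "description_html": data.get("description"),
                "posted": data.get("datePosted"),
            }
    return None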
1. Use the sitemap.txt file to discover all GoHire companies and job URLs
2. Add 500ms delays between API requests to avoid overwhelming the server
3. Always handle the array response from getJobId by accessing the first element
4. Use form-data encoding (application/x-www-form-urlencoded) for API requests
5. Cache job listings locally - GoHire pages typically don't change frequently (see the caching sketch after this list)
6. Decode base64 apply URLs before storing or redirecting users
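A minimal take on tip 5: write each company's scraped jobs to a local JSON file and reuse it while it is still fresh. The 6-hour TTL and the cache directory are arbitrary illustrative choices, not GoHire recommendations, and scrape_gohire_company is the function defined earlier in this guide.

import json
import time
from pathlib import Path

CACHE_DIR = Path("gohire_cache")  # illustrative location
CACHE_TTL = 6 * 60 * 60           # 6 hours, an arbitrary freshness window

def get_jobs_cached(company_slug: str) -> list:
    CACHE_DIR.mkdir(exist_ok=True)
    cache_file = CACHE_DIR / f"{company_slug}.json"

    # Serve from cache if the file exists and is still fresh
    if cache_file.exists() and time.time() - cache_file.stat().st_mtime < CACHE_TTL:
        return json.loads(cache_file.read_text())

    # Otherwise scrape and refresh the cache
    jobs = scrape_gohire_company(company_slug)
    cache_file.write_text(json.dumps(jobs, indent=2))
    return jobs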
One endpoint. All GoHire jobs. No scraping, no sessions, no maintenance.
Get API access

curl "https://enterprise.jobo.world/api/jobs?sources=gohire" \
  -H "X-Api-Key: YOUR_KEY"

Access GoHire job data today.
One API call. Structured data. No scraping infrastructure to build or maintain — start with the free tier and scale as you grow.
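For reference, the same request from Python. Only the sources filter and the X-Api-Key header from the curl example above are documented here; the response is assumed to be JSON, and any pagination or extra query parameters would need to be confirmed against the API documentation.

import requests

API_KEY = "YOUR_KEY"  # replace with your real key

response = requests.get(
    "https://enterprise.jobo.world/api/jobs",
    params={"sources": "gohire"},   # same filter as the curl example
    headers={"X-Api-Key": API_KEY},
    timeout=30,
)
response.raise_for_status()
data = response.json()  # assumed to be JSON; exact shape isn't documented in this guide
print(f"Received {len(response.content)} bytes of job data")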