Trakstar Hire Jobs API.
Performance management and recruiting platform with RSS feed support for complete job data extraction.
Try the API.
Test Jobs, Feed, and Auto-Apply endpoints against https://connect.jobo.world with live request/response examples, then copy ready-to-use curl commands.
What's in every response.
Data fields, real-world applications, and the companies already running on Trakstar Hire.
- RSS feed support
- Full job descriptions
- Location metadata
- Department filtering
- Application deadline tracking
How to scrape Trakstar Hire.
Step-by-step guide to extracting jobs from Trakstar Hire-powered career pages—endpoints, authentication, and working code.
# RSS feed URL pattern
company_slug = "terrapower"
company_name = "TerraPower" # Note: capitalization matters in the path
rss_url = f"https://{company_slug}.hire.trakstar.com/jobfeeds/{company_name}"
print(f"RSS Feed URL: {rss_url}")
# Output: https://terrapower.hire.trakstar.com/jobfeeds/TerraPower

import requests
import xml.etree.ElementTree as ET
def _text(item, path, namespaces=None):
    """Return the text of a child element, or None if it is missing."""
    el = item.find(path, namespaces)
    return el.text if el is not None else None

def fetch_trakstar_jobs(company_slug: str, company_name: str) -> list:
    rss_url = f"https://{company_slug}.hire.trakstar.com/jobfeeds/{company_name}"
    response = requests.get(rss_url, timeout=30)
    response.raise_for_status()

    # Parse XML with namespace support
    root = ET.fromstring(response.content)

    # The feed's custom fields live under the 'job:' namespace
    namespaces = {
        'job': 'https://recruiterbox.com/rss/job/'
    }

    jobs = []
    for item in root.findall('.//item'):
        jobs.append({
            'title': _text(item, 'title'),
            'link': _text(item, 'link'),
            'description': _text(item, 'description'),
            'pub_date': _text(item, 'pubDate'),
            'location_city': _text(item, 'job:locationCity', namespaces),
            'location_state': _text(item, 'job:locationState', namespaces),
            'location_country': _text(item, 'job:locationCountry', namespaces),
            'position_type': _text(item, 'job:positionType', namespaces),
            'team': _text(item, 'job:team', namespaces),
            'close_date': _text(item, 'job:closeDate', namespaces),
        })
    return jobs
jobs = fetch_trakstar_jobs("terrapower", "TerraPower")
print(f"Found {len(jobs)} jobs")

from datetime import datetime
def process_job_data(jobs: list) -> list:
    processed = []
    for job in jobs:
        # Extract job ID from URL
        job_id = job['link'].rstrip('/').split('/')[-1]

        # Parse publication date (RFC 822 format used by RSS)
        pub_date = None
        if job.get('pub_date'):
            try:
                pub_date = datetime.strptime(job['pub_date'], '%a, %d %b %Y %H:%M:%S %z')
            except ValueError:
                pass

        # Parse close date (application deadline)
        close_date = None
        if job.get('close_date'):
            try:
                close_date = datetime.strptime(job['close_date'], '%Y-%m-%d')
            except ValueError:
                pass

        processed.append({
            'id': job_id,
            'title': job.get('title'),
            'url': job.get('link'),
            'description_html': job.get('description'),
            'location': {
                'city': job.get('location_city'),
                'state': job.get('location_state'),
                'country': job.get('location_country'),
            },
            'department': job.get('team'),
            'employment_type': job.get('position_type'),
            'published_at': pub_date.isoformat() if pub_date else None,
            'closes_at': close_date.isoformat() if close_date else None,
        })
    return processed
processed_jobs = process_job_data(jobs)
for job in processed_jobs[:3]:
    # .get('city', 'Unknown') would return None when the key exists with a
    # None value, so fall back with `or` instead
    print(f"{job['title']} - {job['location'].get('city') or 'Unknown'}")

import requests
import xml.etree.ElementTree as ET
from typing import Optional
def fetch_jobs_safe(company_slug: str, company_name: str) -> Optional[list]:
    """Fetch raw <item> elements with comprehensive error handling."""
    rss_url = f"https://{company_slug}.hire.trakstar.com/jobfeeds/{company_name}"
    try:
        response = requests.get(rss_url, timeout=30)
        if response.status_code == 404:
            print("RSS feed not found. Check company name capitalization.")
            # Try fallback: check sitemap
            sitemap_url = f"https://{company_slug}.hire.trakstar.com/sitemap.xml"
            sitemap_resp = requests.get(sitemap_url, timeout=10)
            if sitemap_resp.status_code == 200:
                print("Sitemap available as fallback for job URLs")
            return None
        response.raise_for_status()

        root = ET.fromstring(response.content)
        items = root.findall('.//item')
        if not items:
            print("No jobs found in RSS feed")
            return []
        return items
    except requests.Timeout:
        print("Request timed out")
        return None
    except requests.RequestException as e:
        print(f"Request failed: {e}")
        return None
    except ET.ParseError as e:
        print(f"XML parsing failed: {e}")
        return None

# Usage
items = fetch_jobs_safe("terrapower", "TerraPower")

import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
def scrape_html_fallback(company_slug: str) -> list:
    """Scrape jobs from HTML pages when RSS is unavailable."""
    base_url = f"https://{company_slug}.hire.trakstar.com"
    jobs = []
    page = 1
    while True:
        url = f"{base_url}/?p={page}"
        response = requests.get(url, timeout=30)
        soup = BeautifulSoup(response.content, 'html.parser')

        # Find job listings
        openings = soup.select('.opening')
        if not openings:
            break

        for opening in openings:
            title_elem = opening.select_one('h3 a')
            location_elem = opening.select_one('.location')
            dept_elem = opening.select_one('.department')
            if title_elem:
                jobs.append({
                    'title': title_elem.text.strip(),
                    # urljoin handles both relative and absolute hrefs
                    'url': urljoin(base_url, title_elem.get('href', '')),
                    'location': location_elem.text.strip() if location_elem else None,
                    'department': dept_elem.text.strip() if dept_elem else None,
                })

        # Check for next page
        paginator = soup.select_one('.pagination')
        if not paginator or f'?p={page + 1}' not in str(paginator):
            break
        page += 1
    return jobs

# Use as fallback
# jobs = scrape_html_fallback("terrapower")

The company name in the path must use correct capitalization (e.g., 'TerraPower', not 'terrapower'). Check the actual company name on their careers page, or try sitemap.xml as a fallback.
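One practical way to apply this tip is to probe a handful of capitalization guesses before resorting to the sitemap. A minimal sketch; find_feed_url and the candidates list are illustrative, not something the platform provides:

import requests

def find_feed_url(company_slug: str, candidates: list):
    """Probe candidate company-name capitalizations until one feed responds.

    `candidates` is a hypothetical list of guesses, e.g. ["TerraPower", "Terrapower"].
    Returns the first working feed URL, or None.
    """
    for name in candidates:
        url = f"https://{company_slug}.hire.trakstar.com/jobfeeds/{name}"
        resp = requests.get(url, timeout=10)
        # A wrong name returns 404; a valid feed returns 200 with XML
        if resp.status_code == 200:
            return url
    return None

# feed_url = find_feed_url("terrapower", ["TerraPower", "Terrapower", "terrapower"])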
The RSS feed uses the 'job:' namespace prefix. Register the namespace 'https://recruiterbox.com/rss/job/' when parsing XML to access location, team, and position type fields.
RSS feeds with many jobs can be large. Increase request timeout to 30+ seconds and consider streaming the response rather than loading it all into memory.
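A minimal sketch of incremental parsing using requests' streaming mode with ElementTree's iterparse; the 60-second timeout is an arbitrary choice:

import requests
import xml.etree.ElementTree as ET

def stream_feed_items(rss_url: str):
    """Yield each <item> element without loading the whole feed into memory."""
    with requests.get(rss_url, stream=True, timeout=60) as resp:
        resp.raise_for_status()
        resp.raw.decode_content = True  # transparently decompress gzip
        for event, elem in ET.iterparse(resp.raw, events=("end",)):
            if elem.tag == "item":
                yield elem
                elem.clear()  # free the parsed subtree before the next item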
Trakstar Hire has no public JSON API. The /api/jobs and /api/jobs/{id} endpoints return 404. Use the RSS feed or HTML scraping instead.
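If you want to verify this for a given tenant, a quick probe (the job ID 123 is a placeholder):

import requests

# Both endpoints are expected to return 404, per the note above
for path in ("/api/jobs", "/api/jobs/123"):
    resp = requests.get(f"https://terrapower.hire.trakstar.com{path}", timeout=10)
    print(path, resp.status_code)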
Not all jobs include the application deadline. Handle null values for job:closeDate gracefully in your parsing logic.
1. Use the RSS feed as the primary data source for complete job data in a single request
2. Match company name capitalization exactly in the RSS path (case-sensitive)
3. Register the job: namespace when parsing XML to access structured metadata
4. Set request timeout to 30+ seconds for large RSS feeds
5. Implement HTML scraping as a fallback when RSS returns 404
6. Cache results - job boards typically update daily, so avoid redundant requests (see the sketch after this list)
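For the caching point above, a minimal in-process sketch; FeedCache and its one-day TTL are illustrative choices, not part of any Trakstar or jobo.world API:

import time

class FeedCache:
    """Cache feed results in memory so repeat calls within the TTL skip the network."""

    def __init__(self, ttl_seconds: int = 86_400):  # one day, matching typical update cadence
        self.ttl = ttl_seconds
        self._store = {}

    def get_or_fetch(self, key: str, fetch):
        now = time.time()
        cached = self._store.get(key)
        if cached and now - cached[0] < self.ttl:
            return cached[1]
        result = fetch()
        self._store[key] = (now, result)
        return result

# Usage with the fetch function defined earlier
cache = FeedCache()
jobs = cache.get_or_fetch("terrapower", lambda: fetch_trakstar_jobs("terrapower", "TerraPower"))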
One endpoint. All Trakstar Hire jobs. No scraping, no sessions, no maintenance.
Get API access

curl "https://enterprise.jobo.world/api/jobs?sources=trakstar%20hire" \
  -H "X-Api-Key: YOUR_KEY"
Access Trakstar Hire job data today.
One API call. Structured data. No scraping infrastructure to build or maintain — start with the free tier and scale as you grow.