TalNet Jobs API.
Enterprise ATS platform (formerly Oleeo/WCN) using server-side rendered HTML job boards with RSS feed support.
Try the API.
Test Jobs, Feed, and Auto-Apply endpoints against https://connect.jobo.world with live request/response examples, then copy ready-to-use curl commands.
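If you want to reproduce a request programmatically rather than in the console, the minimal sketch below uses the endpoint and header from the curl command shown at the end of this page; the assumption that the body parses as JSON is ours, so inspect the live response before relying on specific fields.

import requests

# Endpoint and X-Api-Key header come from the curl example at the end of this page;
# the JSON body shape is an assumption, not a documented contract.
resp = requests.get(
    "https://enterprise.jobo.world/api/jobs",
    params={"sources": "talnet"},
    headers={"X-Api-Key": "YOUR_KEY"},
    timeout=15,
)
resp.raise_for_status()
print(resp.json())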
What's in every response.
Data fields, real-world applications, and the companies already running on TalNet.
- Enterprise focus
- Server-side rendering
- RSS/Atom feed support
- Multiple job boards per company
- International language support
- Government sector clients
1. Enterprise job monitoring
2. Government vacancy tracking
3. Multi-region recruitment
4. Financial services careers
How to scrape TalNet.
Step-by-step guide to extracting jobs from TalNet-powered career pages—endpoints, authentication, and working code.
# Step 1: Discover the job board URL from the candidate page

import requests
from bs4 import BeautifulSoup
import re

company = "fcdo"
base_url = f"https://{company}.tal.net/candidate"
response = requests.get(base_url, timeout=15)
soup = BeautifulSoup(response.text, "html.parser")

# Look for job board links
job_links = soup.find_all("a", href=re.compile(r"/jobboard/vacancy/\d+/adv"))
if job_links:
    print(f"Found job board: {job_links[0]['href']}")

# Step 2: Probe common RSS/Atom feed URL patterns

import requests
import xml.etree.ElementTree as ET
company = "royalvacancies"
# Try common RSS feed patterns
feed_patterns = [
    f"https://{company}.tal.net/vx/mobile-0/appcentre-1/brand-2/candidate/jobboard/vacancy/1/feed",
    f"https://{company}.tal.net/vx/appcentre-ext/candidate/jobboard/vacancy/1/feed",
]

for feed_url in feed_patterns:
    resp = requests.get(feed_url, timeout=10)
    if resp.status_code == 200 and "<feed" in resp.text:
        root = ET.fromstring(resp.content)
        entries = root.findall(".//{http://www.w3.org/2005/Atom}entry")
        print(f"Found {len(entries)} jobs in RSS feed")
        break

# Step 3: Extract job links from the job board page

import requests
from bs4 import BeautifulSoup
import re
job_board_url = "https://fcdo.tal.net/vx/appcentre-ext/candidate/jobboard/vacancy/1/adv/"
response = requests.get(job_board_url, timeout=15)
soup = BeautifulSoup(response.text, "html.parser")
# Extract job links using the /opp/ pattern
job_links = soup.find_all("a", href=re.compile(r"/opp/\d+"))
jobs = []
for link in job_links:
    href = link.get("href", "")
    # Extract the job ID from the URL pattern: /opp/{job_id}-{slug}
    match = re.search(r"/opp/(\d+)", href)
    if match:
        jobs.append({
            "id": match.group(1),
            "title": link.get_text(strip=True),
            "url": href if href.startswith("http") else f"https://fcdo.tal.net{href}",
        })

print(f"Found {len(jobs)} jobs")

# Step 4: Paginate through the full listing

import requests
from bs4 import BeautifulSoup
import re
import time
base_url = "https://fcdo.tal.net/vx/appcentre-ext/candidate/jobboard/vacancy/1/adv/"
all_jobs = []
offset = 0
while True:
    url = f"{base_url}?start={offset}"
    response = requests.get(url, timeout=15)
    soup = BeautifulSoup(response.text, "html.parser")
    job_links = soup.find_all("a", href=re.compile(r"/opp/\d+"))
    if not job_links:
        break
    for link in job_links:
        href = link.get("href", "")
        match = re.search(r"/opp/(\d+)", href)
        if match:
            all_jobs.append({
                "id": match.group(1),
                "title": link.get_text(strip=True),
                "url": href if href.startswith("http") else f"https://fcdo.tal.net{href}",
            })
    if len(job_links) < 50:  # Fewer than a full page means this was the last one
        break
    offset += 50
    time.sleep(1)  # Be respectful

print(f"Total jobs found: {len(all_jobs)}")

# Step 5: Parse individual job detail pages

import requests
from bs4 import BeautifulSoup
import re
def parse_job_details(job_url: str) -> dict:
    response = requests.get(job_url, timeout=15)
    soup = BeautifulSoup(response.text, "html.parser")
    details = {"url": job_url}

    # Extract the title from the h1
    h1 = soup.find("h1")
    if h1:
        title_text = h1.get_text(strip=True)
        details["title"] = title_text.replace("View Vacancy - ", "")

    # Extract labelled sections from the page text
    page_text = soup.get_text()
    patterns = {
        "location": r"Location[^:]*:\s*([^\n]+)",
        "city": r"Location \(City\)[^:]*:\s*([^\n]+)",
        "salary": r"Salary[^:]*:\s*([^\n]+)",
        "deadline": r"(?:Application deadline|Closing Date)[^:]*:\s*([^\n]+)",
        "job_type": r"Type of Position[^:]*:\s*([^\n]+)",
        "region": r"Region[^:]*:\s*([^\n]+)",
        "grade": r"Grade[^:]*:\s*([^\n]+)",
    }
    for field, pattern in patterns.items():
        match = re.search(pattern, page_text, re.IGNORECASE)
        if match:
            details[field] = match.group(1).strip()

    # Extract the full job description section
    desc_match = re.search(
        r"Job Description[^:]*:\s*(.+?)(?=Essential qualifications|Application deadline|$)",
        page_text,
        re.DOTALL | re.IGNORECASE,
    )
    if desc_match:
        details["description"] = desc_match.group(1).strip()[:2000]
    return details

# Usage example with a real FCDO job URL
job = parse_job_details(
    "https://fcdo.tal.net/vx/lang-en-GB/mobile-0/appcentre-1/brand-2/"
    "candidate/so/pm/4/pl/1/opp/26301-example-job-title/en-GB"
)
print(job)

# Step 6: Extract the embedded WCN configuration

import requests
from bs4 import BeautifulSoup
import re
def extract_config(company: str) -> dict:
    url = f"https://{company}.tal.net/candidate"
    response = requests.get(url, timeout=15)
    soup = BeautifulSoup(response.text, "html.parser")
    config = {}

    # Find the embedded WCN configuration
    scripts = soup.find_all("script")
    for script in scripts:
        if script.string and "WCN.global_config" in script.string:
            # Extract baseUrl
            base_match = re.search(r'baseUrl:\s*["\']([^"\']+)["\']', script.string)
            if base_match:
                config["baseUrl"] = base_match.group(1)
            # Extract rootPath
            root_match = re.search(r'rootPath:\s*["\']([^"\']+)["\']', script.string)
            if root_match:
                config["rootPath"] = root_match.group(1)
            # Extract the appcentre ID from rootPath
            appcentre_match = re.search(r"appcentre-(\d+)", script.string)
            if appcentre_match:
                config["appcentreId"] = appcentre_match.group(1)
            # Extract the brand ID
            brand_match = re.search(r"brand-(\d+)", script.string)
            if brand_match:
                config["brandId"] = brand_match.group(1)
            break
    return config

# Usage
config = extract_config("fcdo")
print(f"Appcentre ID: {config.get('appcentreId')}")
print(f"Brand ID: {config.get('brandId')}")
print(f"Base URL: {config.get('baseUrl')}")

Some companies (e.g., Evercore, Lazard) restrict access to their base candidate page. Try the full job board URL with the /candidate/jobboard/vacancy/1/adv/ path, or check whether the company uses a different subdomain.
Different companies use different appcentre IDs, brand IDs, and board IDs. Always discover the correct URL pattern by inspecting the company's main candidate page or extracting the WCN.global_config from the HTML.
The observed /api/v1/cms/adv endpoints are for tracking/analytics only and return empty data. Use HTML parsing or RSS feeds instead of relying on JSON APIs.
Look for the RSS feed link in the HTML head: <link rel='alternate' type='application/atom+xml'>. Alternatively, construct the feed URL from the pattern /vx/mobile-0/appcentre-{id}/brand-{id}/candidate/jobboard/vacancy/{board_id}/feed.
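A short sketch of the head-link approach, assuming only the <link> tag described above; discover_feed_url is an illustrative helper name.

import requests
from bs4 import BeautifulSoup

def discover_feed_url(board_url: str) -> str | None:
    # Look for a <link type="application/atom+xml"> tag in the returned HTML.
    resp = requests.get(board_url, timeout=15)
    soup = BeautifulSoup(resp.text, "html.parser")
    for link in soup.find_all("link", attrs={"type": "application/atom+xml"}):
        href = link.get("href")
        if href:
            return href
    return None

print(discover_feed_url("https://fcdo.tal.net/vx/appcentre-ext/candidate/jobboard/vacancy/1/adv/"))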
Look for links with ?start= parameter. The page size is typically 50 jobs. Parse pagination links from the HTML to determine if more pages exist.
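If you prefer reading the pagination links over blindly incrementing start as Step 4 does, a sketch like this collects the offsets the page itself advertises (the helper name is illustrative):

import re
from bs4 import BeautifulSoup

def advertised_offsets(html: str) -> list[int]:
    # Gather every ?start= value found in pagination links.
    soup = BeautifulSoup(html, "html.parser")
    offsets = set()
    for a in soup.find_all("a", href=re.compile(r"[?&]start=\d+")):
        m = re.search(r"[?&]start=(\d+)", a["href"])
        offsets.add(int(m.group(1)))
    return sorted(offsets)

print(advertised_offsets('<a href="/adv/?start=50">Next</a>'))  # [50]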
Field labels and HTML structure vary between TalNet deployments. Use flexible regex patterns with case-insensitive matching and handle missing fields gracefully with default values.
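One way to make that concrete is a small extraction helper with a default value, as in this sketch (the function name and defaults are illustrative):

import re

def extract_field(page_text: str, pattern: str, default: str = "") -> str:
    # Case-insensitive search; fall back to the default when the label is absent.
    match = re.search(pattern, page_text, re.IGNORECASE)
    return match.group(1).strip() if match else default

sample = "Location: London, UK\nGrade: B3 (L)"
print(extract_field(sample, r"Salary[^:]*:\s*([^\n]+)", default="Not listed"))  # Not listed
print(extract_field(sample, r"Location[^:]*:\s*([^\n]+)"))  # London, UK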
1. Check for RSS/Atom feeds first; they are more reliable and structured than HTML parsing.
2. Extract WCN.global_config from the embedded JavaScript to get the appcentre ID, brand ID, and base URL dynamically.
3. Use the /opp/ pattern in URLs to identify and extract job links from listing pages.
4. Add 1-2 second delays between requests to avoid overwhelming the server.
5. Handle multiple job boards per company by discovering all board IDs from the candidate page (see the sketch after this list).
6. Test with multiple companies to handle URL pattern variations (FCDO, BlackRock, Royal Vacancies).
7. Use regex patterns with case-insensitive matching for flexible field extraction.
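A sketch of the board-discovery step from item 5, reusing the link pattern from Step 1 (the helper name is illustrative):

import re
import requests
from bs4 import BeautifulSoup

def discover_board_ids(company: str) -> list[str]:
    # Collect every vacancy board ID linked from the candidate page.
    resp = requests.get(f"https://{company}.tal.net/candidate", timeout=15)
    soup = BeautifulSoup(resp.text, "html.parser")
    pattern = re.compile(r"/jobboard/vacancy/(\d+)/adv")
    ids = {pattern.search(a["href"]).group(1)
           for a in soup.find_all("a", href=pattern)}
    return sorted(ids)

print(discover_board_ids("fcdo"))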
One endpoint. All TalNet jobs. No scraping, no sessions, no maintenance.
Get API access:

curl "https://enterprise.jobo.world/api/jobs?sources=talnet" \
  -H "X-Api-Key: YOUR_KEY"

Access TalNet job data today.
One API call. Structured data. No scraping infrastructure to build or maintain. Start with the free tier and scale as you grow.