API Reference
Auth model, core endpoint map, runtime examples, and error contract for DCP provider/renter/admin integrations.
Base URLs
- Public proxy: `https://dcp.sa/api/dc1`
- Direct API (ops/debug): `https://api.dcp.sa/api`
Authentication model
- **Provider**: `x-provider-key` header or `?key=` query
- **Renter**: `x-renter-key` header or `?key=` query
- **Admin**: `x-admin-token` header (or `Authorization: Bearer <token>`)
Key examples:
- `dc1-provider-...`
- `dc1-renter-...`
Error contract
All failures return JSON:
{ "error": "descriptive message" }Example (`400`):
{ "error": "Missing required fields: job_type, duration_minutes" }`/v1` deterministic error envelope (`/v1/models`, `/v1/chat/completions`)
`/v1` endpoints return a canonical machine-readable envelope:
{
  "error": {
    "message": "Provider failover exhausted after initial error: timeout",
    "type": "timeout_error",
    "code": "upstream_timeout",
    "status": 504,
    "retryable": true
  }
}
For rate limits (`429`), retry metadata is always present:
{
  "error": {
    "message": "Rate limit exceeded. Retry after 30 seconds.",
    "type": "rate_limit_error",
    "code": "rate_limit_exceeded",
    "status": 429,
    "retryable": true,
    "retry_after_seconds": 30,
    "retry_after_ms": 30000
  },
  "retry_after_seconds": 30,
  "retry_after_ms": 30000
}
SDK retry guidance for `/v1`:
- Retry with backoff when `error.code` is `rate_limit_exceeded`, `no_capacity_available`, `provider_unavailable`, or `upstream_timeout`.
- Use `retry_after_seconds` when present (especially on `429`).
- Do not retry without request changes on non-retryable validation/auth/billing errors.
Critical endpoint parity examples
The following examples are aligned with production route contracts in `backend/src/routes/*.js` and `docs/openapi.yaml`.
1) Renter registration — `POST /renters/register`
cURL:
curl -s -X POST "https://dcp.sa/api/dc1/renters/register" \
-H "Content-Type: application/json" \
-d '{
  "name": "Fatima Al-Saud",
  "email": "fatima@example.sa",
  "organization": "Riyadh AI Lab",
  "use_case": "llm_inference",
  "phone": "+966501234567"
}'
Node.js:
const res = await fetch('https://dcp.sa/api/dc1/renters/register', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    name: 'Fatima Al-Saud',
    email: 'fatima@example.sa',
    organization: 'Riyadh AI Lab',
    use_case: 'llm_inference',
    phone: '+966501234567',
  }),
})
const data = await res.json()
if (!res.ok) throw new Error(data.error)
Python:
import requests
res = requests.post(
"https://dcp.sa/api/dc1/renters/register",
json={
"name": "Fatima Al-Saud",
"email": "fatima@example.sa",
"organization": "Riyadh AI Lab",
"use_case": "llm_inference",
"phone": "+",
},
timeout=,
)
res.raise_for_status()
print(res.json())Success response (`201`):
{
"success": true,
"renter_id": ,
"api_key": "dc1-renter-abc123...",
"message": "Welcome Fatima Al-Saud! Save your API key — it won;t be shown again."
}2) Provider heartbeat — `POST /providers/heartbeat`
cURL:
curl -s -X POST "https://dcp.sa/api/dc1/providers/heartbeat" \
-H "Content-Type: application/json" \
-d '{
  "api_key": "dc1-provider-abc123...",
  "gpu_status": {
    "gpu_name": "NVIDIA RTX 4090",
    "gpu_vram_mib": 24564,
    "gpu_util_pct": 12,
    "temp_c": 45,
    "daemon_version": "1.3.0",
    "python_version": "3.11.9",
    "os_info": "Ubuntu 22.04"
  },
  "provider_hostname": "gpu-host-01",
  "cached_models": ["TinyLlama/TinyLlama-1.1B-Chat-v1.0"]
}'
Node.js:
const hb = await fetch('https://dcp.sa/api/dc1/providers/heartbeat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    api_key: process.env.DC1_PROVIDER_KEY,
    gpu_status: {
      gpu_name: 'NVIDIA RTX 4090',
      gpu_vram_mib: 24564,
      gpu_util_pct: 12,
      temp_c: 45,
      daemon_version: '1.3.0',
    },
  }),
})
const hbData = await hb.json()
if (!hb.ok) throw new Error(hbData.error)
Python:
import requests
hb = requests.post(
    "https://dcp.sa/api/dc1/providers/heartbeat",
    json={
        "api_key": "dc1-provider-abc123...",
        "gpu_status": {
            "gpu_name": "NVIDIA RTX 4090",
            "gpu_vram_mib": 24564,
            "gpu_util_pct": 12,
            "temp_c": 45,
            "daemon_version": "1.3.0",
        },
    },
    timeout=30,
)
hb.raise_for_status()
print(hb.json())
Success response (`200`):
{
"success": true,
"message": "Heartbeat received",
"timestamp": "--22T19::.000Z",
"needs_update": false,
"latest_version": ".3.",
"update_available": false,
"min_version": ".3.",
"approval_status": "approved",
"approved": true,
"preload_model": null
}3) Job submission — `POST /jobs/submit`
cURL:
curl -s -X POST "https://dcp.sa/api/dc1/jobs/submit" \
-H "Content-Type: application/json" \
-H "x-renter-key: $RENTER_KEY" \
-d '{
  "provider_id": 7,
  "job_type": "llm_inference",
  "duration_minutes": 10,
  "max_duration_seconds": 600,
  "container_spec": { "image_type": "vllm-serve" },
  "params": {
    "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    "prompt": "Summarize DCP in three bullets"
  }
}'
Node.js:
const submit = await fetch('https://dcp.sa/api/dc1/jobs/submit', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    'x-renter-key': process.env.DC1_RENTER_KEY!,
  },
  body: JSON.stringify({
    provider_id: 7,
    job_type: 'llm_inference',
    duration_minutes: 10,
    max_duration_seconds: 600,
    container_spec: { image_type: 'vllm-serve' },
    params: {
      model: 'TinyLlama/TinyLlama-1.1B-Chat-v1.0',
      prompt: 'Summarize DCP in three bullets',
    },
  }),
})
const submitted = await submit.json()
if (!submit.ok) throw new Error(submitted.error)
Python:
import requests
submitted = requests.post(
    "https://dcp.sa/api/dc1/jobs/submit",
    headers={
        "Content-Type": "application/json",
        "x-renter-key": "dc1-renter-abc123...",
    },
    json={
        "provider_id": 7,
        "job_type": "llm_inference",
        "duration_minutes": 10,
        "max_duration_seconds": 600,
        "container_spec": {"image_type": "vllm-serve"},
        "params": {
            "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
            "prompt": "Summarize DCP in three bullets",
        },
    },
    timeout=30,
)
submitted.raise_for_status()
print(submitted.json())
Success response (`201`):
{
"success": true,
"job": {
"id": ,
"job_id": "job--ab12cd",
"provider_id": ,
"renter_id": ,
"job_type": "llm_inference",
"model": "TinyLlama/TinyLlama-.1B-Chat-v1.",
"status": "pending",
"submitted_at": "--22T19::.000Z",
"duration_minutes": ,
"cost_halala": ,
"max_duration_seconds": ,
"timeout_at": "-- ::.000",
"gpu_requirements": null,
"container_spec": { "image_type": "vllm-serve", "pricing_class": "standard" },
"workspace_volume_name": "dcp-job-job--ab12cd",
"checkpoint_enabled": false,
"task_spec_signed": true,
"priority": ,
"pricing_class": "standard",
"prewarm_requested": false,
"queue_position": null
}
}4) Admin dashboard — `GET /admin/dashboard`
cURL:
curl -s "https://dcp.sa/api/dc1/admin/dashboard" \
-H "x-admin-token: $DC1_ADMIN_TOKEN"Node.js:
const dash = await fetch(;https://dcp.sa/api/dc1/admin/dashboard;, {
headers: { ;x-admin-token;: process.env.DC1_ADMIN_TOKEN! },
})
const dashboard = await dash.json()
if (!dash.ok) throw new Error(dashboard.error)Python:
import requests
dashboard = requests.get(
    "https://dcp.sa/api/dc1/admin/dashboard",
    headers={"x-admin-token": "<DC1_ADMIN_TOKEN>"},
    timeout=30,
)
dashboard.raise_for_status()
print(dashboard.json())
Success response (`200`) shape:
{
"stats": {
"total_providers": ,
"online_now": ,
"offline": ,
"total_renters": ,
"active_renters": ,
"total_renter_balance_halala": ,
"total_jobs": ,
"completed_jobs": ,
"failed_jobs": ,
"active_jobs": ,
"total_revenue_halala": ,
"total_dc1_fees_halala": ,
"today_revenue_halala": ,
"today_dc1_fees_halala": ,
"today_jobs": ,
"timestamp": "--22T19::.000Z"
},
"gpu_breakdown": [],
"recent_signups": [],
"recent_heartbeats": []
}OpenAPI source
- Human-readable: API docs page
- Machine-readable: `/docs/openapi.yaml`
- Runtime checks: Runtime Verification Runbook