Real working code in Python, JavaScript, and cURL
Complete Python scripts ready to run
import requests
import time
API_BASE = "https://finetunelab.ai"
AUTH_TOKEN = "your-token"

headers = {
    "Authorization": f"Bearer {AUTH_TOKEN}",
    "Content-Type": "application/json",
}


def monitor_training(job_id):
    """Poll the training-metrics endpoint until the job reaches a terminal state.

    Args:
        job_id: Identifier of the training job to monitor.
    """
    url = f"{API_BASE}/api/training/metrics/{job_id}"
    # The original loop only stopped on "completed", so a failed or
    # cancelled job would be polled forever.
    terminal_statuses = {"completed", "failed", "cancelled"}
    while True:
        response = requests.get(url, headers=headers)
        # Surface HTTP errors explicitly instead of a confusing KeyError
        # when indexing an error payload below.
        response.raise_for_status()
        metrics = response.json()
        print(f"Step {metrics['current_step']}/{metrics['total_steps']}")
        print(f"Loss: {metrics['train_loss']:.4f}")
        if metrics.get("status") in terminal_statuses:
            break
        time.sleep(10)

import json
def validate_jsonl(file_path):
    """Validate a chat-format JSONL dataset file.

    Each non-blank line must be a JSON object containing a "messages" key.

    Args:
        file_path: Path to the .jsonl file to check.

    Returns:
        A list of human-readable error strings; empty when the file is valid.
    """
    errors = []
    with open(file_path, "r") as f:
        for i, line in enumerate(f, 1):
            # Skip blank lines (e.g. a trailing newline at end of file)
            # instead of reporting them as invalid JSON.
            if not line.strip():
                continue
            try:
                data = json.loads(line)
            except json.JSONDecodeError:
                errors.append(f"Line {i}: Invalid JSON")
            else:
                if "messages" not in data:
                    errors.append(f"Line {i}: Missing messages")
    return errors

# Modern async/await patterns for Node.js and browsers
class TrainingClient {
  /**
   * @param {string} baseUrl   - API origin, e.g. "https://finetunelab.ai".
   * @param {string} authToken - Bearer token sent on every request.
   */
  constructor(baseUrl, authToken) {
    this.baseUrl = baseUrl;
    this.authToken = authToken;
  }

  /**
   * Create a training configuration.
   * @param {object} config - Training configuration payload.
   * @returns {Promise<object>} Parsed JSON response.
   */
  async createConfig(config) {
    const response = await fetch(`${this.baseUrl}/api/training`, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${this.authToken}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify(config),
    });
    // fetch() resolves on HTTP error statuses too; fail loudly instead of
    // handing callers an error payload that looks like a result.
    if (!response.ok) {
      throw new Error(`createConfig failed: HTTP ${response.status}`);
    }
    return response.json();
  }

  /**
   * Fetch live metrics for a training job.
   * @param {string} jobId - Identifier of the running job.
   * @returns {Promise<object>} Parsed JSON metrics.
   */
  async getMetrics(jobId) {
    const response = await fetch(`${this.baseUrl}/api/training/metrics/${jobId}`, {
      headers: { 'Authorization': `Bearer ${this.authToken}` },
    });
    if (!response.ok) {
      throw new Error(`getMetrics failed: HTTP ${response.status}`);
    }
    return response.json();
  }
}

import { useState, useEffect } from 'react';
export function useTraining(jobId) {
  // Latest metrics snapshot for the job, or null before the first fetch.
  const [metrics, setMetrics] = useState(null);

  useEffect(() => {
    if (!jobId) return;

    const refresh = () =>
      fetch(`/api/training/metrics/${jobId}`)
        .then((response) => response.json())
        .then(setMetrics);

    refresh();                                 // fetch once immediately
    const timer = setInterval(refresh, 5000);  // then poll every 5 seconds
    return () => clearInterval(timer);         // stop polling on unmount
  }, [jobId]);

  return metrics;
}

// Quick copy-paste commands for terminal use
# Create a training configuration.
# NOTE(review): added the Authorization header to these commands for
# consistency with the other authenticated cURL examples in this document.
curl -X POST https://finetunelab.ai/api/training \
  -H "Authorization: Bearer YOUR_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"name": "my-model", "base_model": "meta-llama/Llama-3.2-1B", "dataset_id": "dataset-123", "epochs": 3}'

# Start the training job for an existing configuration.
curl -X POST https://finetunelab.ai/api/training/execute \
  -H "Authorization: Bearer YOUR_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"id": "config-456"}'

# Check job status.
curl -H "Authorization: Bearer YOUR_TOKEN" https://finetunelab.ai/api/training/status/job-789

# Fetch live metrics.
curl -H "Authorization: Bearer YOUR_TOKEN" https://finetunelab.ai/api/training/metrics/job-789

# Pause / resume the job.
curl -X POST -H "Authorization: Bearer YOUR_TOKEN" https://finetunelab.ai/api/training/pause/job-789
curl -X POST -H "Authorization: Bearer YOUR_TOKEN" https://finetunelab.ai/api/training/resume/job-789

# Download the trained model artifact.
curl -O -H "Authorization: Bearer YOUR_TOKEN" https://finetunelab.ai/api/training/download/job-789

# Training analytics summary.
curl -H "Authorization: Bearer YOUR_TOKEN" https://finetunelab.ai/api/training/analytics/job-789

# Deploy trained models to production with RunPod Serverless
import requests
import time
class InferenceDeployment:
    """Client for deploying trained models to RunPod Serverless.

    The original snippet read ``self.api_base`` and ``self.headers`` without
    ever setting them, which would raise AttributeError on the first call;
    this constructor supplies them with backward-compatible defaults.
    """

    def __init__(self, api_base="https://finetunelab.ai", auth_token="your-token"):
        self.api_base = api_base
        self.headers = {
            "Authorization": f"Bearer {auth_token}",
            "Content-Type": "application/json",
        }

    def deploy(self, training_job_id, deployment_name, budget_limit=10.0):
        """Create a serverless deployment for a finished training job.

        Args:
            training_job_id: Id of the completed training job to serve.
            deployment_name: Human-readable name for the deployment.
            budget_limit: Spend ceiling before auto-stop (same units as the
                API's budget_limit field — presumably USD; confirm with docs).

        Returns:
            Parsed JSON response from the deploy endpoint.
        """
        url = f"{self.api_base}/api/inference/deploy"
        payload = {
            "provider": "runpod-serverless",
            "deployment_name": deployment_name,
            "training_job_id": training_job_id,
            "gpu_type": "NVIDIA RTX A4000",
            "budget_limit": budget_limit,
        }
        response = requests.post(url, json=payload, headers=self.headers)
        return response.json()

    def get_status(self, deployment_id):
        """Return the current status JSON for a deployment."""
        url = f"{self.api_base}/api/inference/deployments/{deployment_id}/status"
        return requests.get(url, headers=self.headers).json()

    def make_inference_request(self, endpoint_url, prompt):
        """Send one prompt directly to a deployed endpoint; return its JSON reply."""
        payload = {"input": {"prompt": prompt, "max_tokens": 512}}
        return requests.post(endpoint_url, json=payload).json()
class InferenceClient {
  /**
   * Create a RunPod Serverless deployment for a finished training job.
   * @param {object} config - { deploymentName, trainingJobId, gpuType?, budgetLimit? }
   * @returns {Promise<object>} Parsed deploy response.
   */
  async deploy(config) {
    const payload = {
      provider: 'runpod-serverless',
      deployment_name: config.deploymentName,
      training_job_id: config.trainingJobId,
      gpu_type: config.gpuType || 'NVIDIA RTX A4000',
      budget_limit: config.budgetLimit || 10.0,
    };
    // NOTE(review): this.fetch looks like an authenticated wrapper defined
    // elsewhere on this class — confirm it exists before copy-pasting.
    return this.fetch('/api/inference/deploy', {
      method: 'POST',
      body: JSON.stringify(payload),
    });
  }

  /**
   * POST a prompt directly to a deployed serverless endpoint.
   * @param {string} endpointUrl - Full URL of the deployed endpoint.
   * @param {string} prompt - User prompt to send.
   * @returns {Promise<object>} Parsed inference response.
   */
  async makeInferenceRequest(endpointUrl, prompt) {
    const body = JSON.stringify({ input: { prompt, max_tokens: 512 } });
    const response = await fetch(endpointUrl, { method: 'POST', body });
    return response.json();
  }
}
# Deploy a trained model to RunPod Serverless.
curl -X POST https://finetunelab.ai/api/inference/deploy \
  -H "Authorization: Bearer YOUR_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{
"provider": "runpod-serverless",
"deployment_name": "my-model-prod",
"training_job_id": "job-abc123",
"gpu_type": "NVIDIA RTX A4000",
"budget_limit": 10.0,
"min_workers": 0,
"max_workers": 3,
"auto_stop_on_budget": true
}'

# Check deployment status.
curl -X GET \
  https://finetunelab.ai/api/inference/deployments/dep-xyz789/status \
  -H "Authorization: Bearer YOUR_TOKEN"

# Send an inference request straight to the deployed endpoint.
curl -X POST https://your-endpoint.runpod.net \
  -H "Content-Type: application/json" \
  -d '{
"input": {
"prompt": "Explain quantum computing",
"max_tokens": 512,
"temperature": 0.7
}
}'

# Stop the deployment.
curl -X DELETE \
  https://finetunelab.ai/api/inference/deployments/dep-xyz789/stop \
  -H "Authorization: Bearer YOUR_TOKEN"

# Complete scenarios from start to finish
Workflow 1 — Train a model

1. Prepare a dataset (training_data.jsonl):
   {"messages": [{"role": "user", "content": "..."}, {"role": "assistant", "content": "..."}]}
2. Upload the dataset:
   POST /api/training/datasets
3. Create a training configuration:
   POST /api/training (learning_rate: 0.0001, batch_size: 4, epochs: 3)
4. Start the training job:
   POST /api/training/execute
5. Monitor progress (poll every 5-10 seconds):
   GET /api/training/metrics/:id
6. Download the trained model:
   GET /api/training/download/:id

Workflow 2 — Ship to production

1. Complete the training workflow (see Workflow 1).
2. Review the final metrics:
   GET /api/training/analytics/:id (check final loss and eval metrics)
3. Deploy to RunPod Serverless:
   POST /api/training/deploy/:id
4. Test the deployed model:
   curl -X POST https://finetunelab.ai/v1/chat/completions -d '{"model": "...", "messages": [...]}'
5. Add the model to your app:
   POST /api/models (register the custom model with its base_url)
6. Monitor production usage:
   Track latency, error rates, and user feedback.

Workflow 3 — Hyperparameter sweep

1. Create a baseline config (learning_rate: 1e-4, batch_size: 4):
   POST /api/training → config-baseline
2. Create variant configs with different hyperparameters:
   POST /api/training → config-lr-high (lr: 5e-4), config-lr-low (lr: 5e-5)
3. Run all training jobs in parallel:
   POST /api/training/execute for each config
4. Compare the results:
   GET /api/training/analytics/compare?ids=job1,job2,job3
5. Select the best-performing config and deploy it:
   Choose the config with the lowest eval loss and best convergence.

You now have working code examples in Python, JavaScript, and cURL. Pick your favorite language and start building!