import requests
import json

# Cyfuture AI chat-completion inference endpoint.
url = "https://api.cyfuture.ai/aiapi/inferencing/response"

# Request body: model name plus sampling parameters and the chat messages.
payload = {
    "model": "Model Name",
    "max_tokens": 16384,
    "top_p": 1,
    "top_k": 40,
    "presence_penalty": 0,
    "frequency_penalty": 0,
    "temperature": 0.6,
    "messages": [{"role": "user", "content": "Hello, how are you?"}],
}

headers = {
    "Accept": "application/json",
    "Content-Type": "application/json",
    "Authorization": "Bearer <API_KEY>",  # replace <API_KEY> with your key
}

# requests.post(..., json=payload) serializes the body for us (equivalent to
# data=json.dumps(payload)); a timeout is added because requests otherwise
# waits forever, and the response is captured so callers can inspect it.
response = requests.post(url, headers=headers, json=payload, timeout=30)
// POST a chat-completion request to the Cyfuture AI inference endpoint.
// NOTE: top-level `await` requires an ES module (or wrap in an async function).
const response = await fetch("https://api.cyfuture.ai/aiapi/inferencing/response", {
  method: "POST",
  headers: {
    "Accept": "application/json",
    "Content-Type": "application/json",
    "Authorization": "Bearer <API_KEY>", // replace <API_KEY> with your key
  },
  body: JSON.stringify({
    model: "Model Name", // fixed: was ""Model Name"" — a syntax error
    max_tokens: 16384,
    top_p: 1,
    top_k: 40,
    presence_penalty: 0,
    frequency_penalty: 0,
    temperature: 0.6,
    messages: [{ role: "user", content: "Hello, how are you?" }],
  }),
});
URI uri = URI.create("https://api.cyfuture.ai/aiapi/inferencing/response");HttpClient client =HttpClient.newHttpClient();HttpRequest request =HttpRequest.newBuilder().uri(uri).header("Accept","application/json").header("Content-Type","application/json").header("Authorization","Bearer <API_KEY>").POST(HttpRequest.BodyPublishers.ofString("""{"model":""Model Name"","max_tokens":16384,"top_p":1,"top_k":40,"presence_penalty":0,"frequency_penalty":0,"temperature":0.6,"messages":[{"role":"user","content":"Hello, how are you?"}]}""")).build();HttpResponse<String> response = client.send(request,HttpResponse.BodyHandlers.ofString());
Deepseek R1 Distill Llama 70b can be fine-tuned on your data to create a model with better response quality. Cyfuture AI uses low-rank adaptation (LoRA) to train a model that can be served efficiently at inference time.
On-demand deployments allow you to use Deepseek R1 Distill Llama 70b on dedicated GPUs with Cyfuture AI's high-performance serving stack, offering high reliability and no rate limits.