Quickstart
Fastest way to start using LLM Gateway in any language or framework.
Welcome to LLM Gateway, a single drop-in endpoint that lets you call today's best large language models while keeping your existing code and development workflow intact.
TL;DR: Point your HTTP requests to https://api.llmgateway.io/v1/…, supply your LLM_GATEWAY_API_KEY, and you're done.
1 · Get an API key
- Sign in to the dashboard.
- Create a new Project → Copy the key.
- Export it in your shell (or a .env file):
export LLM_GATEWAY_API_KEY="llmgtwy_XXXXXXXXXXXXXXXX"
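If you keep secrets in a .env file instead, the equivalent entry (same key, no export) is:

LLM_GATEWAY_API_KEY="llmgtwy_XXXXXXXXXXXXXXXX"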
2 · Pick your language
cURL

curl -X POST https://api.llmgateway.io/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer $LLM_GATEWAY_API_KEY" \
  -d '{
    "model": "gpt-4o",
    "messages": [
      {"role": "user", "content": "Hello, how are you?"}
    ]
  }'
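Whichever language you pick, the gateway returns an OpenAI-style chat completion, and the examples below read the reply from choices[0].message.content. A trimmed sketch of the response body (field values here are illustrative, not from a real call):

{
  "model": "gpt-4o",
  "choices": [
    {
      "message": { "role": "assistant", "content": "I'm doing well, thank you!" },
      "finish_reason": "stop"
    }
  ],
  "usage": { "prompt_tokens": 13, "completion_tokens": 9, "total_tokens": 22 }
}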
JavaScript

const response = await fetch('https://api.llmgateway.io/v1/chat/completions', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    'Authorization': `Bearer ${process.env.LLM_GATEWAY_API_KEY}`
  },
  body: JSON.stringify({
    model: 'gpt-4o',
    messages: [
      { role: 'user', content: 'Hello, how are you?' }
    ]
  })
});

if (!response.ok) {
  throw new Error(`HTTP error! status: ${response.status}`);
}

const data = await response.json();
console.log(data.choices[0].message.content);
React

// Note: bundling an API key into client-side code ships it to every visitor.
// Prefer calling a server-side route (see the Next.js example) in production.
import { useState } from 'react';

function ChatComponent() {
  const [response, setResponse] = useState('');
  const [loading, setLoading] = useState(false);

  const sendMessage = async () => {
    setLoading(true);
    try {
      const res = await fetch('https://api.llmgateway.io/v1/chat/completions', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${process.env.REACT_APP_LLM_GATEWAY_API_KEY}`
        },
        body: JSON.stringify({
          model: 'gpt-4o',
          messages: [
            { role: 'user', content: 'Hello, how are you?' }
          ]
        })
      });
      if (!res.ok) {
        throw new Error(`HTTP error! status: ${res.status}`);
      }
      const data = await res.json();
      setResponse(data.choices[0].message.content);
    } catch (error) {
      console.error('Error:', error);
    } finally {
      setLoading(false);
    }
  };

  return (
    <div>
      <button onClick={sendMessage} disabled={loading}>
        {loading ? 'Sending...' : 'Send Message'}
      </button>
      {response && <p>{response}</p>}
    </div>
  );
}

export default ChatComponent;
Next.js

// app/api/chat/route.ts
import { NextRequest, NextResponse } from "next/server";

export async function POST(request: NextRequest) {
  const { message } = await request.json();

  const response = await fetch('https://api.llmgateway.io/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${process.env.LLM_GATEWAY_API_KEY}`
    },
    body: JSON.stringify({
      model: 'gpt-4o',
      messages: [
        { role: 'user', content: message }
      ]
    })
  });

  if (!response.ok) {
    return NextResponse.json({ error: 'Failed to get response' }, { status: response.status });
  }

  const data = await response.json();
  return NextResponse.json({
    message: data.choices[0].message.content
  });
}

// Usage in a component:
// const response = await fetch('/api/chat', {
//   method: 'POST',
//   headers: { 'Content-Type': 'application/json' },
//   body: JSON.stringify({ message: 'Hello, how are you?' })
// });
Python

import requests
import os

response = requests.post(
    'https://api.llmgateway.io/v1/chat/completions',
    headers={
        'Content-Type': 'application/json',
        'Authorization': f'Bearer {os.getenv("LLM_GATEWAY_API_KEY")}'
    },
    json={
        'model': 'gpt-4o',
        'messages': [
            {'role': 'user', 'content': 'Hello, how are you?'}
        ]
    }
)
response.raise_for_status()
print(response.json()['choices'][0]['message']['content'])
Java

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class Quickstart {
    public static void main(String[] args) throws Exception {
        String apiKey = System.getenv("LLM_GATEWAY_API_KEY");
        String requestBody = """
            {
              "model": "gpt-4o",
              "messages": [
                {"role": "user", "content": "Hello, how are you?"}
              ]
            }
            """;

        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("https://api.llmgateway.io/v1/chat/completions"))
            .header("Content-Type", "application/json")
            .header("Authorization", "Bearer " + apiKey)
            .POST(HttpRequest.BodyPublishers.ofString(requestBody))
            .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());
    }
}
Rust

use reqwest::Client;
use serde_json::json;
use std::env;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new();
    let api_key = env::var("LLM_GATEWAY_API_KEY")?;

    let response = client
        .post("https://api.llmgateway.io/v1/chat/completions")
        .header("Content-Type", "application/json")
        .header("Authorization", format!("Bearer {}", api_key))
        .json(&json!({
            "model": "gpt-4o",
            "messages": [
                {"role": "user", "content": "Hello, how are you?"}
            ]
        }))
        .send()
        .await?;

    let result: serde_json::Value = response.json().await?;
    println!("{}", result["choices"][0]["message"]["content"]);
    Ok(())
}
Go

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

type ChatRequest struct {
	Model    string    `json:"model"`
	Messages []Message `json:"messages"`
}

type Message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

type ChatResponse struct {
	Choices []struct {
		Message Message `json:"message"`
	} `json:"choices"`
}

func main() {
	apiKey := os.Getenv("LLM_GATEWAY_API_KEY")
	requestBody := ChatRequest{
		Model:    "gpt-4o",
		Messages: []Message{{Role: "user", Content: "Hello, how are you?"}},
	}

	jsonData, err := json.Marshal(requestBody)
	if err != nil {
		panic(err)
	}

	req, err := http.NewRequest("POST", "https://api.llmgateway.io/v1/chat/completions", bytes.NewBuffer(jsonData))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+apiKey)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode the response and print the assistant's reply.
	var result ChatResponse
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		panic(err)
	}
	fmt.Println(result.Choices[0].Message.Content)
}
PHP

<?php
// getenv() is more reliable than $_ENV, which requires "E" in variables_order.
$apiKey = getenv('LLM_GATEWAY_API_KEY');

$data = [
    'model' => 'gpt-4o',
    'messages' => [
        ['role' => 'user', 'content' => 'Hello, how are you?']
    ]
];

$options = [
    'http' => [
        'header' => [
            'Content-Type: application/json',
            'Authorization: Bearer ' . $apiKey
        ],
        'method' => 'POST',
        'content' => json_encode($data)
    ]
];

$context = stream_context_create($options);
$response = file_get_contents(
    'https://api.llmgateway.io/v1/chat/completions',
    false,
    $context
);

if ($response === false) {
    throw new Exception('Request failed');
}

$result = json_decode($response, true);
echo $result['choices'][0]['message']['content'];
?>
Ruby

require 'net/http'
require 'json'
require 'uri'

uri = URI('https://api.llmgateway.io/v1/chat/completions')
http = Net::HTTP.new(uri.host, uri.port)
http.use_ssl = true

request = Net::HTTP::Post.new(uri)
request['Content-Type'] = 'application/json'
request['Authorization'] = "Bearer #{ENV['LLM_GATEWAY_API_KEY']}"
request.body = {
  model: 'gpt-4o',
  messages: [
    { role: 'user', content: 'Hello, how are you?' }
  ]
}.to_json

response = http.request(request)
if response.code != '200'
  raise "HTTP Error: #{response.code}"
end

result = JSON.parse(response.body)
puts result['choices'][0]['message']['content']
3 · SDK integrations
Vercel AI SDK (@llmgateway/ai-sdk-provider)

import { llmgateway } from "@llmgateway/ai-sdk-provider";
import { generateText } from "ai";

const { text } = await generateText({
  model: llmgateway("openai/gpt-4o"),
  prompt: "Write a vegetarian lasagna recipe for 4 people.",
});
AI SDK (OpenAI-compatible provider)

import { createOpenAI } from "@ai-sdk/openai";
import { generateText } from "ai";

const llmgateway = createOpenAI({
  baseURL: "https://api.llmgateway.io/v1",
  apiKey: process.env.LLM_GATEWAY_API_KEY!,
});

const { text } = await generateText({
  model: llmgateway.chat("gpt-4o"),
  messages: [{ role: "user", content: "Hello, how are you?" }],
});

console.log(text);
OpenAI SDK

import OpenAI from "openai";

const openai = new OpenAI({
  baseURL: "https://api.llmgateway.io/v1",
  apiKey: process.env.LLM_GATEWAY_API_KEY,
});

const completion = await openai.chat.completions.create({
  model: "gpt-4o",
  messages: [{ role: "user", content: "Hello, how are you?" }],
});

console.log(completion.choices[0].message.content);
4 · Going further
- Streaming: pass stream: true in any request; the Gateway proxies the event stream unchanged (see the sketch after this list).
- Monitoring: every call appears in the dashboard with a latency, cost, and provider breakdown.
- Fail-over: specify fallback_models to auto-retry on provider errors.
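A minimal streaming sketch in TypeScript, assuming the Gateway relays OpenAI-style server-sent events (data: {...} lines, terminated by data: [DONE]). The line handling is simplified; a production parser should buffer partial lines across chunks:

const response = await fetch('https://api.llmgateway.io/v1/chat/completions', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    'Authorization': `Bearer ${process.env.LLM_GATEWAY_API_KEY}`
  },
  body: JSON.stringify({
    model: 'gpt-4o',
    stream: true,
    messages: [{ role: 'user', content: 'Hello, how are you?' }]
  })
});

// Read the SSE stream chunk by chunk and print each content delta.
const reader = response.body.getReader();
const decoder = new TextDecoder();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  // Simplified: assumes each chunk contains whole "data: ..." lines.
  for (const line of decoder.decode(value, { stream: true }).split('\n')) {
    if (!line.startsWith('data: ') || line === 'data: [DONE]') continue;
    const chunk = JSON.parse(line.slice('data: '.length));
    process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
  }
}

For fail-over, fallback_models goes in the same JSON body alongside model; this guide only names the field, so see the API reference for accepted values.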
5 · Next steps
- Read the self-hosting docs guide.
- Drop into our GitHub for help or feature requests.
Happy building! ✨