LLM Gateway

Models

List all available models

GET
/v1/models

Response Body

curl -X GET "http://localhost:4001/v1/models"
fetch("http://localhost:4001/v1/models")
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"time"
)

// main fetches the model list from the gateway's GET /v1/models
// endpoint and prints the HTTP status followed by the raw JSON body.
func main() {
	url := "http://localhost:4001/v1/models"

	// Always set a timeout; http.DefaultClient waits forever.
	client := &http.Client{Timeout: 10 * time.Second}

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		log.Fatal(err)
	}

	res, err := client.Do(req)
	if err != nil {
		// On a transport error res is nil; bailing here avoids a
		// nil-pointer panic on res.Body below.
		log.Fatal(err)
	}
	defer res.Body.Close()

	// io.ReadAll replaces the deprecated ioutil.ReadAll (Go 1.16+).
	body, err := io.ReadAll(res.Body)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(res.Status)
	fmt.Println(string(body))
}
import requests

url = "http://localhost:4001/v1/models"

# requests.get is the idiomatic spelling of requests.request("GET", ...).
# A timeout is mandatory in practice: requests blocks indefinitely without one.
response = requests.get(url, timeout=10)
# Raise an HTTPError for 4xx/5xx instead of silently printing an error page.
response.raise_for_status()

print(response.text)
{
  "data": [
    {
      "id": "string",
      "name": "string",
      "created": 0,
      "description": "string",
      "architecture": {
        "input_modalities": [
          "text"
        ],
        "output_modalities": [
          "text"
        ],
        "tokenizer": "string"
      },
      "top_provider": {
        "is_moderated": true
      },
      "providers": [
        {
          "providerId": "string",
          "modelName": "string",
          "pricing": {
            "prompt": "string",
            "completion": "string",
            "image": "string"
          }
        }
      ],
      "pricing": {
        "prompt": "string",
        "completion": "string",
        "image": "string",
        "request": "string",
        "input_cache_read": "string",
        "input_cache_write": "string",
        "web_search": "string",
        "internal_reasoning": "string"
      },
      "context_length": 0,
      "hugging_face_id": "string",
      "per_request_limits": {
        "property1": "string",
        "property2": "string"
      },
      "supported_parameters": [
        "string"
      ]
    }
  ]
}