{
  "openapi": "3.0.3",
  "info": {
    "title": "Text Generation Inference",
    "description": "Text Generation Webserver",
    "contact": {
      "name": "Olivier Dehaene"
    },
    "license": {
      "name": "Apache 2.0",
      "url": "https://www.apache.org/licenses/LICENSE-2.0"
    },
    "version": "2.2.1-dev0"
  },
  "paths": {
    "/": {
      "post": {
        "tags": [
          "Text Generation Inference"
        ],
        "summary": "Generate tokens if `stream == false` or a stream of tokens if `stream == true`",
        "operationId": "compat_generate",
        "requestBody": {
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/CompatGenerateRequest"
              }
            }
          },
          "required": true
        },
        "responses": {
          "200": {
            "description": "Generated Text",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/GenerateResponse"
                }
              },
              "text/event-stream": {
                "schema": {
                  "$ref": "#/components/schemas/StreamResponse"
                }
              }
            }
          },
          "422": {
            "description": "Input validation error",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                },
                "example": {
                  "error": "Input validation error"
                }
              }
            }
          },
          "424": {
            "description": "Generation Error",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                },
                "example": {
                  "error": "Request failed during generation"
                }
              }
            }
          },
          "429": {
            "description": "Model is overloaded",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                },
                "example": {
                  "error": "Model is overloaded"
                }
              }
            }
          },
          "500": {
            "description": "Incomplete generation",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                },
                "example": {
                  "error": "Incomplete generation"
                }
              }
            }
          }
        }
      }
    },
"/generate": {
|
|
"post": {
|
|
"tags": [
|
|
"Text Generation Inference"
|
|
],
|
|
"summary": "Generate tokens",
|
|
"operationId": "generate",
|
|
"requestBody": {
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/GenerateRequest"
|
|
}
|
|
}
|
|
},
|
|
"required": true
|
|
},
|
|
"responses": {
|
|
"200": {
|
|
"description": "Generated Text",
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/GenerateResponse"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"422": {
|
|
"description": "Input validation error",
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/ErrorResponse"
|
|
},
|
|
"example": {
|
|
"error": "Input validation error"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"424": {
|
|
"description": "Generation Error",
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/ErrorResponse"
|
|
},
|
|
"example": {
|
|
"error": "Request failed during generation"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"429": {
|
|
"description": "Model is overloaded",
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/ErrorResponse"
|
|
},
|
|
"example": {
|
|
"error": "Model is overloaded"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"500": {
|
|
"description": "Incomplete generation",
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/ErrorResponse"
|
|
},
|
|
"example": {
|
|
"error": "Incomplete generation"
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"/generate_stream": {
|
|
"post": {
|
|
"tags": [
|
|
"Text Generation Inference"
|
|
],
|
|
"summary": "Generate a stream of token using Server-Sent Events",
|
|
"operationId": "generate_stream",
|
|
"requestBody": {
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/GenerateRequest"
|
|
}
|
|
}
|
|
},
|
|
"required": true
|
|
},
|
|
"responses": {
|
|
"200": {
|
|
"description": "Generated Text",
|
|
"content": {
|
|
"text/event-stream": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/StreamResponse"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"422": {
|
|
"description": "Input validation error",
|
|
"content": {
|
|
"text/event-stream": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/ErrorResponse"
|
|
},
|
|
"example": {
|
|
"error": "Input validation error"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"424": {
|
|
"description": "Generation Error",
|
|
"content": {
|
|
"text/event-stream": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/ErrorResponse"
|
|
},
|
|
"example": {
|
|
"error": "Request failed during generation"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"429": {
|
|
"description": "Model is overloaded",
|
|
"content": {
|
|
"text/event-stream": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/ErrorResponse"
|
|
},
|
|
"example": {
|
|
"error": "Model is overloaded"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"500": {
|
|
"description": "Incomplete generation",
|
|
"content": {
|
|
"text/event-stream": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/ErrorResponse"
|
|
},
|
|
"example": {
|
|
"error": "Incomplete generation"
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"/health": {
|
|
"get": {
|
|
"tags": [
|
|
"Text Generation Inference"
|
|
],
|
|
"summary": "Health check method",
|
|
"operationId": "health",
|
|
"responses": {
|
|
"200": {
|
|
"description": "Everything is working fine"
|
|
},
|
|
"503": {
|
|
"description": "Text generation inference is down",
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/ErrorResponse"
|
|
},
|
|
"example": {
|
|
"error": "unhealthy",
|
|
"error_type": "healthcheck"
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"/info": {
|
|
"get": {
|
|
"tags": [
|
|
"Text Generation Inference"
|
|
],
|
|
"summary": "Text Generation Inference endpoint info",
|
|
"operationId": "get_model_info",
|
|
"responses": {
|
|
"200": {
|
|
"description": "Served model info",
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/Info"
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"/metrics": {
|
|
"get": {
|
|
"tags": [
|
|
"Text Generation Inference"
|
|
],
|
|
"summary": "Prometheus metrics scrape endpoint",
|
|
"operationId": "metrics",
|
|
"responses": {
|
|
"200": {
|
|
"description": "Prometheus Metrics",
|
|
"content": {
|
|
"text/plain": {
|
|
"schema": {
|
|
"type": "string"
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"/tokenize": {
|
|
"post": {
|
|
"tags": [
|
|
"Text Generation Inference"
|
|
],
|
|
"summary": "Tokenize inputs",
|
|
"operationId": "tokenize",
|
|
"requestBody": {
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/GenerateRequest"
|
|
}
|
|
}
|
|
},
|
|
"required": true
|
|
},
|
|
"responses": {
|
|
"200": {
|
|
"description": "Tokenized ids",
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/TokenizeResponse"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"404": {
|
|
"description": "No tokenizer found",
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/ErrorResponse"
|
|
},
|
|
"example": {
|
|
"error": "No fast tokenizer available"
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"/v1/chat/completions": {
|
|
"post": {
|
|
"tags": [
|
|
"Text Generation Inference"
|
|
],
|
|
"summary": "Generate tokens",
|
|
"operationId": "chat_completions",
|
|
"requestBody": {
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/ChatRequest"
|
|
}
|
|
}
|
|
},
|
|
"required": true
|
|
},
|
|
"responses": {
|
|
"200": {
|
|
"description": "Generated Chat Completion",
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/ChatCompletion"
|
|
}
|
|
},
|
|
"text/event-stream": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/ChatCompletionChunk"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"422": {
|
|
"description": "Input validation error",
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/ErrorResponse"
|
|
},
|
|
"example": {
|
|
"error": "Input validation error"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"424": {
|
|
"description": "Generation Error",
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/ErrorResponse"
|
|
},
|
|
"example": {
|
|
"error": "Request failed during generation"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"429": {
|
|
"description": "Model is overloaded",
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/ErrorResponse"
|
|
},
|
|
"example": {
|
|
"error": "Model is overloaded"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"500": {
|
|
"description": "Incomplete generation",
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/ErrorResponse"
|
|
},
|
|
"example": {
|
|
"error": "Incomplete generation"
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"/v1/completions": {
|
|
"post": {
|
|
"tags": [
|
|
"Text Generation Inference"
|
|
],
|
|
"summary": "Generate tokens",
|
|
"operationId": "completions",
|
|
"requestBody": {
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/CompletionRequest"
|
|
}
|
|
}
|
|
},
|
|
"required": true
|
|
},
|
|
"responses": {
|
|
"200": {
|
|
"description": "Generated Chat Completion",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/CompletionFinal"
                }
              },
              "text/event-stream": {
                "schema": {
                  "$ref": "#/components/schemas/Chunk"
                }
              }
            }
          },
          "422": {
            "description": "Input validation error",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                },
                "example": {
                  "error": "Input validation error"
                }
              }
            }
          },
          "424": {
            "description": "Generation Error",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                },
                "example": {
                  "error": "Request failed during generation"
                }
              }
            }
          },
          "429": {
            "description": "Model is overloaded",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                },
                "example": {
                  "error": "Model is overloaded"
                }
              }
            }
          },
          "500": {
            "description": "Incomplete generation",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/ErrorResponse"
                },
                "example": {
                  "error": "Incomplete generation"
                }
              }
            }
          }
        }
      }
    },
"/v1/models": {
|
|
"get": {
|
|
"tags": [
|
|
"Text Generation Inference"
|
|
],
|
|
"summary": "Get model info",
|
|
"operationId": "openai_get_model_info",
|
|
"responses": {
|
|
"200": {
|
|
"description": "Served model info",
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/ModelInfo"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"404": {
|
|
"description": "Model not found",
|
|
"content": {
|
|
"application/json": {
|
|
"schema": {
|
|
"$ref": "#/components/schemas/ErrorResponse"
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"components": {
|
|
"schemas": {
|
|
"BestOfSequence": {
|
|
"type": "object",
|
|
"required": [
|
|
"generated_text",
|
|
"finish_reason",
|
|
"generated_tokens",
|
|
"prefill",
|
|
"tokens"
|
|
],
|
|
"properties": {
|
|
"finish_reason": {
|
|
"$ref": "#/components/schemas/FinishReason"
|
|
},
|
|
"generated_text": {
|
|
"type": "string",
|
|
"example": "test"
|
|
},
|
|
"generated_tokens": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"example": 1,
|
|
"minimum": 0
|
|
},
|
|
"prefill": {
|
|
"type": "array",
|
|
"items": {
|
|
"$ref": "#/components/schemas/PrefillToken"
|
|
}
|
|
},
|
|
"seed": {
|
|
"type": "integer",
|
|
"format": "int64",
|
|
"example": 42,
|
|
"nullable": true,
|
|
"minimum": 0
|
|
},
|
|
"tokens": {
|
|
"type": "array",
|
|
"items": {
|
|
"$ref": "#/components/schemas/Token"
|
|
}
|
|
},
|
|
"top_tokens": {
|
|
"type": "array",
|
|
"items": {
|
|
"type": "array",
|
|
"items": {
|
|
"$ref": "#/components/schemas/Token"
|
|
}
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"ChatCompletion": {
|
|
"type": "object",
|
|
"required": [
|
|
"id",
|
|
"created",
|
|
"model",
|
|
"system_fingerprint",
|
|
"choices",
|
|
"usage"
|
|
],
|
|
"properties": {
|
|
"choices": {
|
|
"type": "array",
|
|
"items": {
|
|
"$ref": "#/components/schemas/ChatCompletionComplete"
|
|
}
|
|
},
|
|
"created": {
|
|
"type": "integer",
|
|
"format": "int64",
|
|
"example": "1706270835",
|
|
"minimum": 0
|
|
},
|
|
"id": {
|
|
"type": "string"
|
|
},
|
|
"model": {
|
|
"type": "string",
|
|
"example": "mistralai/Mistral-7B-Instruct-v0.2"
|
|
},
|
|
"system_fingerprint": {
|
|
"type": "string"
|
|
},
|
|
"usage": {
|
|
"$ref": "#/components/schemas/Usage"
|
|
}
|
|
}
|
|
},
|
|
"ChatCompletionChoice": {
|
|
"type": "object",
|
|
"required": [
|
|
"index",
|
|
"delta"
|
|
],
|
|
"properties": {
|
|
"delta": {
|
|
"$ref": "#/components/schemas/ChatCompletionDelta"
|
|
},
|
|
"finish_reason": {
|
|
"type": "string",
|
|
"nullable": true
|
|
},
|
|
"index": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"minimum": 0
|
|
},
|
|
"logprobs": {
|
|
"allOf": [
|
|
{
|
|
"$ref": "#/components/schemas/ChatCompletionLogprobs"
|
|
}
|
|
],
|
|
"nullable": true
|
|
}
|
|
}
|
|
},
|
|
"ChatCompletionChunk": {
|
|
"type": "object",
|
|
"required": [
|
|
"id",
|
|
"created",
|
|
"model",
|
|
"system_fingerprint",
|
|
"choices"
|
|
],
|
|
"properties": {
|
|
"choices": {
|
|
"type": "array",
|
|
"items": {
|
|
"$ref": "#/components/schemas/ChatCompletionChoice"
|
|
}
|
|
},
|
|
"created": {
|
|
"type": "integer",
|
|
"format": "int64",
|
|
"example": "1706270978",
|
|
"minimum": 0
|
|
},
|
|
"id": {
|
|
"type": "string"
|
|
},
|
|
"model": {
|
|
"type": "string",
|
|
"example": "mistralai/Mistral-7B-Instruct-v0.2"
|
|
},
|
|
"system_fingerprint": {
|
|
"type": "string"
|
|
}
|
|
}
|
|
},
|
|
"ChatCompletionComplete": {
|
|
"type": "object",
|
|
"required": [
|
|
"index",
|
|
"message",
|
|
"finish_reason"
|
|
],
|
|
"properties": {
|
|
"finish_reason": {
|
|
"type": "string"
|
|
},
|
|
"index": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"minimum": 0
|
|
},
|
|
"logprobs": {
|
|
"allOf": [
|
|
{
|
|
"$ref": "#/components/schemas/ChatCompletionLogprobs"
|
|
}
|
|
],
|
|
"nullable": true
|
|
},
|
|
"message": {
|
|
"$ref": "#/components/schemas/OutputMessage"
|
|
}
|
|
}
|
|
},
|
|
"ChatCompletionDelta": {
|
|
"oneOf": [
|
|
{
|
|
"$ref": "#/components/schemas/TextMessage"
|
|
},
|
|
{
|
|
"$ref": "#/components/schemas/ToolCallDelta"
|
|
}
|
|
]
|
|
},
|
|
"ChatCompletionLogprob": {
|
|
"type": "object",
|
|
"required": [
|
|
"token",
|
|
"logprob",
|
|
"top_logprobs"
|
|
],
|
|
"properties": {
|
|
"logprob": {
|
|
"type": "number",
|
|
"format": "float"
|
|
},
|
|
"token": {
|
|
"type": "string"
|
|
},
|
|
"top_logprobs": {
|
|
"type": "array",
|
|
"items": {
|
|
"$ref": "#/components/schemas/ChatCompletionTopLogprob"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"ChatCompletionLogprobs": {
|
|
"type": "object",
|
|
"required": [
|
|
"content"
|
|
],
|
|
"properties": {
|
|
"content": {
|
|
"type": "array",
|
|
"items": {
|
|
"$ref": "#/components/schemas/ChatCompletionLogprob"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"ChatCompletionTopLogprob": {
|
|
"type": "object",
|
|
"required": [
|
|
"token",
|
|
"logprob"
|
|
],
|
|
"properties": {
|
|
"logprob": {
|
|
"type": "number",
|
|
"format": "float"
|
|
},
|
|
"token": {
|
|
"type": "string"
|
|
}
|
|
}
|
|
},
|
|
"ChatRequest": {
|
|
"type": "object",
|
|
"required": [
|
|
"messages"
|
|
],
|
|
"properties": {
|
|
"frequency_penalty": {
|
|
"type": "number",
|
|
"format": "float",
|
|
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\ndecreasing the model's likelihood to repeat the same line verbatim.",
|
|
"example": "1.0",
|
|
"nullable": true
|
|
},
|
|
"guideline": {
|
|
"type": "string",
|
|
"description": "A guideline to be used in the chat_template",
|
|
"default": "null",
|
|
"example": "null",
|
|
"nullable": true
|
|
},
|
|
"logit_bias": {
|
|
"type": "array",
|
|
"items": {
|
|
"type": "number",
|
|
"format": "float"
|
|
},
|
|
"description": "UNUSED\nModify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens\n(specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically,\nthe bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,\nbut values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should\nresult in a ban or exclusive selection of the relevant token.",
|
|
"nullable": true
|
|
},
|
|
"logprobs": {
|
|
"type": "boolean",
|
|
"description": "Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each\noutput token returned in the content of message.",
|
|
"example": "false",
|
|
"nullable": true
|
|
},
|
|
"max_tokens": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"description": "The maximum number of tokens that can be generated in the chat completion.",
|
|
"example": "32",
|
|
"nullable": true,
|
|
"minimum": 0
|
|
},
|
|
"messages": {
|
|
"type": "array",
|
|
"items": {
|
|
"$ref": "#/components/schemas/Message"
|
|
},
|
|
"description": "A list of messages comprising the conversation so far.",
|
|
"example": "[{\"role\": \"user\", \"content\": \"What is Deep Learning?\"}]"
|
|
},
|
|
"model": {
|
|
"type": "string",
|
|
"description": "[UNUSED] ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.",
|
|
"example": "mistralai/Mistral-7B-Instruct-v0.2",
|
|
"nullable": true
|
|
},
|
|
"n": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"description": "UNUSED\nHow many chat completion choices to generate for each input message. Note that you will be charged based on the\nnumber of generated tokens across all of the choices. Keep n as 1 to minimize costs.",
|
|
"example": "2",
|
|
"nullable": true,
|
|
"minimum": 0
|
|
},
|
|
"presence_penalty": {
|
|
"type": "number",
|
|
"format": "float",
|
|
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\nincreasing the model's likelihood to talk about new topics",
|
|
"example": 0.1,
|
|
"nullable": true
|
|
},
|
|
"response_format": {
|
|
"allOf": [
|
|
{
|
|
"$ref": "#/components/schemas/GrammarType"
|
|
}
|
|
],
|
|
"default": "null",
|
|
"nullable": true
|
|
},
|
|
"seed": {
|
|
"type": "integer",
|
|
"format": "int64",
|
|
"example": 42,
|
|
"nullable": true,
|
|
"minimum": 0
|
|
},
|
|
"stop": {
|
|
"type": "array",
|
|
"items": {
|
|
"type": "string"
|
|
},
|
|
"description": "Up to 4 sequences where the API will stop generating further tokens.",
|
|
"example": "null",
|
|
"nullable": true
|
|
},
|
|
"stream": {
|
|
"type": "boolean"
|
|
},
|
|
"temperature": {
|
|
"type": "number",
|
|
"format": "float",
|
|
"description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while\nlower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.",
|
|
"example": 1.0,
|
|
"nullable": true
|
|
},
|
|
"tool_choice": {
|
|
"allOf": [
|
|
{
|
|
"$ref": "#/components/schemas/ToolChoice"
|
|
}
|
|
],
|
|
"nullable": true
|
|
},
|
|
"tool_prompt": {
|
|
"type": "string",
|
|
"description": "A prompt to be appended before the tools",
|
|
"example": "Given the functions available, please respond with a JSON for a function call with its proper arguments that best answers the given prompt. Respond in the format {name: function name, parameters: dictionary of argument name and its value}.Do not use variables.",
            "nullable": true
          },
          "tools": {
            "type": "array",
            "items": {
              "$ref": "#/components/schemas/Tool"
            },
            "description": "A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of\nfunctions the model may generate JSON inputs for.",
            "example": "null",
            "nullable": true
          },
          "top_logprobs": {
            "type": "integer",
            "format": "int32",
            "description": "An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with\nan associated log probability. logprobs must be set to true if this parameter is used.",
            "example": "5",
            "nullable": true,
            "minimum": 0
          },
          "top_p": {
            "type": "number",
            "format": "float",
            "description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the\ntokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
            "example": 0.95,
            "nullable": true
          }
        }
      },
"Chunk": {
|
|
"type": "object",
|
|
"required": [
|
|
"id",
|
|
"created",
|
|
"choices",
|
|
"model",
|
|
"system_fingerprint"
|
|
],
|
|
"properties": {
|
|
"choices": {
|
|
"type": "array",
|
|
"items": {
|
|
"$ref": "#/components/schemas/CompletionComplete"
|
|
}
|
|
},
|
|
"created": {
|
|
"type": "integer",
|
|
"format": "int64",
|
|
"minimum": 0
|
|
},
|
|
"id": {
|
|
"type": "string"
|
|
},
|
|
"model": {
|
|
"type": "string"
|
|
},
|
|
"system_fingerprint": {
|
|
"type": "string"
|
|
}
|
|
}
|
|
},
|
|
"CompatGenerateRequest": {
|
|
"type": "object",
|
|
"required": [
|
|
"inputs"
|
|
],
|
|
"properties": {
|
|
"inputs": {
|
|
"type": "string",
|
|
"example": "My name is Olivier and I"
|
|
},
|
|
"parameters": {
|
|
"$ref": "#/components/schemas/GenerateParameters"
|
|
},
|
|
"stream": {
|
|
"type": "boolean",
|
|
"default": "false"
|
|
}
|
|
}
|
|
},
|
|
"Completion": {
|
|
"oneOf": [
|
|
{
|
|
"allOf": [
|
|
{
|
|
"$ref": "#/components/schemas/Chunk"
|
|
},
|
|
{
|
|
"type": "object",
|
|
"required": [
|
|
"object"
|
|
],
|
|
"properties": {
|
|
"object": {
|
|
"type": "string",
|
|
"enum": [
|
|
"text_completion"
|
|
]
|
|
}
|
|
}
|
|
}
|
|
]
|
|
},
|
|
{
|
|
"allOf": [
|
|
{
|
|
"$ref": "#/components/schemas/CompletionFinal"
|
|
},
|
|
{
|
|
"type": "object",
|
|
"required": [
|
|
"object"
|
|
],
|
|
"properties": {
|
|
"object": {
|
|
"type": "string",
|
|
"enum": [
|
|
"text_completion"
|
|
]
|
|
}
|
|
}
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"discriminator": {
|
|
"propertyName": "object"
|
|
}
|
|
},
|
|
"CompletionComplete": {
|
|
"type": "object",
|
|
"required": [
|
|
"index",
|
|
"text",
|
|
"finish_reason"
|
|
],
|
|
"properties": {
|
|
"finish_reason": {
|
|
"type": "string"
|
|
},
|
|
"index": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"minimum": 0
|
|
},
|
|
"logprobs": {
|
|
"type": "array",
|
|
"items": {
|
|
"type": "number",
|
|
"format": "float"
|
|
},
|
|
"nullable": true
|
|
},
|
|
"text": {
|
|
"type": "string"
|
|
}
|
|
}
|
|
},
|
|
"CompletionFinal": {
|
|
"type": "object",
|
|
"required": [
|
|
"id",
|
|
"created",
|
|
"model",
|
|
"system_fingerprint",
|
|
"choices",
|
|
"usage"
|
|
],
|
|
"properties": {
|
|
"choices": {
|
|
"type": "array",
|
|
"items": {
|
|
"$ref": "#/components/schemas/CompletionComplete"
|
|
}
|
|
},
|
|
"created": {
|
|
"type": "integer",
|
|
"format": "int64",
|
|
"example": "1706270835",
|
|
"minimum": 0
|
|
},
|
|
"id": {
|
|
"type": "string"
|
|
},
|
|
"model": {
|
|
"type": "string",
|
|
"example": "mistralai/Mistral-7B-Instruct-v0.2"
|
|
},
|
|
"system_fingerprint": {
|
|
"type": "string"
|
|
},
|
|
"usage": {
|
|
"$ref": "#/components/schemas/Usage"
|
|
}
|
|
}
|
|
},
|
|
"CompletionRequest": {
|
|
"type": "object",
|
|
"required": [
|
|
"prompt"
|
|
],
|
|
"properties": {
|
|
"frequency_penalty": {
|
|
"type": "number",
|
|
"format": "float",
|
|
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\ndecreasing the model's likelihood to repeat the same line verbatim.",
|
|
"example": "1.0",
|
|
"nullable": true
|
|
},
|
|
"max_tokens": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"description": "The maximum number of tokens that can be generated in the chat completion.",
|
|
"default": "32",
|
|
"nullable": true,
|
|
"minimum": 0
|
|
},
|
|
"model": {
|
|
"type": "string",
|
|
"description": "UNUSED\nID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.",
|
|
"example": "mistralai/Mistral-7B-Instruct-v0.2",
|
|
"nullable": true
|
|
},
|
|
"prompt": {
|
|
"$ref": "#/components/schemas/Prompt"
|
|
},
|
|
"repetition_penalty": {
|
|
"type": "number",
|
|
"format": "float",
|
|
"nullable": true
|
|
},
|
|
"seed": {
|
|
"type": "integer",
|
|
"format": "int64",
|
|
"example": 42,
|
|
"nullable": true,
|
|
"minimum": 0
|
|
},
|
|
"stop": {
|
|
"type": "array",
|
|
"items": {
|
|
"type": "string"
|
|
},
|
|
"description": "Up to 4 sequences where the API will stop generating further tokens.",
|
|
"example": "null",
|
|
"nullable": true
|
|
},
|
|
"stream": {
|
|
"type": "boolean"
|
|
},
|
|
"suffix": {
|
|
"type": "string",
|
|
"description": "The text to append to the prompt. This is useful for completing sentences or generating a paragraph of text.\nplease see the completion_template field in the model's tokenizer_config.json file for completion template.",
|
|
"nullable": true
|
|
},
|
|
"temperature": {
|
|
"type": "number",
|
|
"format": "float",
|
|
"description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while\nlower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.",
|
|
"example": 1.0,
|
|
"nullable": true
|
|
},
|
|
"top_p": {
|
|
"type": "number",
|
|
"format": "float",
|
|
"description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the\ntokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
|
|
"example": 0.95,
|
|
"nullable": true
|
|
}
|
|
}
|
|
},
|
|
"DeltaToolCall": {
|
|
"type": "object",
|
|
"required": [
|
|
"index",
|
|
"id",
|
|
"type",
|
|
"function"
|
|
],
|
|
"properties": {
|
|
"function": {
|
|
"$ref": "#/components/schemas/Function"
|
|
},
|
|
"id": {
|
|
"type": "string"
|
|
},
|
|
"index": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"minimum": 0
|
|
},
|
|
"type": {
|
|
"type": "string"
|
|
}
|
|
}
|
|
},
|
|
"Details": {
|
|
"type": "object",
|
|
"required": [
|
|
"finish_reason",
|
|
"generated_tokens",
|
|
"prefill",
|
|
"tokens"
|
|
],
|
|
"properties": {
|
|
"best_of_sequences": {
|
|
"type": "array",
|
|
"items": {
|
|
"$ref": "#/components/schemas/BestOfSequence"
|
|
},
|
|
"nullable": true
|
|
},
|
|
"finish_reason": {
|
|
"$ref": "#/components/schemas/FinishReason"
|
|
},
|
|
"generated_tokens": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"example": 1,
|
|
"minimum": 0
|
|
},
|
|
"prefill": {
|
|
"type": "array",
|
|
"items": {
|
|
"$ref": "#/components/schemas/PrefillToken"
|
|
}
|
|
},
|
|
"seed": {
|
|
"type": "integer",
|
|
"format": "int64",
|
|
"example": 42,
|
|
"nullable": true,
|
|
"minimum": 0
|
|
},
|
|
"tokens": {
|
|
"type": "array",
|
|
"items": {
|
|
"$ref": "#/components/schemas/Token"
|
|
}
|
|
},
|
|
"top_tokens": {
|
|
"type": "array",
|
|
"items": {
|
|
"type": "array",
|
|
"items": {
|
|
"$ref": "#/components/schemas/Token"
|
|
}
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"ErrorResponse": {
|
|
"type": "object",
|
|
"required": [
|
|
"error",
|
|
"error_type"
|
|
],
|
|
"properties": {
|
|
"error": {
|
|
"type": "string"
|
|
},
|
|
"error_type": {
|
|
"type": "string"
|
|
}
|
|
}
|
|
},
|
|
"FinishReason": {
|
|
"type": "string",
|
|
"enum": [
|
|
"length",
|
|
"eos_token",
|
|
"stop_sequence"
|
|
],
|
|
"example": "Length"
      },
      "Function": {
        "type": "object",
        "required": [
          "arguments"
        ],
        "properties": {
          "arguments": {
            "type": "string"
          },
          "name": {
            "type": "string",
            "nullable": true
          }
        }
      },
      "FunctionDefinition": {
        "type": "object",
        "required": [
          "name",
          "arguments"
        ],
        "properties": {
          "arguments": {},
          "description": {
            "type": "string",
            "nullable": true
          },
          "name": {
            "type": "string"
          }
        }
      },
      "FunctionName": {
        "type": "object",
        "required": [
          "name"
        ],
        "properties": {
          "name": {
            "type": "string"
          }
        }
      },
"GenerateParameters": {
|
|
"type": "object",
|
|
"properties": {
|
|
"adapter_id": {
|
|
"type": "string",
|
|
"description": "Lora adapter id",
|
|
"default": "null",
|
|
"example": "null",
|
|
"nullable": true
|
|
},
|
|
"best_of": {
|
|
"type": "integer",
|
|
"description": "Generate best_of sequences and return the one if the highest token logprobs.",
|
|
"default": "null",
|
|
"example": 1,
|
|
"nullable": true,
|
|
"minimum": 0,
|
|
"exclusiveMinimum": 0
|
|
},
|
|
"decoder_input_details": {
|
|
"type": "boolean",
|
|
"description": "Whether to return decoder input token logprobs and ids.",
|
|
"default": "false"
|
|
},
|
|
"details": {
|
|
"type": "boolean",
|
|
"description": "Whether to return generation details.",
|
|
"default": "true"
|
|
},
|
|
"do_sample": {
|
|
"type": "boolean",
|
|
"description": "Activate logits sampling.",
|
|
"default": "false",
|
|
"example": true
|
|
},
|
|
"frequency_penalty": {
|
|
"type": "number",
|
|
"format": "float",
|
|
"description": "The parameter for frequency penalty. 1.0 means no penalty\nPenalize new tokens based on their existing frequency in the text so far,\ndecreasing the model's likelihood to repeat the same line verbatim.",
|
|
"default": "null",
|
|
"example": 0.1,
|
|
"nullable": true,
|
|
"exclusiveMinimum": -2
|
|
},
|
|
"grammar": {
|
|
"allOf": [
|
|
{
|
|
"$ref": "#/components/schemas/GrammarType"
|
|
}
|
|
],
|
|
"default": "null",
|
|
"nullable": true
|
|
},
|
|
"max_new_tokens": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"description": "Maximum number of tokens to generate.",
|
|
"default": "100",
|
|
"example": "20",
|
|
"nullable": true,
|
|
"minimum": 0
|
|
},
|
|
"repetition_penalty": {
|
|
"type": "number",
|
|
"format": "float",
|
|
"description": "The parameter for repetition penalty. 1.0 means no penalty.\nSee [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.",
|
|
"default": "null",
|
|
"example": 1.03,
|
|
"nullable": true,
|
|
"exclusiveMinimum": 0
|
|
},
|
|
"return_full_text": {
|
|
"type": "boolean",
|
|
"description": "Whether to prepend the prompt to the generated text",
|
|
"default": "null",
|
|
"example": false,
|
|
"nullable": true
|
|
},
|
|
"seed": {
|
|
"type": "integer",
|
|
"format": "int64",
|
|
"description": "Random sampling seed.",
|
|
"default": "null",
|
|
"example": "null",
|
|
"nullable": true,
|
|
"minimum": 0,
|
|
"exclusiveMinimum": 0
|
|
},
|
|
"stop": {
|
|
"type": "array",
|
|
"items": {
|
|
"type": "string"
|
|
},
|
|
"description": "Stop generating tokens if a member of `stop` is generated.",
|
|
"example": [
|
|
"photographer"
|
|
],
|
|
"maxItems": 4
|
|
},
|
|
"temperature": {
|
|
"type": "number",
|
|
"format": "float",
|
|
"description": "The value used to module the logits distribution.",
|
|
"default": "null",
|
|
"example": 0.5,
|
|
"nullable": true,
|
|
"exclusiveMinimum": 0
|
|
},
|
|
"top_k": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"description": "The number of highest probability vocabulary tokens to keep for top-k-filtering.",
|
|
"default": "null",
|
|
"example": 10,
|
|
"nullable": true,
|
|
"exclusiveMinimum": 0
|
|
},
|
|
"top_n_tokens": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"description": "The number of highest probability vocabulary tokens to keep for top-n-filtering.",
|
|
"default": "null",
|
|
"example": 5,
|
|
"nullable": true,
|
|
"minimum": 0,
|
|
"exclusiveMinimum": 0
|
|
},
|
|
"top_p": {
|
|
"type": "number",
|
|
"format": "float",
|
|
"description": "Top-p value for nucleus sampling.",
|
|
"default": "null",
|
|
"example": 0.95,
|
|
"nullable": true,
|
|
"maximum": 1,
|
|
"exclusiveMinimum": 0
|
|
},
|
|
"truncate": {
|
|
"type": "integer",
|
|
"description": "Truncate inputs tokens to the given size.",
|
|
"default": "null",
|
|
"example": "null",
|
|
"nullable": true,
|
|
"minimum": 0
|
|
},
|
|
"typical_p": {
|
|
"type": "number",
|
|
"format": "float",
|
|
"description": "Typical Decoding mass\nSee [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information.",
|
|
"default": "null",
|
|
"example": 0.95,
|
|
"nullable": true,
|
|
"maximum": 1,
|
|
"exclusiveMinimum": 0
|
|
},
|
|
"watermark": {
|
|
"type": "boolean",
|
|
"description": "Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226).",
|
|
"default": "false",
|
|
"example": true
|
|
}
|
|
}
|
|
},
|
|
"GenerateRequest": {
|
|
"type": "object",
|
|
"required": [
|
|
"inputs"
|
|
],
|
|
"properties": {
|
|
"inputs": {
|
|
"type": "string",
|
|
"example": "My name is Olivier and I"
|
|
},
|
|
"parameters": {
|
|
"$ref": "#/components/schemas/GenerateParameters"
|
|
}
|
|
}
|
|
},
|
|
"GenerateResponse": {
|
|
"type": "object",
|
|
"required": [
|
|
"generated_text"
|
|
],
|
|
"properties": {
|
|
"details": {
|
|
"allOf": [
|
|
{
|
|
"$ref": "#/components/schemas/Details"
|
|
}
|
|
],
|
|
"nullable": true
|
|
},
|
|
"generated_text": {
|
|
"type": "string",
|
|
"example": "test"
|
|
}
|
|
}
|
|
},
|
|
"GrammarType": {
|
|
"oneOf": [
|
|
{
|
|
"type": "object",
|
|
"required": [
|
|
"type",
|
|
"value"
|
|
],
|
|
"properties": {
|
|
"type": {
|
|
"type": "string",
|
|
"enum": [
|
|
"json"
|
|
]
|
|
},
|
|
"value": {
|
|
"description": "A string that represents a [JSON Schema](https://json-schema.org/).\n\nJSON Schema is a declarative language that allows to annotate JSON documents\nwith types and descriptions."
|
|
}
|
|
}
|
|
},
|
|
{
|
|
"type": "object",
|
|
"required": [
|
|
"type",
|
|
"value"
|
|
],
|
|
"properties": {
|
|
"type": {
|
|
"type": "string",
|
|
"enum": [
|
|
"regex"
|
|
]
|
|
},
|
|
"value": {
|
|
"type": "string"
|
|
}
|
|
}
|
|
}
|
|
],
|
|
"discriminator": {
|
|
"propertyName": "type"
|
|
}
|
|
},
|
|
"Info": {
|
|
"type": "object",
|
|
"required": [
|
|
"model_id",
|
|
"max_concurrent_requests",
|
|
"max_best_of",
|
|
"max_stop_sequences",
|
|
"max_input_tokens",
|
|
"max_total_tokens",
|
|
"validation_workers",
|
|
"max_client_batch_size",
|
|
"router",
|
|
"version"
|
|
],
|
|
"properties": {
|
|
"docker_label": {
|
|
"type": "string",
|
|
"example": "null",
|
|
"nullable": true
|
|
},
|
|
"max_best_of": {
|
|
"type": "integer",
|
|
"example": "2",
|
|
"minimum": 0
|
|
},
|
|
"max_client_batch_size": {
|
|
"type": "integer",
|
|
"example": "32",
|
|
"minimum": 0
|
|
},
|
|
"max_concurrent_requests": {
|
|
"type": "integer",
|
|
"description": "Router Parameters",
|
|
"example": "128",
|
|
"minimum": 0
|
|
},
|
|
"max_input_tokens": {
|
|
"type": "integer",
|
|
"example": "1024",
|
|
"minimum": 0
|
|
},
|
|
"max_stop_sequences": {
|
|
"type": "integer",
|
|
"example": "4",
|
|
"minimum": 0
|
|
},
|
|
"max_total_tokens": {
|
|
"type": "integer",
|
|
"example": "2048",
|
|
"minimum": 0
|
|
},
|
|
"model_id": {
|
|
"type": "string",
|
|
"description": "Model info",
|
|
"example": "bigscience/blomm-560m"
          },
          "model_pipeline_tag": {
            "type": "string",
            "example": "text-generation",
            "nullable": true
          },
          "model_sha": {
            "type": "string",
            "example": "e985a63cdc139290c5f700ff1929f0b5942cced2",
            "nullable": true
          },
          "router": {
            "type": "string",
            "description": "Router Info",
            "example": "text-generation-router"
          },
          "sha": {
            "type": "string",
            "example": "null",
            "nullable": true
          },
          "validation_workers": {
            "type": "integer",
            "example": "2",
            "minimum": 0
          },
          "version": {
            "type": "string",
            "example": "0.5.0"
          }
        }
      },
"Message": {
|
|
"type": "object",
|
|
"required": [
|
|
"role",
|
|
"content"
|
|
],
|
|
"properties": {
|
|
"content": {
|
|
"$ref": "#/components/schemas/MessageContent"
|
|
},
|
|
"name": {
|
|
"type": "string",
|
|
"example": "\"David\"",
|
|
"nullable": true
|
|
},
|
|
"role": {
|
|
"type": "string",
|
|
"example": "user"
|
|
}
|
|
}
|
|
},
|
|
"MessageChunk": {
|
|
"oneOf": [
|
|
{
|
|
"type": "object",
|
|
"required": [
|
|
"text",
|
|
"type"
|
|
],
|
|
"properties": {
|
|
"text": {
|
|
"type": "string"
|
|
},
|
|
"type": {
|
|
"type": "string",
|
|
"enum": [
|
|
"text"
|
|
]
|
|
}
|
|
}
|
|
},
|
|
{
|
|
"type": "object",
|
|
"required": [
|
|
"image_url",
|
|
"type"
|
|
],
|
|
"properties": {
|
|
"image_url": {
|
|
"$ref": "#/components/schemas/Url"
|
|
},
|
|
"type": {
|
|
"type": "string",
|
|
"enum": [
|
|
"image_url"
|
|
]
|
|
}
|
|
}
|
|
}
|
|
],
|
|
"discriminator": {
|
|
"propertyName": "type"
|
|
}
|
|
},
|
|
"MessageContent": {
|
|
"oneOf": [
|
|
{
|
|
"type": "string"
|
|
},
|
|
{
|
|
"type": "array",
|
|
"items": {
|
|
"$ref": "#/components/schemas/MessageChunk"
|
|
}
|
|
}
|
|
]
|
|
},
|
|
"ModelInfo": {
|
|
"type": "object",
|
|
"required": [
|
|
"id",
|
|
"object",
|
|
"created",
|
|
"owned_by"
|
|
],
|
|
"properties": {
|
|
"created": {
|
|
"type": "integer",
|
|
"format": "int64",
|
|
"example": 1686935002,
|
|
"minimum": 0
|
|
},
|
|
"id": {
|
|
"type": "string",
|
|
"example": "gpt2"
|
|
},
|
|
"object": {
|
|
"type": "string",
|
|
"example": "model"
|
|
},
|
|
"owned_by": {
|
|
"type": "string",
|
|
"example": "openai"
|
|
}
|
|
}
|
|
},
|
|
"OutputMessage": {
|
|
"oneOf": [
|
|
{
|
|
"$ref": "#/components/schemas/TextMessage"
|
|
},
|
|
{
|
|
"$ref": "#/components/schemas/ToolCallMessage"
|
|
}
|
|
]
|
|
},
|
|
"PrefillToken": {
|
|
"type": "object",
|
|
"required": [
|
|
"id",
|
|
"text",
|
|
"logprob"
|
|
],
|
|
"properties": {
|
|
"id": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"example": 0,
|
|
"minimum": 0
|
|
},
|
|
"logprob": {
|
|
"type": "number",
|
|
"format": "float",
|
|
"example": -0.34,
|
|
"nullable": true
|
|
},
|
|
"text": {
|
|
"type": "string",
|
|
"example": "test"
|
|
}
|
|
}
|
|
},
|
|
"Prompt": {
|
|
"type": "array",
|
|
"items": {
|
|
"type": "string"
|
|
}
|
|
},
|
|
"SimpleToken": {
|
|
"type": "object",
|
|
"required": [
|
|
"id",
|
|
"text",
|
|
"start",
|
|
"stop"
|
|
],
|
|
"properties": {
|
|
"id": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"example": 0,
|
|
"minimum": 0
|
|
},
|
|
"start": {
|
|
"type": "integer",
|
|
"example": 0,
|
|
"minimum": 0
|
|
},
|
|
"stop": {
|
|
"type": "integer",
|
|
"example": 2,
|
|
"minimum": 0
|
|
},
|
|
"text": {
|
|
"type": "string",
|
|
"example": "test"
|
|
}
|
|
}
|
|
},
|
|
"StreamDetails": {
|
|
"type": "object",
|
|
"required": [
|
|
"finish_reason",
|
|
"generated_tokens",
|
|
"input_length"
|
|
],
|
|
"properties": {
|
|
"finish_reason": {
|
|
"$ref": "#/components/schemas/FinishReason"
|
|
},
|
|
"generated_tokens": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"example": 1,
|
|
"minimum": 0
|
|
},
|
|
"input_length": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"example": 1,
|
|
"minimum": 0
|
|
},
|
|
"seed": {
|
|
"type": "integer",
|
|
"format": "int64",
|
|
"example": 42,
|
|
"nullable": true,
|
|
"minimum": 0
|
|
}
|
|
}
|
|
},
|
|
"StreamResponse": {
|
|
"type": "object",
|
|
"required": [
|
|
"index",
|
|
"token"
|
|
],
|
|
"properties": {
|
|
"details": {
|
|
"allOf": [
|
|
{
|
|
"$ref": "#/components/schemas/StreamDetails"
|
|
}
|
|
],
|
|
"default": "null",
|
|
"nullable": true
|
|
},
|
|
"generated_text": {
|
|
"type": "string",
|
|
"default": "null",
|
|
"example": "test",
|
|
"nullable": true
|
|
},
|
|
"index": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"minimum": 0
|
|
},
|
|
"token": {
|
|
"$ref": "#/components/schemas/Token"
|
|
},
|
|
"top_tokens": {
|
|
"type": "array",
|
|
"items": {
|
|
"$ref": "#/components/schemas/Token"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"TextMessage": {
|
|
"type": "object",
|
|
"required": [
|
|
"role",
|
|
"content"
|
|
],
|
|
"properties": {
|
|
"content": {
|
|
"type": "string",
|
|
"example": "My name is David and I"
|
|
},
|
|
"role": {
|
|
"type": "string",
|
|
"example": "user"
|
|
}
|
|
}
|
|
},
|
|
"Token": {
|
|
"type": "object",
|
|
"required": [
|
|
"id",
|
|
"text",
|
|
"logprob",
|
|
"special"
|
|
],
|
|
"properties": {
|
|
"id": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"example": 0,
|
|
"minimum": 0
|
|
},
|
|
"logprob": {
|
|
"type": "number",
|
|
"format": "float",
|
|
"example": -0.34,
|
|
"nullable": true
|
|
},
|
|
"special": {
|
|
"type": "boolean",
|
|
"example": "false"
|
|
},
|
|
"text": {
|
|
"type": "string",
|
|
"example": "test"
|
|
}
|
|
}
|
|
},
|
|
"TokenizeResponse": {
|
|
"type": "array",
|
|
"items": {
|
|
"$ref": "#/components/schemas/SimpleToken"
|
|
}
|
|
},
|
|
"Tool": {
|
|
"type": "object",
|
|
"required": [
|
|
"type",
|
|
"function"
|
|
],
|
|
"properties": {
|
|
"function": {
|
|
"$ref": "#/components/schemas/FunctionDefinition"
|
|
},
|
|
"type": {
|
|
"type": "string",
|
|
"example": "function"
|
|
}
|
|
}
|
|
},
|
|
"ToolCall": {
|
|
"type": "object",
|
|
"required": [
|
|
"id",
|
|
"type",
|
|
"function"
|
|
],
|
|
"properties": {
|
|
"function": {
|
|
"$ref": "#/components/schemas/FunctionDefinition"
|
|
},
|
|
"id": {
|
|
"type": "string"
|
|
},
|
|
"type": {
|
|
"type": "string"
|
|
}
|
|
}
|
|
},
|
|
"ToolCallDelta": {
|
|
"type": "object",
|
|
"required": [
|
|
"role",
|
|
"tool_calls"
|
|
],
|
|
"properties": {
|
|
"role": {
|
|
"type": "string",
|
|
"example": "assistant"
|
|
},
|
|
"tool_calls": {
|
|
"$ref": "#/components/schemas/DeltaToolCall"
|
|
}
|
|
}
|
|
},
|
|
"ToolCallMessage": {
|
|
"type": "object",
|
|
"required": [
|
|
"role",
|
|
"tool_calls"
|
|
],
|
|
"properties": {
|
|
"role": {
|
|
"type": "string",
|
|
"example": "assistant"
|
|
},
|
|
"tool_calls": {
|
|
"type": "array",
|
|
"items": {
|
|
"$ref": "#/components/schemas/ToolCall"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"ToolChoice": {
|
|
"allOf": [
|
|
{
|
|
"$ref": "#/components/schemas/ToolType"
|
|
}
|
|
],
|
|
"nullable": true
|
|
},
|
|
"ToolType": {
|
|
"oneOf": [
|
|
{
|
|
"type": "object",
|
|
"default": null,
|
|
"nullable": true
|
|
},
|
|
{
|
|
"type": "string"
|
|
},
|
|
{
|
|
"type": "object",
|
|
"required": [
|
|
"function"
|
|
],
|
|
"properties": {
|
|
"function": {
|
|
"$ref": "#/components/schemas/FunctionName"
|
|
}
|
|
}
|
|
},
|
|
{
|
|
"type": "object",
|
|
"default": null,
|
|
"nullable": true
|
|
}
|
|
]
|
|
},
|
|
"Url": {
|
|
"type": "object",
|
|
"required": [
|
|
"url"
|
|
],
|
|
"properties": {
|
|
"url": {
|
|
"type": "string"
|
|
}
|
|
}
|
|
},
|
|
"Usage": {
|
|
"type": "object",
|
|
"required": [
|
|
"prompt_tokens",
|
|
"completion_tokens",
|
|
"total_tokens"
|
|
],
|
|
"properties": {
|
|
"completion_tokens": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"minimum": 0
|
|
},
|
|
"prompt_tokens": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"minimum": 0
|
|
},
|
|
"total_tokens": {
|
|
"type": "integer",
|
|
"format": "int32",
|
|
"minimum": 0
|
|
}
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"tags": [
|
|
{
|
|
"name": "Text Generation Inference",
|
|
"description": "Hugging Face Text Generation Inference API"
|
|
}
|
|
]
|
|
}
|