// proto/generate.proto
// gRPC wire contract between the text-generation-inference router and model shards.
// Schema for the text-generation shard service (package generate.v1).
syntax = "proto3";
package generate.v1;
/// RPC interface exposed by each model shard.
service TextGenerationService {
  /// Service discovery: returns the urls of the other shards
  rpc ServiceDiscovery (ServiceDiscoveryRequest) returns (ServiceDiscoveryResponse);
  /// Empties batch cache
  rpc ClearCache (ClearCacheRequest) returns (ClearCacheResponse);
  /// Generate tokens for a batch
  rpc Generate (GenerateRequest) returns (GenerateResponse);
  /// Generate tokens for a list of cached batches
  rpc GenerateWithCache (GenerateWithCacheRequest) returns (GenerateWithCacheResponse);
}
/// Empty request for the ServiceDiscovery RPC (placeholder; fields may be added later).
message ServiceDiscoveryRequest {}
/// Response listing the peers of the queried shard.
message ServiceDiscoveryResponse {
  /// Other shards urls
  repeated string urls = 1;
}
/// Empty request for the ClearCache RPC (placeholder; fields may be added later).
message ClearCacheRequest {}
/// Empty response for the ClearCache RPC (placeholder; fields may be added later).
message ClearCacheResponse {}
/// Sampling parameters used when choosing the next token for a request.
message NextTokenChooserParameters {
  /// exponential scaling of the output probability distribution
  float temperature = 1;
  /// restricting to the k highest probability elements
  uint32 top_k = 2;
  /// restricting to the smallest set of tokens whose cumulative probability is >= top_p
  float top_p = 3;
  /// apply sampling on the logits (greedy decoding when false)
  bool do_sample = 4;
}
/// Criteria that terminate generation for a request.
message StoppingCriteriaParameters {
  /// Maximum number of generated tokens
  uint32 max_new_tokens = 1;
  /// Optional stopping sequences
  repeated string stop_sequences = 2;
}
/// A single generation request inside a Batch.
message Request {
  /// Request ID
  uint64 id = 1;
  /// The generation context
  string inputs = 2;
  /// The number of tokens inside inputs
  uint32 input_length = 3;
  /// Next Token Chooser Parameters
  NextTokenChooserParameters parameters = 4;
  /// Stopping Criteria Parameters
  StoppingCriteriaParameters stopping_parameters = 5;
}
/// A batch of requests processed together by a shard.
message Batch {
  /// Batch ID
  uint64 id = 1;
  /// Individual requests
  repeated Request requests = 2;
  /// Batch size (== len(requests))
  uint32 size = 3;
}
/// Final output for a request whose generation has finished.
message GeneratedText {
  /// Request
  Request request = 1;
  /// Output
  string output_text = 2;
  /// Number of generated tokens
  uint32 generated_tokens = 3;
  /// Tokens
  repeated string tokens = 4;
  /// Token IDs
  repeated uint32 token_ids = 5;
  /// Logprobs
  repeated float logprobs = 6;
  /// Finish reason
  string finish_reason = 7;
}
/// Request for the Generate RPC.
message GenerateRequest {
  /// Batch
  Batch batch = 1;
}
/// Response for the Generate RPC.
message GenerateResponse {
  /// Finished requests
  repeated GeneratedText generated_texts = 1;
  /// Next batch (cached); unset when all requests in the batch finished
  optional Batch batch = 2;
}
/// Request for the GenerateWithCache RPC.
message GenerateWithCacheRequest {
  /// Cached batches
  repeated Batch batches = 1;
}
/// Response for the GenerateWithCache RPC.
message GenerateWithCacheResponse {
  /// Finished requests
  repeated GeneratedText generated_texts = 1;
  /// Next batch (cached); unset when all requests finished
  optional Batch batch = 2;
}