feat(backend): remove reinterpret_cast converting from uint32_t to llama_token(int32_t)

Author: Morgan Funtowicz (2024-11-09 22:19:38 +01:00)
parent 86d30aea43
commit 6915fa3441
1 changed file with 2 additions and 2 deletions


@@ -56,8 +56,8 @@ namespace huggingface::tgi::backends::llamacpp {
         };
         // Ask the compiler to create view over Rust slice transmuting from uint32_t* to llama_token*
-        auto input_tokens_v =
-                std::span(reinterpret_cast<const llama_token *>(input_tokens.data()), input_tokens.size());
+        auto input_tokens_v = std::vector<llama_token>(input_tokens.size());
+        std::memcpy(input_tokens_v.data(), input_tokens.data(), input_tokens.size());
         const auto generation_context = generation_context_t {generation_params, sampling_params, input_tokens_v};
         if(const auto result = worker_.generate(generation_context, context_forwarding_callback); result.has_value()) [[likely]] {