diff --git a/backends/llamacpp/csrc/ffi.hpp b/backends/llamacpp/csrc/ffi.hpp
index 51a524cb..70669b7c 100644
--- a/backends/llamacpp/csrc/ffi.hpp
+++ b/backends/llamacpp/csrc/ffi.hpp
@@ -56,8 +56,8 @@ namespace huggingface::tgi::backends::llamacpp {
         };
 
         // Ask the compiler to create view over Rust slice transmuting from uint32_t* to llama_token*
-        auto input_tokens_v =
-                std::span(reinterpret_cast<const llama_token *>(input_tokens.data()), input_tokens.size());
+        // Copy the Rust-owned token slice into a C++-owned buffer (uint32_t and llama_token are layout-compatible);
+        // size is in bytes, so scale the element count by sizeof(llama_token).
+        auto input_tokens_v = std::vector<llama_token>(input_tokens.size());
+        std::memcpy(input_tokens_v.data(), input_tokens.data(), input_tokens.size() * sizeof(llama_token));
 
         const auto generation_context = generation_context_t {generation_params, sampling_params, input_tokens_v};
         if(const auto result = worker_.generate(generation_context, context_forwarding_callback); result.has_value()) [[likely]] {