//
// Created by mfuntowicz on 10/23/24.
//

#ifndef TGI_LLAMA_CPP_BACKEND_FFI_HPP
#define TGI_LLAMA_CPP_BACKEND_FFI_HPP

#include <concepts>
#include <cstdint>
#include <exception>
#include <expected>
#include <filesystem>
#include <memory>
#include <span>
#include <string_view>
#include <variant>

#include <spdlog/spdlog.h>

#include "backend.hpp"
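
// Forward declarations so the cxx-generated bridge header included below can name these types
// before their full definitions are visible.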
namespace huggingface::tgi::backends::llamacpp {
    struct generation_params_t;
    struct sampling_params_t;

    class llama_cpp_backend_impl_t;
}

#include "backends/llamacpp/src/lib.rs.h"
#include "rust/cxx.h"

namespace huggingface::tgi::backends::llamacpp {

    // Concept identifying types which expose a .stream(...) method performing in-place generation
    // and reporting either the number of generated tokens or a backend_error_t.
    template<typename T>
    concept has_stream_method = requires(
            T t,
            std::span<const llama_token> input_tokens,
            const generation_params_t &generation_params,
            const sampling_params_t &sampling_params,
            llama_decode_callback callback
    ) {
        {
            t.stream(input_tokens, generation_params, sampling_params, callback)
        } -> std::same_as<std::expected<size_t, backend_error_t>>;
    };

    static_assert(has_stream_method<single_worker_backend_t>, "single_worker_backend_t doesn't meet concept has_stream_method");
    static_assert(has_stream_method<multi_worker_backend_t>, "multi_worker_backend_t doesn't meet concept has_stream_method");
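
    // For illustration only (not part of this backend): a minimal sketch of a type satisfying
    // has_stream_method, assuming backend_error_t, llama_decode_callback and llama_token are the
    // ones provided by backend.hpp / llama.cpp as used above:
    //
    //     struct dummy_stream_backend_t {
    //         std::expected<size_t, backend_error_t> stream(
    //                 std::span<const llama_token> input_tokens,
    //                 const generation_params_t &generation_params,
    //                 const sampling_params_t &sampling_params,
    //                 llama_decode_callback callback) {
    //             return 0;  // pretend no token was generated
    //         }
    //     };
    //     static_assert(has_stream_method<dummy_stream_backend_t>);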

    class llama_cpp_backend_exception_t : public std::exception {};

    /**
     * Llama.cpp backend interfacing with Rust FFI layer
     */
    class llama_cpp_backend_impl_t {
    private:
        std::variant<single_worker_backend_t, multi_worker_backend_t> mInner_;

    public:
        explicit llama_cpp_backend_impl_t(single_worker_backend_t &&backend) : mInner_(std::move(backend)) {}

        explicit llama_cpp_backend_impl_t(multi_worker_backend_t &&backend) : mInner_(std::move(backend)) {}
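
        /**
         * Decode the provided prompt tokens with whichever worker backend is wrapped by this
         * instance, forwarding every generated token to the Rust callback together with the
         * opaque InferContext pointer. Returns the count reported by the underlying stream()
         * call, or throws llama_cpp_backend_exception_t if it reported a backend_error_t.
         */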
        size_t stream(
                rust::Slice<const uint32_t> input_tokens,
                const generation_params_t generation_params,
                const sampling_params_t &sampling_params,
                InferContext *ctx,
                rust::Fn<bool(InferContext *, uint32_t, float_t, bool, size_t)> callback
        ) {
            // Visitor lambda over the backend variant; the has_stream_method constraint on T
            // guarantees every alternative exposes the stream() entry point checked above.
            auto inner_fw = [=, &sampling_params, &ctx, &callback]<has_stream_method T>(T &&backend)
                    -> std::expected<size_t, backend_error_t> {
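
                // Re-attach the opaque InferContext pointer so the worker backend only needs a
                // plain token callback (llama_decode_callback) and never sees Rust types.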
                auto context_forwarding_callback = [=, &ctx](uint32_t new_token_id, float_t logits, bool is_eos, size_t n_generated_tokens) -> bool {
                    return callback(ctx, new_token_id, logits, is_eos, n_generated_tokens);
                };

                // Reinterpret the Rust slice of uint32_t token ids as a span of llama_token (int32_t):
                // same width, different signedness, so no copy is needed.
                auto input_tokens_v =
                        std::span(reinterpret_cast<const llama_token *>(input_tokens.data()), input_tokens.size());

                return backend.stream(
                        input_tokens_v,
                        generation_params,
                        sampling_params,
                        context_forwarding_callback
                );
            };
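
            // Dispatch to whichever worker backend the variant currently holds; a backend_error_t
            // is translated into a C++ exception so the failure can cross the FFI boundary.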
            if (const auto result = std::visit(inner_fw, mInner_); result.has_value()) {
                return *result;
            } else {
                throw llama_cpp_backend_exception_t();
            }
        }
    };
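
    /**
     * Build a single-worker backend from a model file on disk, memory-mapping the weights
     * (use_mmap) instead of loading them eagerly, and wrap it for the Rust FFI layer.
     */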
    std::unique_ptr<llama_cpp_backend_impl_t> create_single_worker_backend(rust::Str modelPath) {
        const auto cxxPath = std::string(modelPath);
        auto params = llama_model_default_params();
        params.use_mmap = true;

        auto *model = llama_load_model_from_file(cxxPath.c_str(), params);
        auto backend = single_worker_backend_t(model, std::nullopt);
        return std::make_unique<llama_cpp_backend_impl_t>(std::move(backend));
    }
}

#endif //TGI_LLAMA_CPP_BACKEND_FFI_HPP