//
// Created by mfuntowicz on 10/3/24.
//
#include <filesystem>
#include <string_view>
#include <fmt/format.h>
#include <fmt/std.h>
#include <fmt/color.h>
#include <spdlog/spdlog.h>
#include "../csrc/backend.hpp"
|
|
|
|
|
|
|
|
int main(int argc, char** argv) {
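    // Expect the model folder path as the first command-line argument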
    if (argc < 2) {
        fmt::print("No model folder provided\n");
        return 1;
    }
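    // Enable debug-level logging so backend initialization details are visible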
    spdlog::set_level(spdlog::level::debug);
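    // Resolve the user-supplied folder to an absolute path; the unqualified
    // absolute() call resolves to std::filesystem::absolute through ADL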
    const auto modelPath = absolute(std::filesystem::path(argv[1]));
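    // Try to bring up the llama.cpp backend from the given model folder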
    if (auto backend = huggingface::tgi::backends::llama::CreateLlamaCppBackend(modelPath); backend.has_value())
        fmt::print(fmt::emphasis::bold | fg(fmt::color::yellow), "Successfully initialized llama.cpp model from {}\n", modelPath);
    else {
        // Report the failure instead of exiting silently with status 0
        fmt::print(fmt::emphasis::bold | fg(fmt::color::red), "Failed to initialize llama.cpp model from {}\n", modelPath);
        return 1;
    }
}