fix microsoft/Phi-3-mini-4k-instruct crash in batch.slots[batch.slot_indices] (#2148)

* fix microsoft/Phi-3-mini-4k-instruct crash in batch.slots[batch.slot_indices]

Signed-off-by: Wang, Yi A <yi.a.wang@intel.com>

* Apply suggestions from code review

---------

Signed-off-by: Wang, Yi A <yi.a.wang@intel.com>
Co-authored-by: Nicolas Patry <patry.nicolas@protonmail.com>
Wang, Yi, 2024-07-01 17:27:53 +08:00 (committed by GitHub)
parent fb98ab273f
commit 6ea570ddfe
1 changed file with 3 additions and 3 deletions

@@ -17,7 +17,7 @@ use text_generation_router::{
     server, HubModelInfo, HubPreprocessorConfig, HubProcessorConfig, HubTokenizerConfig,
 };
 use thiserror::Error;
-use tokenizers::{processors::template::TemplateProcessing, Tokenizer};
+use tokenizers::{processors::template::TemplateProcessing, Tokenizer, PostProcessor};
 use tower_http::cors::AllowOrigin;
 use tracing_subscriber::layer::SubscriberExt;
 use tracing_subscriber::util::SubscriberInitExt;
@@ -309,7 +309,7 @@ async fn main() -> Result<(), RouterError> {
     let mut tokenizer = Tokenizer::from_file(filename).ok();
     if let Some(tokenizer) = &mut tokenizer {
         if let Some(class) = &tokenizer_config.tokenizer_class {
-            if (class == "LlamaTokenizer" || class == "LlamaTokenizerFast") && tokenizer.get_post_processor().is_none() {
+            if (class == "LlamaTokenizer" || class == "LlamaTokenizerFast"){
                 if let Ok(post_processor) = create_post_processor(tokenizer, &tokenizer_config) {
                     tracing::info!("Overriding LlamaTokenizer with TemplateProcessing to follow python override defined in https://github.com/huggingface/transformers/blob/4aa17d00690b7f82c95bb2949ea57e22c35b4336/src/transformers/models/llama/tokenization_llama_fast.py#L203-L205");
                     tokenizer.with_post_processor(post_processor);
@@ -577,7 +577,7 @@ pub fn create_post_processor(
     if add_bos_token {
         if let Some(bos) = bos_token {
-            single.push(format!("{}:1", bos));
+            pair.push(format!("{}:1", bos));
         }
     }
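
For reference, a minimal sketch of the template shape the corrected branch produces: the second-sequence BOS is pushed into `pair`, so `single` stays "BOS + $A" and does not pick up a stray trailing BOS. This is not the repository's exact create_post_processor; it assumes the tokenizers crate's TemplateProcessing builder, a Llama-style "<s>" BOS token, a hypothetical id of 1 for it (the real code resolves the id via tokenizer.token_to_id), and a made-up function name.

// Minimal sketch, not the repository's exact create_post_processor.
use tokenizers::processors::template::TemplateProcessing;

fn build_llama_style_post_processor() -> Result<TemplateProcessing, Box<dyn std::error::Error>> {
    let bos = "<s>";
    let bos_id: u32 = 1; // hypothetical id, normally looked up in the vocabulary

    // Single sequence: BOS + sequence A, both with type id 0.
    let single = vec![format!("{}:0", bos), "$A:0".to_string()];

    // Pair: BOS + sequence A (type id 0), then BOS + sequence B (type id 1).
    // This is where the fixed line pushes "{}:1" into `pair` rather than `single`.
    let pair = vec![
        format!("{}:0", bos),
        "$A:0".to_string(),
        format!("{}:1", bos),
        "$B:1".to_string(),
    ];

    let post_processor = TemplateProcessing::builder()
        .try_single(single)?
        .try_pair(pair)?
        .special_tokens(vec![(bos.to_string(), bos_id)])
        .build()?;

    Ok(post_processor)
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let _post_processor = build_llama_style_post_processor()?;
    Ok(())
}

The resulting processor would then be installed the same way the hunk above does, via tokenizer.with_post_processor(post_processor), so the router tokenizes prompts with the same BOS placement the Python server uses.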