import torch
import torch.distributed

from typing import List, Optional, Tuple

from transformers import (
    AutoTokenizer,
    AutoConfig,
)

from text_generation_server.models import Seq2SeqLM
from text_generation_server.models.custom_modeling.t5_modeling import (
    T5ForConditionalGeneration,
)
from text_generation_server.utils import (
    initialize_torch_distributed,
    weight_files,
    Weights,
)


class T5Sharded(Seq2SeqLM):
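    """T5 sharded across a torch.distributed process group: every rank loads
    only its slice of each weight and runs tensor-parallel inference."""
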
    def __init__(
        self,
        model_id: str,
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
        use_medusa: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        trust_remote_code: bool = False,
    ):
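        # One process per shard: the rank selects this process's GPU below.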
        self.process_group, rank, world_size = initialize_torch_distributed()
        if torch.cuda.is_available():
            device = torch.device(f"cuda:{rank}")
            dtype = torch.float16 if dtype is None else dtype
        else:
            device = torch.device("cpu")
            dtype = torch.float32 if dtype is None else dtype
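
        # Record quantization / Medusa settings on the config so the custom
        # modeling code can pick them up.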
        config = AutoConfig.from_pretrained(
            model_id,
            revision=revision,
            trust_remote_code=trust_remote_code,
        )
        config.quantize = quantize
        config.use_medusa = use_medusa
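
        # Pad and truncate on the left so the most recent tokens stay aligned
        # at the generation end of each batched sequence.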
        tokenizer = AutoTokenizer.from_pretrained(
            model_id,
            revision=revision,
            padding_side="left",
            truncation_side="left",
            trust_remote_code=trust_remote_code,
        )
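        # T5 tokenizers define no BOS token; reuse the decoder start token id.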
        tokenizer.bos_token_id = config.decoder_start_token_id
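
        # Synchronize all ranks, then locate the safetensors shards on disk.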
        torch.distributed.barrier(group=self.process_group)
        filenames = weight_files(model_id, revision=revision, extension=".safetensors")
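        # The alias lets lookups for the tied encoder/decoder embeddings
        # resolve to the single `shared.weight` tensor in the checkpoint.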
        weights = Weights(
            filenames,
            device=device,
            dtype=dtype,
            process_group=self.process_group,
            aliases={
                "shared.weight": [
                    "encoder.embed_tokens.weight",
                    "decoder.embed_tokens.weight",
                ]
            },
        )
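
        # Each rank materializes only its shard of the model.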
        model = T5ForConditionalGeneration(config, weights)

        torch.distributed.barrier(group=self.process_group)
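        # Bypass Seq2SeqLM.__init__ (it would load an unsharded model) and
        # call the base Model constructor directly.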
        super(Seq2SeqLM, self).__init__(
            model=model,
            tokenizer=tokenizer,
            requires_padding=True,
            dtype=dtype,
            device=device,
            rank=rank,
            world_size=world_size,
        )

    def forward(
        self,
        input_ids,
        attention_mask,
        decoder_input_ids,
        decoder_attention_mask: Optional[torch.Tensor],
        encoder_last_hidden_state: Optional[torch.Tensor],
        past_key_values: Optional[
            List[Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]]
        ] = None,
    ) -> Tuple[
        torch.Tensor,
        Optional[torch.Tensor],
        torch.Tensor,
        List[Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]],
    ]:
        # Model Forward
        outputs, speculative_logits = self.model.forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            encoder_outputs=encoder_last_hidden_state,
            past_key_values=past_key_values,
            use_cache=True,
        )
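
        # Logits plus the cached state needed to resume decoding next step.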
        return (
            outputs.logits,
            speculative_logits,
            outputs.encoder_last_hidden_state,
            outputs.past_key_values,
        )