hf_text-generation-inference/server/text_generation/models/__init__.py

import torch

from transformers import AutoConfig
from typing import Optional

from text_generation.models.model import Model
from text_generation.models.causal_lm import CausalLM
from text_generation.models.bloom import BLOOM, BLOOMSharded
from text_generation.models.seq2seq_lm import Seq2SeqLM
from text_generation.models.galactica import Galactica, GalacticaSharded
from text_generation.models.santacoder import SantaCoder
from text_generation.models.gpt_neox import GPTNeox, GPTNeoxSharded

__all__ = [
    "Model",
    "BLOOM",
    "BLOOMSharded",
    "CausalLM",
    "Seq2SeqLM",
    "SantaCoder",
    "get_model",
]

# The flag below controls whether to allow TF32 on matmul. This flag defaults to False
# in PyTorch 1.12 and later.
torch.backends.cuda.matmul.allow_tf32 = True

# The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True.
torch.backends.cudnn.allow_tf32 = True


def get_model(
    model_id: str, revision: Optional[str], sharded: bool, quantize: bool
) -> Model:
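    """Return the text-generation model implementation for `model_id`.

    Dispatches on the checkpoint config's `model_type` (BLOOM, GPT-NeoX) or on
    the model id itself (Galactica, SantaCoder), picking the sharded variant
    when `sharded` is True, and otherwise falls back to the generic
    CausalLM / Seq2SeqLM wrappers built on AutoModel.
    """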
    config = AutoConfig.from_pretrained(model_id, revision=revision)

    if config.model_type == "bloom":
        if sharded:
            return BLOOMSharded(model_id, revision, quantize=quantize)
        else:
            return BLOOM(model_id, revision, quantize=quantize)
    elif config.model_type == "gpt_neox":
        if sharded:
            return GPTNeoxSharded(model_id, revision, quantize=quantize)
        else:
            return GPTNeox(model_id, revision, quantize=quantize)
    elif model_id.startswith("facebook/galactica"):
        if sharded:
            return GalacticaSharded(model_id, revision, quantize=quantize)
        else:
            return Galactica(model_id, revision, quantize=quantize)
    elif "santacoder" in model_id:
        return SantaCoder(model_id, revision, quantize=quantize)
    else:
        if sharded:
            raise ValueError("sharded is not supported for AutoModel")
        try:
            return CausalLM(model_id, revision, quantize=quantize)
        except Exception:
            return Seq2SeqLM(model_id, revision, quantize=quantize)
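# Illustrative usage sketch (assumed caller behavior, not stated in this file):
# the serving process would typically resolve its model once at startup, e.g.
#   model = get_model("bigscience/bloom-560m", revision=None, sharded=False, quantize=False)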