fix: fix local loading for .bin models (#1419)
parent 3f9b3f4539
commit 564f2a3b75
@@ -198,7 +198,7 @@ def download_weights(
             if not extension == ".safetensors" or not auto_convert:
                 raise e
 
-    else:
+    elif (Path(model_id) / "adapter_config.json").exists():
         # Try to load as a local PEFT model
         try:
             utils.download_and_unload_peft(
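The first hunk carries the actual fix: in download_weights, the unconditional `else:` routed every local model directory through the PEFT path, which fails for plain .bin checkpoints. With the `elif`, that path is only attempted when an adapter_config.json sits next to the weights. Below is a minimal sketch of the resulting decision; classify_local_model is a hypothetical helper written for illustration, not code from this repository.

    from pathlib import Path

    def classify_local_model(model_id: str) -> str:
        # Hypothetical helper mirroring the fixed branching in download_weights.
        model_dir = Path(model_id)
        if not model_dir.is_dir():
            # Not a local directory: the real code falls back to downloading
            # weights from the hub here (omitted in this sketch).
            return "remote"
        if (model_dir / "adapter_config.json").exists():
            # Only directories that actually ship a PEFT adapter go through
            # utils.download_and_unload_peft to merge the adapter.
            return "peft"
        # A plain local checkpoint (.bin or .safetensors) is now left alone
        # instead of being forced through the PEFT loader.
        return "plain"

Called on a local directory that only holds a pytorch_model.bin, this sketch returns "plain" instead of taking the PEFT branch, which is the behaviour #1419 restores.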
@@ -10,8 +10,7 @@ from peft import AutoPeftModelForCausalLM, AutoPeftModelForSeq2SeqLM
 def download_and_unload_peft(model_id, revision, trust_remote_code):
     torch_dtype = torch.float16
 
-    logger.info("Peft model detected.")
-    logger.info("Loading the model it might take a while without feedback")
+    logger.info("Trying to load a Peft model. It might take a while without feedback")
     try:
         model = AutoPeftModelForCausalLM.from_pretrained(
             model_id,
@@ -28,7 +27,7 @@ def download_and_unload_peft(model_id, revision, trust_remote_code):
             trust_remote_code=trust_remote_code,
             low_cpu_mem_usage=True,
         )
-    logger.info(f"Loaded.")
+    logger.info("Peft model detected.")
     logger.info(f"Merging the lora weights.")
 
     base_model_id = model.peft_config["default"].base_model_name_or_path
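The two download_and_unload_peft hunks adjust the logging around the same flow: "Peft model detected." is no longer printed before the load is attempted; a single "Trying to load a Peft model..." message precedes the attempt, and the detection message is only emitted once from_pretrained has succeeded. A rough, self-contained sketch of that ordering, with load_adapter standing in for the real AutoPeftModelForCausalLM.from_pretrained call (not real API):

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    def load_adapter(model_id: str):
        # Stand-in for AutoPeftModelForCausalLM.from_pretrained; purely illustrative.
        return object()

    def download_and_unload(model_id: str):
        # Mirrors the fixed ordering: announce the attempt first, and only claim
        # a PEFT model was detected once loading has actually succeeded.
        logger.info("Trying to load a Peft model. It might take a while without feedback")
        model = load_adapter(model_id)
        logger.info("Peft model detected.")
        logger.info("Merging the lora weights.")
        return model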