Size mismatch for embed_out.weight: copying a param with shape torch.Size([0]) from checkpoint - Huggingface PyTorch

Problem description

I want to fine-tune an LLM. The fine-tuning itself runs successfully, but when I reload the model after saving it, I get an error. Here is the code:

import argparse
import numpy as np
import torch
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForCausalLM

from trl import DPOTrainer, DPOConfig
def preprocess_data(item):
    return {
        'prompt': 'Instruct: ' + item['prompt'] + '\n',
        'chosen': 'Output: ' + item['chosen'],
        'rejected': 'Output: ' + item['rejected']
    }        

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--epochs", type=int, default=1)
    parser.add_argument("--beta", type=float, default=0.1)
    parser.add_argument("--batch_size", type=int, default=4)
    parser.add_argument("--lr", type=float, default=1e-6)
    parser.add_argument("--seed", type=int, default=2003)
    parser.add_argument("--model_name", type=str, default="EleutherAI/pythia-14m")
    parser.add_argument("--dataset_name", type=str, default="jondurbin/truthy-dpo-v0.1")
    parser.add_argument("--local_rank", type=int, default=0)

    args = parser.parse_args()

    # Determine device based on local_rank
    device = torch.device("cuda", args.local_rank) if torch.cuda.is_available() else torch.device("cpu")


    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_name).to(device)
    ref_model = AutoModelForCausalLM.from_pretrained(args.model_name).to(device)

    dataset = load_dataset(args.dataset_name, split="train")
    dataset = dataset.map(preprocess_data)

    # Split the dataset into training and validation sets
    dataset = dataset.train_test_split(test_size=0.1, seed=args.seed)
    train_dataset = dataset['train']
    val_dataset = dataset['test']

    training_args = DPOConfig(
        learning_rate=args.lr,
        num_train_epochs=args.epochs,
        per_device_train_batch_size=args.batch_size,
        logging_steps=10,
        remove_unused_columns=False,
        max_length=1024,
        max_prompt_length=512,
        fp16=True        
    )

    

    # Verify and print embedding dimensions before finetuning
    print("Base model embedding dimension:", model.config.hidden_size)

    model.train()
    ref_model.eval()

    dpo_trainer = DPOTrainer(
        model,
        ref_model,
        beta=args.beta,
        train_dataset=train_dataset,
        eval_dataset=val_dataset,
        tokenizer=tokenizer,
        args=training_args,
    )

    dpo_trainer.train()
    # Evaluate
    evaluation_results = dpo_trainer.evaluate()
    print("Evaluation Results:", evaluation_results)

    save_model_name = 'finetuned_model'
    model.save_pretrained(save_model_name)

if __name__ == "__main__":
    main()

The error I get is as follows:

    return model_class.from_pretrained(
  File "/.local/lib/python3.10/site-packages/transformers/modeling_utils.py", line 3838, in from_pretrained
    ) = cls._load_pretrained_model(
  File "/.local/lib/python3.10/site-packages/transformers/modeling_utils.py", line 4349, in _load_pretrained_model
    raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}")
RuntimeError: Error(s) in loading state_dict for GPTNeoXForCausalLM:
    size mismatch for gpt_neox.embed_in.weight: copying a param with shape torch.Size([0]) from checkpoint, the shape in current model is torch.Size([50304, 128]).
    size mismatch for embed_out.weight: copying a param with shape torch.Size([0]) from checkpoint, the shape in current model is torch.Size([50304, 128]).
    You may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method.

After fine-tuning, the model works perfectly, but after reloading the saved fine-tuned model it does not. Any idea why this error occurs when the model is reloaded?

pytorch huggingface-transformers large-language-model huggingface
1 Answer

Instead of

model.save_pretrained(save_model_name)

try this:

dpo_trainer.save_model(save_model_name)
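
For context (this explanation and the sketch below are additions, not part of the original answer): empty torch.Size([0]) tensors in a saved checkpoint often indicate that model.save_pretrained() was called on a model object whose parameters had been sharded or offloaded by the training wrapper (for example under fp16/accelerate preparation or DeepSpeed ZeRO partitioning). Trainer.save_model() goes through the trainer and its accelerator, unwraps the model, and writes the full state dict, which is likely why saving via dpo_trainer.save_model() produces a checkpoint that loads cleanly. A minimal reload sketch, assuming the model was saved into the 'finetuned_model' directory as above and that the tokenizer was also saved there with tokenizer.save_pretrained(save_model_name) (that extra save is an assumption here, not in the original script):

from transformers import AutoTokenizer, AutoModelForCausalLM

save_model_name = 'finetuned_model'

# Reload the fine-tuned weights written by dpo_trainer.save_model(...)
model = AutoModelForCausalLM.from_pretrained(save_model_name)

# This only works if the tokenizer files were saved into the same directory;
# otherwise load the tokenizer from the original base model name instead.
tokenizer = AutoTokenizer.from_pretrained(save_model_name)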