from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling
)
from peft import LoraConfig, get_peft_model, TaskType
import torch

# 1. Load the base model and tokenizer (the "-hf" repo is the transformers-compatible checkpoint)
model_name = "meta-llama/Llama-2-7b-hf"
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token  # Llama 2 has no pad token by default
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto"
)
# 2. Configure LoRA
lora_config = LoraConfig(
    r=8,                  # LoRA rank: higher rank adds capacity and trainable parameters, at the cost of memory and speed
    lora_alpha=16,        # LoRA scaling factor (alpha)
    target_modules=[      # Modules to wrap with LoRA adapters
        "q_proj",
        "v_proj",
        "k_proj",
        "o_proj",
        "gate_proj",
        "up_proj",
        "down_proj"
    ],
    lora_dropout=0.05,
    bias="none",
    task_type=TaskType.CAUSAL_LM
)

# 3. Apply LoRA to the model
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
# Only a small fraction of the 7B parameters is trainable (well under 1% with this config)
# 4. Training arguments
training_args = TrainingArguments(
    output_dir="./lora-output",
    per_device_train_batch_size=4,
    gradient_accumulation_steps=4,
    learning_rate=2e-4,   # LoRA typically tolerates a higher learning rate than full fine-tuning
    num_train_epochs=3,
    fp16=True,
    save_steps=100,
    logging_steps=10,
)
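
The Trainer below references tokenized_dataset, which is not defined in this listing. As a hedged sketch (the JSON file name and the "text" field are assumptions for illustration), it could be prepared along these lines:

from datasets import load_dataset

# Hypothetical training file; substitute your own data source
raw_dataset = load_dataset("json", data_files="train.json", split="train")

def tokenize_fn(example):
    # Tokenize the raw text; the data collator below builds causal-LM labels from input_ids
    return tokenizer(example["text"], truncation=True, max_length=512)

tokenized_dataset = raw_dataset.map(tokenize_fn, remove_columns=raw_dataset.column_names)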
# 5. Train
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset,
    data_collator=DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False),
)
trainer.train()
# 6. Merge the LoRA weights into the base model and save
model = model.merge_and_unload()
model.save_pretrained("./final-model")
tokenizer.save_pretrained("./final-model")
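
For reference, a minimal sketch of loading the merged model back for inference; this is not part of the original listing and assumes the model and tokenizer were saved to ./final-model as above:

from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Reload the merged model (no PEFT wrapper needed after merge_and_unload)
tokenizer = AutoTokenizer.from_pretrained("./final-model")
model = AutoModelForCausalLM.from_pretrained(
    "./final-model",
    torch_dtype=torch.float16,
    device_map="auto"
)

# Simple generation check with an arbitrary prompt
inputs = tokenizer("Explain LoRA in one sentence:", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))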