Merge pull request 'qwen_train' (#2) from qwen_train into main
Reviewed-on: #2
This commit is contained in:
commit 377cf77c16
4  .gitignore  (vendored)
@@ -6,4 +6,6 @@ data
 *.json
 models
 *.log
 research_notebook/data
+train/qwen/output
+train/qwen/mlruns
(requirements file, name not shown in this view)
@@ -2,4 +2,9 @@ python-dotenv==1.1.1
 hazm=0.10.0
 faiss-cpu==1.12.0
 sentence-transformers==5.1.2
 einops==0.8.1
+ms-swift
+mteb==1.39.7
+transformers
+liger-kernel
+deepspeed
35  train/qwen/a.sh  (new file)
@@ -0,0 +1,35 @@
+# pip install flash-attn --no-build-isolation
+
+
+nproc_per_node=1
+
+MLFLOW_TRACKING_URI=http://0.0.0.0:5004 \
+INFONCE_USE_BATCH=False \
+CUDA_VISIBLE_DEVICES=0 \
+NPROC_PER_NODE=$nproc_per_node \
+swift sft \
+    --model $(pwd)/../../data/models/Qwen3-Embedding-0.6B/model \
+    --task_type embedding \
+    --model_type qwen3_emb \
+    --train_type lora \
+    --lora_rank 8 \
+    --lora_alpha 16 \
+    --target_modules all-linear \
+    --dataset my_local_dataset \
+    --custom_register_path $(pwd)/../../data/dataset/my_dataset_register.py \
+    --split_dataset_ratio 0.005 \
+    --eval_strategy steps \
+    --output_dir output \
+    --eval_steps 1000 \
+    --num_train_epochs 1 \
+    --save_steps 1000 \
+    --save_total_limit 10 \
+    --per_device_train_batch_size 16 \
+    --per_device_eval_batch_size 16 \
+    --gradient_accumulation_steps 4 \
+    --learning_rate 2.4e-5 \
+    --loss_type infonce \
+    --label_names labels \
+    --dataloader_drop_last true \
+    --deepspeed zero3 \
+    --report_to mlflow
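Not part of the diff: a short Python sketch of the effective batch size implied by the flags in a.sh (the script trains on a single GPU, reserves 0.5% of the data for eval via --split_dataset_ratio, and logs to the MLflow server on port 5004). The hyperparameters below are copied from the script; the dataset size is a hypothetical used only to relate step counts to sample counts.

# Sketch only; hyperparameters come from a.sh, num_train_samples is hypothetical.
per_device_train_batch_size = 16
gradient_accumulation_steps = 4
num_gpus = 1  # CUDA_VISIBLE_DEVICES=0, nproc_per_node=1

effective_batch = per_device_train_batch_size * gradient_accumulation_steps * num_gpus
print(effective_batch)  # 64 samples per optimizer step

# With --num_train_epochs 1 and --dataloader_drop_last true, optimizer steps per
# epoch are roughly num_train_samples // effective_batch. If the checkpoint-3434
# referenced in merge_model.py below marks the end of that single epoch, the
# training split would hold on the order of 3434 * 64 ≈ 220k examples.
num_train_samples = 220_000  # hypothetical
print(num_train_samples // effective_batch)  # ~3437 steps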
24  train/qwen/merge_model.py  (new file)
@@ -0,0 +1,24 @@
+import json
+import numpy as np
+import os
+from peft import PeftModel
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+
+def merge(base_model_path, peft_model_path, save_path):
+    base_model = AutoModelForCausalLM.from_pretrained(base_model_path, torch_dtype="bfloat16")
+    ft_model = PeftModel.from_pretrained(base_model, peft_model_path)
+    ft_model = ft_model.merge_and_unload()
+    ft_model.save_pretrained(save_path)
+
+def main():
+    file_path = os.path.dirname(__file__)
+
+    base_model_path = file_path + "/../../data/models/Qwen3-Embedding-0.6B/model"
+    peft_model_path = file_path + "/output/v1-20251122-184545/checkpoint-3434"
+    save_path = file_path + "/output/v1-20251122-184545/merged_checkpoint-3434"
+    merge(base_model_path, peft_model_path, save_path)
+
+
+if __name__ == "__main__":
+    main()
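Not part of the diff: a minimal inference sketch for the merged checkpoint. It assumes the tokenizer is loaded from the base model directory, since merge_model.py saves only the merged weights and config, and it uses left padding with last-token pooling, following the usual Qwen3-Embedding usage; the paths mirror those in the script and the texts are placeholders.

# Sketch only: embed a few texts with the merged checkpoint written by merge_model.py.
import os
import torch
import torch.nn.functional as F
from transformers import AutoModel, AutoTokenizer

file_path = os.path.dirname(os.path.abspath(__file__))
base_model_path = file_path + "/../../data/models/Qwen3-Embedding-0.6B/model"
merged_path = file_path + "/output/v1-20251122-184545/merged_checkpoint-3434"

# The tokenizer is not part of the merged checkpoint, so it comes from the base model.
# Left padding keeps the last position equal to the final real token of every sequence
# (assumption: last-token pooling, as commonly used with Qwen3-Embedding).
tokenizer = AutoTokenizer.from_pretrained(base_model_path, padding_side="left")
# AutoModel loads only the transformer stack, even though merge_model.py went
# through AutoModelForCausalLM when saving.
model = AutoModel.from_pretrained(merged_path, torch_dtype=torch.bfloat16).eval()

texts = ["example query", "example document"]
batch = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
with torch.no_grad():
    hidden = model(**batch).last_hidden_state
embeddings = F.normalize(hidden[:, -1], dim=-1)  # one L2-normalized vector per text
print(embeddings.shape)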