train-deepspeed.py
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments, TrainerCallback
from huggingface_hub import login
from datasets import load_dataset
import deepspeed
import os
import time
# Log in to the Hugging Face Hub (read the access token from the environment rather than hardcoding it)
hf_token = os.environ.get("HF_TOKEN")
login(token=hf_token)
# Model configuration
model_id = "meta-llama/Llama-3.2-3B"
# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_id)
if tokenizer.pad_token is None:
    tokenizer.add_special_tokens({'pad_token': tokenizer.eos_token})

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16
)
model.gradient_checkpointing_enable()
# DeepSpeed configuration file (consumed by the Trainer via TrainingArguments below)
ds_config = "ds_config.json"
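# Note: ds_config.json itself is not shown in this repository. As a rough,
# illustrative sketch (an assumption, not the actual file), a minimal ZeRO
# stage-2 config consistent with the settings below might look like:
#   {
#     "bf16": {"enabled": true},
#     "zero_optimization": {"stage": 2},
#     "train_micro_batch_size_per_gpu": "auto",
#     "gradient_accumulation_steps": "auto"
#   }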
# Prepare the dataset
dataset = load_dataset('cnn_dailymail', '3.0.0')
small_dataset = dataset['train'].select(range(5000))
train_val_split = small_dataset.train_test_split(test_size=0.1)
train_dataset = train_val_split['train']
validation_dataset = train_val_split['test']
# Tokenization
def tokenize_function(examples):
    inputs = tokenizer(
        examples["article"],
        return_tensors="pt",
        truncation=True,
        padding="max_length",
        max_length=512
    )
    labels = tokenizer(
        examples["highlights"],
        return_tensors="pt",
        truncation=True,
        padding="max_length",
        max_length=512
    )
    labels["input_ids"] = torch.roll(labels["input_ids"], shifts=-1, dims=-1)
    labels["input_ids"][:, -1] = tokenizer.pad_token_id
    # Convert tensors to lists for compatibility with datasets.map
    return {
        "input_ids": inputs["input_ids"].tolist(),
        "attention_mask": inputs["attention_mask"].tolist(),
        "labels": labels["input_ids"].tolist(),
    }
tokenized_train_dataset = train_dataset.map(tokenize_function, batched=True)
tokenized_val_dataset = validation_dataset.map(tokenize_function, batched=True)
# Training arguments with DeepSpeed
training_args = TrainingArguments(
    output_dir="/work/nvme/bdof/nkanamarla",
    num_train_epochs=1,
    save_steps=1,
    save_total_limit=10,  # Keep only the 10 most recent checkpoints; remove later to save space
    bf16=True,  # Match the bfloat16 dtype the model was loaded in
    logging_dir="/projects/bdof/nkanamarla/deepspeed-logs",
    deepspeed=ds_config,  # Use the DeepSpeed config
    remove_unused_columns=False,
    save_strategy="steps",  # With save_steps=1, checkpoint at every step
    evaluation_strategy="epoch"
)
# Custom callback for profiling checkpoint saves
class CheckpointTimeCallback(TrainerCallback):
    def on_train_begin(self, args, state, control, **kwargs):
        self.last_save_time = time.time()
        if state.is_world_process_zero:
            print("Training started.")

    def on_save(self, args, state, control, **kwargs):
        # on_save fires after the Trainer has finished writing a checkpoint
        now = time.time()
        if state.is_world_process_zero:
            print(f"Checkpoint saved at step {state.global_step}; "
                  f"{now - self.last_save_time:.2f}s since the previous save")
        self.last_save_time = now
        return control
# Initialize Trainer with DeepSpeed and Custom Callback
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_train_dataset,
    eval_dataset=tokenized_val_dataset,
    callbacks=[CheckpointTimeCallback()],  # Add the custom callback here
)
# Training loop
trainer.train()
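# Example launch command (illustrative only; the GPU count and any hostfile
# depend on the cluster being used, and are not specified in this script):
#   deepspeed --num_gpus=4 train-deepspeed.py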