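"""Train a SPLADE sparse retrieval model on a perturbed training dataset.

The script wires tevatron's training loop to denserr's PerturbedHFTrainDataset /
PerturbedTrainDataset wrappers and adds a FLOPS sparsity regularizer on the
query and passage representations (see SpladeTrainer.compute_loss).

Illustrative invocation (a sketch, not the canonical command: the base model and
hyperparameter values below are examples, and the flags that tell tevatron/denserr
where to find the training data are omitted because they depend on DataArguments):

    python train_splade.py \
        --output_dir ./splade_out \
        --model_name_or_path distilbert-base-uncased \
        --per_device_train_batch_size 8 \
        --learning_rate 5e-6 \
        --num_train_epochs 3 \
        --q_max_len 32 \
        --p_max_len 128 \
        --q_flops_loss_factor 4 \
        --p_flops_loss_factor 32 \
        --fp16
"""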
import logging
import os
import sys
from dataclasses import dataclass, field

import torch
from transformers import AutoConfig, AutoTokenizer
from transformers import (
    HfArgumentParser,
    set_seed,
)

from tevatron.arguments import ModelArguments, DataArguments, TevatronTrainingArguments
from tevatron.data import TrainDataset, QPCollator
from tevatron.modeling import SpladeModel
from tevatron.trainer import TevatronTrainer
from tevatron.datasets import HFTrainDataset

from denserr.train.perturbed_train_dataset import (
    PerturbedHFTrainDataset,
    PerturbedTrainDataset,
)
from denserr.model.pt_splade_train import SpladeForTrain

logger = logging.getLogger(__name__)

@dataclass
class SpladeTrainingArguments(TevatronTrainingArguments):
    # Weights for the FLOPS sparsity regularizer on query / passage representations.
    q_flops_loss_factor: float = field(default=4)
    p_flops_loss_factor: float = field(default=32)


class SpladeTrainer(TevatronTrainer):
    def __init__(self, *args, **kwargs):
        super(SpladeTrainer, self).__init__(*args, **kwargs)
        if self.args.negatives_x_device:
            self.world_size = torch.distributed.get_world_size()

    @staticmethod
    def _flops(inputs):
        # FLOPS regularizer: sum over vocabulary dimensions of the squared mean
        # absolute activation across the batch, pushing representations toward sparsity.
        return torch.sum(torch.mean(torch.abs(inputs), dim=0) ** 2)

    def compute_loss(self, model, inputs):
        query, passage = inputs
        output = model(query=query, passage=passage)
        q_reps = output.q_reps
        p_reps = output.p_reps
        loss = output.loss
        # Total loss = ranking loss + weighted FLOPS penalties on query and passage reps.
        q_flops_loss = self.args.q_flops_loss_factor * self._flops(q_reps)
        p_flops_loss = self.args.p_flops_loss_factor * self._flops(p_reps)
        if self.args.negatives_x_device:
            q_flops_loss *= self.world_size
            p_flops_loss *= self.world_size
        return loss + q_flops_loss + p_flops_loss


TrainingArguments = SpladeTrainingArguments

def main():
    parser = HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        model_args, data_args, training_args = parser.parse_json_file(
            json_file=os.path.abspath(sys.argv[1])
        )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
        model_args: ModelArguments
        data_args: DataArguments
        training_args: TrainingArguments

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. "
            "Use --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    logger.info("Training/evaluation parameters %s", training_args)
    logger.info("MODEL parameters %s", model_args)

    set_seed(training_args.seed)

    num_labels = 1
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name
        if model_args.tokenizer_name
        else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=False,
    )
    model = SpladeForTrain(model_args.model_name_or_path, agg="max")

    train_dataset = PerturbedHFTrainDataset(
        tokenizer=tokenizer,
        data_args=data_args,
        cache_dir=data_args.data_cache_dir or model_args.cache_dir,
    )
    # In distributed runs, non-main ranks wait until the main process has
    # finished the dataset mapping before building their own copy.
    if training_args.local_rank > 0:
        print("Waiting for main process to perform the mapping")
        torch.distributed.barrier()
    train_dataset = PerturbedTrainDataset(data_args, train_dataset.process(), tokenizer)
    if training_args.local_rank == 0:
        print("Loading results from main process")
        torch.distributed.barrier()

    trainer = SpladeTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        data_collator=QPCollator(
            tokenizer, max_p_len=data_args.p_max_len, max_q_len=data_args.q_max_len
        ),
    )
    train_dataset.trainer = trainer

    trainer.train()  # TODO: resume training
    trainer.save_model()
    if trainer.is_world_process_zero():
        tokenizer.save_pretrained(training_args.output_dir)


if __name__ == "__main__":
    main()