from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TrainingArguments, EarlyStoppingCallback
import numpy as np
from sklearn.metrics import accuracy_score
def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    return {"accuracy": accuracy_score(labels, predictions)}
# Load a 5% subsample of IMDB; shuffle before slicing, since the raw train
# split is grouped by label and an unshuffled head slice would be single-class
raw_datasets = (
    load_dataset('imdb', split='train')
    .shuffle(seed=42)
    .select(range(1250))  # 5% of the 25k training examples
    .train_test_split(test_size=0.2, seed=42)
)
# Load tokenizer and model
model_name = 'distilbert-base-uncased'
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Raise dropout at load time: DistilBERT builds its dropout layers from the
# config, so these values must be passed to from_pretrained rather than set
# on model.config after the model is instantiated
model = AutoModelForSequenceClassification.from_pretrained(
    model_name, num_labels=2, dropout=0.3, attention_dropout=0.3
)
# Tokenize function
def tokenize_function(examples):
    return tokenizer(examples['text'], padding='max_length', truncation=True, max_length=128)
# Tokenize datasets
tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
# Set format for PyTorch
tokenized_datasets.set_format('torch', columns=['input_ids', 'attention_mask', 'label'])
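# Note: padding='max_length' pads every example to 128 tokens. Passing a
# DataCollatorWithPadding to the Trainer instead would pad dynamically per
# batch, which is usually faster; fixed-length padding is kept here for simplicity.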
# Training arguments: low learning rate and weight decay; early stopping is
# wired up via an EarlyStoppingCallback on the Trainer below
training_args = TrainingArguments(
    output_dir='./results',
    evaluation_strategy='epoch',  # renamed to eval_strategy in recent transformers releases
    save_strategy='epoch',
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    num_train_epochs=4,
    weight_decay=0.01,
    load_best_model_at_end=True,  # required for early stopping
    metric_for_best_model='accuracy',
    save_total_limit=1,
    seed=42
)
# Define Trainer with early stopping: halt if accuracy does not improve for
# two consecutive evaluations
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets['train'],
    eval_dataset=tokenized_datasets['test'],
    compute_metrics=compute_metrics,
    callbacks=[EarlyStoppingCallback(early_stopping_patience=2)]
)
# Train model
trainer.train()
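# Persist the best checkpoint for later reuse; with load_best_model_at_end=True
# the model in memory is already the best one ('./best_model' is an illustrative path)
trainer.save_model('./best_model')
tokenizer.save_pretrained('./best_model')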
# Evaluate model
metrics = trainer.evaluate()
print(metrics)
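# Quick sanity check on a single review (a minimal sketch; the text is illustrative)
import torch

sample = "A surprisingly moving film with a terrific lead performance."
inputs = tokenizer(sample, return_tensors='pt', truncation=True, max_length=128).to(model.device)
with torch.no_grad():
    logits = model(**inputs).logits
prediction = logits.argmax(dim=-1).item()
print('positive' if prediction == 1 else 'negative')  # IMDB labels: 0 = negative, 1 = positive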