from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score


def _evaluate(true_labels, predictions):
    """Return (accuracy, precision, recall, f1) for *predictions* vs *true_labels*.

    All four values are scaled to percentages (0-100) to match the report format.
    """
    return (
        accuracy_score(true_labels, predictions) * 100,
        precision_score(true_labels, predictions) * 100,
        recall_score(true_labels, predictions) * 100,
        f1_score(true_labels, predictions) * 100,
    )


# Simulated predictions and true labels for demonstration
true_labels = [0, 1, 0, 1, 1, 0, 1, 0, 1, 0]
base_model_preds = [0, 1, 0, 0, 1, 0, 1, 0, 1, 1]
fine_tuned_preds = [0, 1, 0, 1, 1, 0, 1, 0, 1, 0]

# Compute metrics once per model via the shared helper (previously duplicated inline).
base_accuracy, base_precision, base_recall, base_f1 = _evaluate(true_labels, base_model_preds)
ft_accuracy, ft_precision, ft_recall, ft_f1 = _evaluate(true_labels, fine_tuned_preds)

print(f"Base Model - Accuracy: {base_accuracy:.1f}%, Precision: {base_precision:.1f}%, Recall: {base_recall:.1f}%, F1-score: {base_f1:.1f}%")
print(f"Fine-tuned Model - Accuracy: {ft_accuracy:.1f}%, Precision: {ft_precision:.1f}%, Recall: {ft_recall:.1f}%, F1-score: {ft_f1:.1f}%")