import random
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.callbacks import EarlyStopping
# Simulated training data: 20 random features per sample, binary labels.
# Seed numpy's global generator so the synthetic dataset (and therefore
# every downstream number in this script's data pipeline) is reproducible.
np.random.seed(42)
X_train = np.random.rand(1000, 20)
y_train = np.random.randint(0, 2, 1000)
# Simulated test data drawn from the same distribution.
# NOTE(review): labels are pure noise, so no model can genuinely exceed
# ~50% accuracy here - the script only demonstrates the training plumbing.
X_test = np.random.rand(200, 20)
y_test = np.random.randint(0, 2, 200)
# Binary classifier: two ReLU hidden layers, each followed by dropout to
# reduce overfitting, and a single sigmoid unit emitting P(label == 1).
# The input width is derived from the training data rather than hard-coded,
# so the architecture automatically tracks any change in feature count.
n_features = X_train.shape[1]
model = Sequential([
    Dense(64, activation='relu', input_shape=(n_features,)),
    Dropout(0.3),
    Dense(32, activation='relu'),
    Dropout(0.3),
    Dense(1, activation='sigmoid'),
])
# binary_crossentropy matches the single-sigmoid output; accuracy is
# tracked so the evaluate() calls below report it alongside the loss.
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Stop training once validation loss has not improved for 5 consecutive
# epochs, and roll the weights back to the best epoch observed.
early_stop = EarlyStopping(
    monitor='val_loss',
    patience=5,
    restore_best_weights=True,
)

# Fit on 80% of the training set; the remaining 20% serves as the
# validation split that drives early stopping.
history = model.fit(
    X_train,
    y_train,
    epochs=50,
    batch_size=32,
    validation_split=0.2,
    callbacks=[early_stop],
    verbose=0,
)

# Baseline performance on the held-out test set, before fine-tuning.
loss, accuracy = model.evaluate(X_test, y_test, verbose=0)

# Brief fine-tuning pass on a small batch of simulated "real-world"
# samples (stand-in for user-feedback data).
X_real_world = np.random.rand(50, 20)
y_real_world = np.random.randint(0, 2, 50)
model.fit(X_real_world, y_real_world, epochs=5, batch_size=10, verbose=0)

# Re-measure on the same test set to compare against the baseline.
final_loss, final_accuracy = model.evaluate(X_test, y_test, verbose=0)

print(f'Test accuracy before fine-tuning: {accuracy:.2f}')
print(f'Test accuracy after fine-tuning: {final_accuracy:.2f}')