import tensorflow as tf
from tensorflow.keras import layers, models
# ---------------------------------------------------------------
# Data loading and preprocessing.
# Loads CIFAR-10, keeps only the classes listed in `classes_to_keep`,
# remaps the surviving labels to a dense 0..K-1 range (a no-op for
# the contiguous default [0..4], but correct for any subset),
# one-hot encodes the labels, and scales pixels to [0, 1].
# ---------------------------------------------------------------
# Load example dataset (replace with actual data loading)
(X_train, y_train), (X_val, y_val) = tf.keras.datasets.cifar10.load_data()
# Filter dataset to a subset of classes for simplicity
import numpy as np
classes_to_keep = [0, 1, 2, 3, 4]
train_filter = np.isin(y_train, classes_to_keep).flatten()
val_filter = np.isin(y_val, classes_to_keep).flatten()
X_train, y_train = X_train[train_filter], y_train[train_filter]
X_val, y_val = X_val[val_filter], y_val[val_filter]
# Derive the class count from the list instead of hard-coding it,
# and remap original labels to 0..num_classes-1 so to_categorical
# stays correct even for a non-contiguous subset (e.g. [0, 3, 7]).
num_classes = len(classes_to_keep)
label_map = {orig: new for new, orig in enumerate(classes_to_keep)}
y_train = np.vectorize(label_map.get)(y_train)
y_val = np.vectorize(label_map.get)(y_val)
# Convert labels to one-hot vectors for categorical_crossentropy
y_train_cat = tf.keras.utils.to_categorical(y_train, num_classes)
y_val_cat = tf.keras.utils.to_categorical(y_val, num_classes)
# Normalize images from uint8 [0, 255] to float32 [0, 1]
X_train = X_train.astype('float32') / 255.0
X_val = X_val.astype('float32') / 255.0
# Define a small CNN with dropout and reduced complexity:
# two conv/pool/dropout stages followed by a dense classifier head.
model = models.Sequential([
    # Explicit Input layer instead of the deprecated `input_shape`
    # kwarg on the first Conv2D (Keras 3 warns about the kwarg).
    layers.Input(shape=X_train.shape[1:]),
    layers.Conv2D(32, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Dropout(0.25),   # light dropout after each pooling stage
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Dropout(0.25),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dropout(0.5),    # heavier dropout before the classifier
    layers.Dense(num_classes, activation='softmax'),
])
# Compile with Adam and categorical cross-entropy (labels are one-hot).
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
model.compile(
    optimizer=optimizer,
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)
# Stop once validation loss has not improved for 5 epochs and roll
# back to the best weights observed during training.
early_stop = tf.keras.callbacks.EarlyStopping(
    monitor='val_loss',
    patience=5,
    restore_best_weights=True,
)
history = model.fit(
    X_train,
    y_train_cat,
    epochs=50,
    batch_size=64,
    validation_data=(X_val, y_val_cat),
    callbacks=[early_stop],
)