import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Build a small CNN image classifier. (Despite being "SSD-inspired", this is
# a single softmax classification head over conv/pool features — it has no
# anchor boxes or multi-scale detection layers.)
input_shape = (300, 300, 3)
num_classes = 21  # e.g. 20 Pascal VOC classes + background

inputs = layers.Input(shape=input_shape)

# Feature extractor: three conv/pool stages, with dropout between the later
# stages to curb overfitting on small datasets.
features = layers.Conv2D(32, 3, padding='same', activation='relu')(inputs)
features = layers.MaxPooling2D(2)(features)
features = layers.Conv2D(64, 3, padding='same', activation='relu')(features)
features = layers.MaxPooling2D(2)(features)
features = layers.Dropout(0.3)(features)
features = layers.Conv2D(128, 3, padding='same', activation='relu')(features)
features = layers.MaxPooling2D(2)(features)
features = layers.Dropout(0.3)(features)

# Classification head: flatten, one hidden dense layer, heavier dropout,
# then a softmax over the class set.
head = layers.Flatten()(features)
head = layers.Dense(256, activation='relu')(head)
head = layers.Dropout(0.4)(head)
outputs = layers.Dense(num_classes, activation='softmax')(head)

model = models.Model(inputs=inputs, outputs=outputs)
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=5e-4),  # 5e-4 == 0.0005
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)
# Input pipelines. Both generators read the same directory and use the same
# validation_split, so Keras deterministically carves out the last 20% of
# each class folder for validation; only the training stream is augmented.
train_datagen = ImageDataGenerator(
    rescale=1.0 / 255,
    rotation_range=15,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True,
    validation_split=0.2,
)

# 'train_dir' is expected to hold one subdirectory per class.
train_generator = train_datagen.flow_from_directory(
    'train_dir',
    target_size=(300, 300),
    batch_size=32,
    class_mode='categorical',
    subset='training',
)

# Validation pipeline: rescaling only — no augmentation — so validation
# metrics reflect unperturbed images.
val_datagen = ImageDataGenerator(
    rescale=1.0 / 255,
    validation_split=0.2,
)
validation_generator = val_datagen.flow_from_directory(
    'train_dir',
    target_size=(300, 300),
    batch_size=32,
    class_mode='categorical',
    subset='validation',
)
# Train for a modest number of epochs; `history` records per-epoch loss
# and accuracy for both the training and validation streams.
history = model.fit(
    train_generator,
    validation_data=validation_generator,
    epochs=15,
)