import tensorflow as tf
from tensorflow.keras import layers, models
# Fetch CIFAR-10: 50k training and 10k held-out images, 32x32 RGB, labels 0-9.
# NOTE(review): the "val" split here is CIFAR-10's test split — confirm that
# using it for validation (rather than a carved-out train subset) is intended.
(train_images, train_labels), (val_images, val_labels) = tf.keras.datasets.cifar10.load_data()
# Convert uint8 pixels to float32 and scale into [0, 1].
train_images, val_images = (
    split.astype('float32') / 255.0 for split in (train_images, val_images)
)
# On-the-fly augmentation: random horizontal mirroring plus small rotations
# and zooms (both up to ~10%). These Keras preprocessing layers are active
# only when the model runs in training mode, so inference is unaffected.
_augmentation_layers = [
    layers.RandomFlip('horizontal'),
    layers.RandomRotation(0.1),
    layers.RandomZoom(0.1),
]
data_augmentation = tf.keras.Sequential(_augmentation_layers)
# Small CNN classifier: two conv/pool stages, then a dense head over 10 classes.
model = models.Sequential()
model.add(layers.Input(shape=(32, 32, 3)))
# Augmentation sits inside the model so raw images can be fed directly to fit().
model.add(data_augmentation)
model.add(layers.Conv2D(32, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D())
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D())
# Flatten spatial features and classify; softmax yields per-class probabilities.
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
# Sparse CE matches the integer (non-one-hot) labels that cifar10.load_data
# returns; it pairs with the softmax output of the final Dense layer.
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)
# Train for 10 epochs, evaluating on the held-out split after each epoch.
history = model.fit(
    train_images,
    train_labels,
    batch_size=64,
    epochs=10,
    validation_data=(val_images, val_labels),
)