import tensorflow as tf
from tensorflow.keras import layers, models
import tensorflow_datasets as tfds
# Load CIFAR-10 dataset
(ds_train, ds_test), ds_info = tfds.load('cifar10', split=['train', 'test'], as_supervised=True, with_info=True)
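# Optional sanity check on what tfds returned; CIFAR-10 ships 50,000 train /
# 10,000 test images of shape (32, 32, 3) across 10 classes.
print(ds_info.features['image'].shape, ds_info.features['label'].num_classes)
print(ds_info.splits['train'].num_examples, ds_info.splits['test'].num_examples)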
# Normalize images
def normalize_img(image, label):
    # Scale pixel values from [0, 255] to [0.0, 1.0]
    return tf.cast(image, tf.float32) / 255.0, label
# Note: TensorFlow Addons does not provide an AutoAugment CIFAR-10 policy
# (tfa.image has no autoaugment module, and the package is deprecated); the
# learned policy ships with the TF Model Garden instead. As a simple stand-in,
# apply the classic CIFAR recipe: random horizontal flip plus pad-and-crop.
def augment(image, label):
    image = tf.image.random_flip_left_right(image)
    # Zero-pad to 40x40, then randomly crop back to 32x32
    image = tf.image.resize_with_crop_or_pad(image, 40, 40)
    image = tf.image.random_crop(image, size=[32, 32, 3])
    return image, label
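# Optional: run one example through the pipeline to confirm the augmented
# image keeps its (32, 32, 3) shape and float32 dtype.
for image, label in ds_train.take(1):
    aug_image, _ = augment(*normalize_img(image, label))
    print(aug_image.shape, aug_image.dtype)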
# Prepare training dataset: normalize once (cached), then augment per epoch
batch_size = 64
train_ds = ds_train.map(normalize_img, num_parallel_calls=tf.data.AUTOTUNE)
train_ds = train_ds.cache()  # cache the deterministic step; augmentation stays random
train_ds = train_ds.map(augment, num_parallel_calls=tf.data.AUTOTUNE)
train_ds = train_ds.shuffle(10_000).batch(batch_size).prefetch(tf.data.AUTOTUNE)
# Prepare test dataset (no augmentation)
test_ds = ds_test.map(normalize_img, num_parallel_calls=tf.data.AUTOTUNE)
test_ds = test_ds.batch(batch_size).cache().prefetch(tf.data.AUTOTUNE)
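# Both pipelines should now yield batches of (images, labels):
print(train_ds.element_spec)
print(test_ds.element_spec)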
# Define the model (simple CNN)
model = models.Sequential([
    layers.Input(shape=(32, 32, 3)),
    layers.Conv2D(32, (3, 3), activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10, activation='softmax')
])
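# Optional: inspect per-layer output shapes and parameter counts.
model.summary()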
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# Train the model
history = model.fit(train_ds, epochs=50, validation_data=test_ds)
# Evaluate final metrics
# Note: train_ds still applies random augmentation, so this understates
# accuracy on clean training images.
train_loss, train_acc = model.evaluate(train_ds, verbose=0)
val_loss, val_acc = model.evaluate(test_ds, verbose=0)
print(f'Training accuracy: {train_acc*100:.2f}%, Validation accuracy: {val_acc*100:.2f}%')
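# Optional: plot the learning curves recorded in the History object
# (assumes matplotlib is installed; not imported above).
import matplotlib.pyplot as plt
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='validation')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()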