"""Train a small CNN on MNIST for one epoch and report test loss/accuracy.

The brief training run stands in for loading pretrained weights; normally
you would call ``model.load_weights(path)`` instead of ``model.fit``.
"""
import tensorflow as tf

# Hyperparameters / model constants (kept identical to the original script).
INPUT_SHAPE = (28, 28, 1)   # MNIST images: 28x28 grayscale, single channel
NUM_CLASSES = 10
BATCH_SIZE = 64
EPOCHS = 1


def preprocess(images):
    """Scale uint8 pixel values to [0, 1] floats and add a channel axis.

    Args:
        images: array of shape (N, 28, 28) as returned by the MNIST loader.

    Returns:
        float32 array of shape (N, 28, 28, 1), as the Conv2D stack expects.
    """
    images = images.astype('float32') / 255.0
    # tf.newaxis is None, so this works on plain NumPy arrays too.
    return images[..., tf.newaxis]


def build_model():
    """Build and compile the CNN used at training time.

    Returns:
        A compiled ``tf.keras.Sequential`` model producing softmax
        probabilities over the 10 digit classes.
    """
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=INPUT_SHAPE),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(100, activation='relu'),
        tf.keras.layers.Dense(NUM_CLASSES, activation='softmax'),
    ])
    # Integer labels + softmax output -> sparse categorical cross-entropy.
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model


def main():
    """Run the end-to-end load / train / evaluate experiment."""
    (train_images, train_labels), (test_images, test_labels) = \
        tf.keras.datasets.mnist.load_data()

    # Apply the same preprocessing to both splits (previously duplicated).
    train_images = preprocess(train_images)
    test_images = preprocess(test_images)

    model = build_model()
    # Simulate a trained model with one short epoch; in a real run you
    # would restore saved weights here instead:
    #   model.load_weights('path_to_weights')
    model.fit(train_images, train_labels,
              epochs=EPOCHS, batch_size=BATCH_SIZE, verbose=0)

    # Predict class probabilities and reduce to hard label indices.
    predictions = model.predict(test_images, verbose=0)
    predicted_labels = predictions.argmax(axis=1)
    # Cross-check: accuracy from argmax labels should match the metric
    # reported by model.evaluate below (previously this value was unused).
    manual_accuracy = (predicted_labels == test_labels).mean()

    loss, accuracy = model.evaluate(test_images, test_labels, verbose=0)
    print(f"Test Loss: {loss:.4f}")
    print(f"Test Accuracy: {accuracy*100:.2f}%")
    print(f"Argmax-label Accuracy (cross-check): {manual_accuracy*100:.2f}%")


if __name__ == "__main__":
    main()