import time
import numpy as np
import tensorflow as tf
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
# Pin every framework's RNG so repeated runs are comparable.
np.random.seed(42)
tf.random.set_seed(42)
torch.manual_seed(42)

# Hyperparameters shared by both frameworks to keep the benchmark fair.
batch_size = 64
learning_rate = 0.001
num_epochs = 10
# --- TensorFlow data pipeline ---------------------------------------------
# MNIST arrives as uint8 arrays of shape (N, 28, 28); scale to [0, 1] floats
# and append a channel axis so samples match the model's (28, 28, 1) input.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = np.expand_dims(x_train.astype('float32') / 255.0, -1)
x_test = np.expand_dims(x_test.astype('float32') / 255.0, -1)

# Shuffle only the training split; evaluation order does not matter.
train_ds = (tf.data.Dataset.from_tensor_slices((x_train, y_train))
            .shuffle(10000)
            .batch(batch_size))
val_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size)
# --- TensorFlow model: Flatten -> Dense(128, relu) -> Dense(10, softmax) ---
tf_model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax'),
])

# SparseCategoricalCrossentropy defaults to from_logits=False, matching the
# softmax output layer above.
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
tf_model.compile(optimizer=optimizer, loss=loss_fn, metrics=['accuracy'])

# Time the whole fit() call (per-epoch validation included).
start_tf = time.time()
tf_history = tf_model.fit(train_ds, epochs=num_epochs,
                          validation_data=val_ds, verbose=0)
end_tf = time.time()
# --- PyTorch data pipeline -------------------------------------------------
# ToTensor() converts the PIL uint8 images to float tensors in [0, 1],
# mirroring the /255 normalisation used on the TensorFlow side.
transform = transforms.Compose([transforms.ToTensor()])

train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
val_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)

# Shuffle only the training loader, as with the tf.data pipeline.
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
# PyTorch model
class Net(nn.Module):
    """MLP mirroring the Keras model: Flatten -> Linear(128) + ReLU -> Linear(10).

    Unlike the TensorFlow model there is no final softmax: forward() returns
    raw logits, which is the convention for nn.CrossEntropyLoss.
    """

    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()
        self.fc1 = nn.Linear(28 * 28, 128)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Map a batch of images (N, 1, 28, 28) to class logits (N, 10)."""
        hidden = self.relu(self.fc1(self.flatten(x)))
        return self.fc2(hidden)
# --- PyTorch training ------------------------------------------------------
pytorch_model = Net()
criterion = nn.CrossEntropyLoss()  # expects raw logits from the model
optimizer = optim.Adam(pytorch_model.parameters(), lr=learning_rate)

# Time the full training run (no per-epoch validation, unlike Keras fit()).
start_pt = time.time()
for _ in range(num_epochs):
    pytorch_model.train()
    for data, target in train_loader:
        optimizer.zero_grad()
        loss = criterion(pytorch_model(data), target)
        loss.backward()
        optimizer.step()
end_pt = time.time()
# --- Evaluation & report ---------------------------------------------------
# Score the PyTorch model on the held-out split; gradients are not needed.
pytorch_model.eval()
correct = 0
total = 0
with torch.no_grad():
    for data, target in val_loader:
        predictions = pytorch_model(data).argmax(dim=1)
        correct += (predictions == target).sum().item()
        total += target.size(0)
pytorch_val_acc = 100 * correct / total

# Keras tracked validation accuracy per epoch; report the final one.
tf_val_acc = tf_history.history['val_accuracy'][-1] * 100
tf_train_time = end_tf - start_tf
pt_train_time = end_pt - start_pt

print(f"TensorFlow validation accuracy: {tf_val_acc:.2f}%")
print(f"PyTorch validation accuracy: {pytorch_val_acc:.2f}%")
print(f"TensorFlow training time: {tf_train_time:.2f} seconds")
print(f"PyTorch training time: {pt_train_time:.2f} seconds")