import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
# Simple model: a two-layer MLP producing two-class logits
class SimpleNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(20, 50)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(50, 2)

    def forward(self, x):
        x = self.relu(self.fc1(x))
        return self.fc2(x)
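# Quick sanity check (illustrative, not part of the original script): a batch
# of 20-dimensional inputs should map to (batch, 2) logits.
assert SimpleNet()(torch.randn(4, 20)).shape == (4, 2)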
# Generate dummy data
X_train = torch.randn(500, 20)
y_train = torch.randint(0, 2, (500,))
X_val = torch.randn(100, 20)
y_val = torch.randint(0, 2, (100,))
train_ds = TensorDataset(X_train, y_train)
val_ds = TensorDataset(X_val, y_val)
train_dl = DataLoader(train_ds, batch_size=32, shuffle=True)
val_dl = DataLoader(val_ds, batch_size=32)
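# Each training batch yields xb of shape (32, 20) and yb of shape (32,),
# except a smaller final batch (500 is not a multiple of 32, so the last
# batch has 20 samples).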
model = SimpleNet()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)
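# CrossEntropyLoss applies log-softmax internally, which is why SimpleNet
# returns raw logits rather than probabilities.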
# ReduceLROnPlateau halves the LR (factor=0.5) after `patience` epochs with no
# improvement in the monitored metric; mode='min' means lower is better.
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=3)
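# With the initial lr=0.01 and factor=0.5, successive plateaus would step the
# LR through 0.01 -> 0.005 -> 0.0025 -> ..., down to the `min_lr` floor
# (0 by default).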
epochs = 20
for epoch in range(1, epochs + 1):
    # Training phase
    model.train()
    for xb, yb in train_dl:
        optimizer.zero_grad()
        preds = model(xb)
        loss = criterion(preds, yb)
        loss.backward()
        optimizer.step()

    # Validation phase
    model.eval()
    val_loss = 0.0
    correct = 0
    total = 0
    with torch.no_grad():
        for xb, yb in val_dl:
            preds = model(xb)
            loss_val = criterion(preds, yb)
            val_loss += loss_val.item() * xb.size(0)  # sum of per-sample losses
            predicted = preds.argmax(dim=1)
            correct += (predicted == yb).sum().item()
            total += yb.size(0)
    val_loss /= total
    val_acc = correct / total * 100

    # Step the scheduler with the metric it monitors (validation loss)
    scheduler.step(val_loss)
    print(f"Epoch {epoch}: Val Loss={val_loss:.4f}, Val Acc={val_acc:.2f}%, LR={optimizer.param_groups[0]['lr']:.5f}")