This example shows how ReduceLROnPlateau halves the learning rate (factor=0.5) once the monitored loss has gone more than patience=2 consecutive epochs without improving.
import torch
import torch.nn as nn
import torch.optim as optim
# Simple model
model = nn.Linear(10, 1)
# Optimizer
optimizer = optim.SGD(model.parameters(), lr=0.1)
# Scheduler: halve the LR after the loss stagnates for more than 2 epochs
# (the verbose argument is deprecated in recent PyTorch; the loop below prints the LR instead)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=2)
# Dummy data
inputs = torch.randn(5, 10)
targets = torch.randn(5, 1)
criterion = nn.MSELoss()
# Simulated per-epoch losses: each plateau lasts 3 non-improving epochs,
# which exceeds patience=2 and triggers an LR reduction
losses = [0.5, 0.4, 0.4, 0.4, 0.4, 0.3, 0.3, 0.3, 0.3]
for epoch, loss_val in enumerate(losses, 1):
    optimizer.zero_grad()
    outputs = model(inputs)
    loss = criterion(outputs, targets)
    loss.backward()
    optimizer.step()
    # Step the scheduler with the simulated loss_val instead of the real loss
    # so the plateau (and the resulting LR reduction) is easy to see
    scheduler.step(loss_val)
    print(f"Epoch {epoch}, Loss: {loss_val}, Learning Rate: {optimizer.param_groups[0]['lr']}")