"""This program shows how automatic differentiation finds gradients to train a simple model that learns y = 2x + 1."""
import torch
import torch.nn as nn
import torch.optim as optim
# Simple model: y = wx + b
class SimpleModel(nn.Module):
    """A one-feature linear model: y = w * x + b.

    Wraps a single ``nn.Linear(1, 1)`` layer, so the only learnable
    parameters are one weight ``w`` and one bias ``b``.
    """

    def __init__(self):
        super().__init__()
        # One input feature -> one output feature; holds w and b.
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        """Apply the affine map to ``x`` (expected shape ``(batch, 1)``)."""
        return self.linear(x)
# Data: four (x, y) pairs sampled from the target function y = 2x + 1.
x = torch.tensor([[1.0], [2.0], [3.0], [4.0]])
y = torch.tensor([[3.0], [5.0], [7.0], [9.0]])

model = SimpleModel()
criterion = nn.MSELoss()  # mean-squared-error between prediction and target
optimizer = optim.SGD(model.parameters(), lr=0.1)

# Training loop: autograd builds the computation graph in the forward pass,
# then backward() fills each parameter's .grad with dLoss/dParam.
for epoch in range(5):
    optimizer.zero_grad()        # Clear old gradients (they accumulate otherwise)
    output = model(x)            # Predict
    loss = criterion(output, y)  # Calculate loss
    loss.backward()              # Compute gradients automatically
    optimizer.step()             # Update weights via gradient descent
    print(f"Epoch {epoch+1}, Loss: {loss.item():.4f}")