"""Minimal GAN demo: a simple generator and discriminator are trained for one
step on random stand-in data, and the resulting losses are printed."""
import torch
import torch.nn as nn
import torch.optim as optim
# Simple Generator
class Generator(nn.Module):
    """Map a 100-dim noise vector to a 784-dim sample in (-1, 1)."""

    def __init__(self):
        super().__init__()
        # Two-layer MLP: 100 -> 256 -> 784. Tanh squashes the output into
        # (-1, 1), the conventional range for normalized image pixels.
        self.model = nn.Sequential(
            nn.Linear(100, 256),
            nn.ReLU(True),
            nn.Linear(256, 784),
            nn.Tanh(),
        )

    def forward(self, x):
        """Return generated samples of shape (batch, 784) for noise x of shape (batch, 100)."""
        return self.model(x)
# Simple Discriminator
class Discriminator(nn.Module):
    """Score a 784-dim sample with the probability that it is real."""

    def __init__(self):
        super().__init__()
        # Two-layer MLP: 784 -> 256 -> 1. LeakyReLU avoids dead units on the
        # critic side; Sigmoid maps the logit to a probability for BCELoss.
        self.model = nn.Sequential(
            nn.Linear(784, 256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Return real-probabilities of shape (batch, 1) for input x of shape (batch, 784)."""
        return self.model(x)
# Instantiate the two adversaries.
G = Generator()
D = Discriminator()

# Binary cross-entropy loss, with an independent Adam optimizer per network
# so each step updates only its own model's parameters.
criterion = nn.BCELoss()
optimizer_G = optim.Adam(G.parameters(), lr=0.001)
optimizer_D = optim.Adam(D.parameters(), lr=0.001)

# Stand-in "real" batch: 16 random 784-dim vectors (demo only — not images).
real_images = torch.randn(16, 784)
real_labels = torch.ones(16, 1)   # target 1 = "real"
fake_labels = torch.zeros(16, 1)  # target 0 = "fake"

for epoch in range(1):
    # --- Discriminator step: push D(real) toward 1 and D(fake) toward 0 ---
    optimizer_D.zero_grad()
    loss_real = criterion(D(real_images), real_labels)
    noise = torch.randn(16, 100)
    fake_images = G(noise)
    # detach() blocks this backward pass from flowing into the generator.
    loss_fake = criterion(D(fake_images.detach()), fake_labels)
    loss_D = loss_real + loss_fake
    loss_D.backward()
    optimizer_D.step()

    # --- Generator step: push D(fake) toward 1, i.e. fool the discriminator ---
    optimizer_G.zero_grad()
    loss_G = criterion(D(fake_images), real_labels)
    loss_G.backward()
    optimizer_G.step()

    print(f"Epoch {epoch+1} | Loss D: {loss_D.item():.4f} | Loss G: {loss_G.item():.4f}")