import torch
import torch.nn as nn
import torch.optim as optim
class SimpleRNN(nn.Module):
    """Two-layer vanilla RNN classifier that predicts from the final time step.

    Expects input of shape (batch, seq_len, input_size) and returns
    logits of shape (batch, output_size).
    """

    def __init__(self, input_size, hidden_size, output_size, dropout=0.3):
        super().__init__()
        # num_layers=2 means the `dropout` arg is applied between the
        # stacked RNN layers; the explicit Dropout below regularizes the
        # features fed to the classification head.
        self.rnn = nn.RNN(
            input_size,
            hidden_size,
            num_layers=2,
            batch_first=True,
            dropout=dropout,
        )
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Run the RNN and classify from the last time step's hidden output."""
        sequence_out, _ = self.rnn(x)
        last_step = sequence_out[:, -1, :]  # (batch, hidden_size)
        return self.fc(self.dropout(last_step))
# Example training loop setup
input_size = 10
hidden_size = 32  # Reduced from larger size
output_size = 2

model = SimpleRNN(input_size, hidden_size, output_size, dropout=0.3)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)  # Reduced learning rate

# Dummy data for demonstration
X_train = torch.randn(100, 5, input_size)
y_train = torch.randint(0, output_size, (100,))
X_val = torch.randn(30, 5, input_size)
y_val = torch.randint(0, output_size, (30,))

# Training with early stopping on validation accuracy
best_val_acc = 0.0
patience = 3       # epochs without improvement tolerated before stopping
trigger_times = 0  # consecutive epochs with no val-accuracy improvement

for epoch in range(30):
    # --- full-batch training step ---
    model.train()
    optimizer.zero_grad()
    outputs = model(X_train)
    loss = criterion(outputs, y_train)
    loss.backward()
    optimizer.step()

    # --- validation (no gradient tracking) ---
    # NOTE(review): the original also computed a validation loss here but
    # never used it; removed as dead work.
    model.eval()
    with torch.no_grad():
        val_outputs = model(X_val)
        predicted = val_outputs.argmax(dim=1)
        val_acc = (predicted == y_val).float().mean().item() * 100

    train_acc = (outputs.argmax(dim=1) == y_train).float().mean().item() * 100

    # Early stopping: reset the counter on improvement, otherwise count up
    # and break once patience is exhausted.
    if val_acc > best_val_acc:
        best_val_acc = val_acc
        trigger_times = 0
    else:
        trigger_times += 1
        if trigger_times >= patience:
            break

print(f"Training accuracy: {train_acc:.2f}%, Validation accuracy: {best_val_acc:.2f}%")