import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense, Dropout
from tensorflow.keras.callbacks import EarlyStopping
# --- Synthetic time series: a noisy sine wave --------------------------
# Fixed seed so the series (and hence training results) are reproducible.
np.random.seed(42)

time_steps = 50  # NOTE(review): defined but never referenced below — confirm intent
features = 1     # univariate series: one value per time step

# 1000 evenly spaced points on [0, 100]; signal = sin(x) + Gaussian noise
# with standard deviation 0.1.
x = np.linspace(0, 100, 1000)
y = np.sin(x)
y = y + 0.1 * np.random.randn(x.size)
# --- Windowed supervised dataset ---------------------------------------
# Each sample is a window of `sequence_length` consecutive values; the
# target is the value immediately following the window (one-step-ahead).
sequence_length = 10
n_samples = len(y) - sequence_length
windows = [y[j:j + sequence_length] for j in range(n_samples)]
targets = [y[j + sequence_length] for j in range(n_samples)]

# Shape for the RNN: (samples, timesteps, features).
X = np.array(windows).reshape(-1, sequence_length, features)
Y = np.array(targets)

# Chronological 80/20 split — no shuffling, so validation data lies
# strictly after the training data in time.
cut = int(len(X) * 0.8)
X_train, X_val = X[:cut], X[cut:]
Y_train, Y_val = Y[:cut], Y[cut:]
# --- Model: small SimpleRNN regressor with dropout ---------------------
# 20 recurrent units keeps capacity low; dropout 0.3 regularizes the
# dense head. Output is a single scalar (next value in the series).
model = Sequential()
model.add(SimpleRNN(20, activation='tanh', return_sequences=False,
                    input_shape=(sequence_length, features)))
model.add(Dropout(0.3))
model.add(Dense(1))

adam = tf.keras.optimizers.Adam(learning_rate=0.001)
model.compile(optimizer=adam, loss='mse', metrics=['mae'])

# Stop when validation loss plateaus for 5 epochs and roll back to the
# best weights seen so far.
early_stop = EarlyStopping(monitor='val_loss', patience=5,
                           restore_best_weights=True)

# Silent training (verbose=0); up to 50 epochs, early stopping may end sooner.
history = model.fit(X_train, Y_train,
                    epochs=50,
                    batch_size=32,
                    validation_data=(X_val, Y_val),
                    callbacks=[early_stop],
                    verbose=0)

# --- Evaluation --------------------------------------------------------
train_loss, train_mae = model.evaluate(X_train, Y_train, verbose=0)
val_loss, val_mae = model.evaluate(X_val, Y_val, verbose=0)

# MAE rescaled into a rough 0-100 "accuracy" score, floored at 0 —
# purely illustrative, not a statistically meaningful accuracy.
train_accuracy = max(0, 100 - train_mae * 100)
val_accuracy = max(0, 100 - val_mae * 100)
print(f'Training accuracy: {train_accuracy:.2f}%')
print(f'Validation accuracy: {val_accuracy:.2f}%')