import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models, callbacks
# ------------------------------------------------------------------
# Synthetic data: uniform [0, 1) single-channel 64x64 "images" for
# both inputs and targets, cast to float32 to match the model's
# expected input dtype. Seeding the legacy NumPy global stream keeps
# every run reproducible.
# ------------------------------------------------------------------
np.random.seed(42)


def _uniform_batch(count):
    # np.random.random draws from the same global generator stream
    # as np.random.rand, so the produced values are unchanged.
    return np.random.random((count, 64, 64, 1)).astype(np.float32)


# Draw order matters for reproducibility: train inputs, train
# targets, then the validation pair — same order as before.
X_train = _uniform_batch(1000)
y_train = _uniform_batch(1000)
X_val = _uniform_batch(200)
y_val = _uniform_batch(200)
# ------------------------------------------------------------------
# Convolutional encoder-decoder with dropout to reduce overfitting.
# Encoder halves the spatial size twice (64 -> 32 -> 16); the
# decoder upsamples back (16 -> 32 -> 64), so the sigmoid output
# matches the (64, 64, 1) targets with values in [0, 1].
# ------------------------------------------------------------------
model = models.Sequential()
# Encoder
model.add(layers.Conv2D(16, (3, 3), activation='relu', padding='same',
                        input_shape=(64, 64, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Dropout(0.3))
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Dropout(0.3))
# Bottleneck
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))
# Decoder
model.add(layers.UpSampling2D((2, 2)))
model.add(layers.Dropout(0.3))
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(layers.UpSampling2D((2, 2)))
model.add(layers.Conv2D(1, (3, 3), activation='sigmoid', padding='same'))
# ------------------------------------------------------------------
# Compile and train. MSE loss pairs with the sigmoid output since
# the targets are uniform values in [0, 1). Early stopping halts
# training after 5 epochs without validation-loss improvement and
# restores the best weights seen so far.
# ------------------------------------------------------------------
adam = tf.keras.optimizers.Adam(learning_rate=0.001)
model.compile(optimizer=adam, loss='mse')

early_stop = callbacks.EarlyStopping(
    monitor='val_loss',
    patience=5,
    restore_best_weights=True,
)

history = model.fit(
    X_train,
    y_train,
    epochs=50,
    batch_size=32,
    validation_data=(X_val, y_val),
    callbacks=[early_stop],
)