# This script builds a small U-Net, trains it on random data for 2 epochs,
# and prints the final loss, accuracy, and prediction shape.
import numpy as np
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Conv2DTranspose, concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
# Define U-Net model
def unet(input_shape):
    """Build a small 3-level U-Net for binary segmentation.

    Args:
        input_shape: tuple of (height, width, channels). Height and width
            must be divisible by 4 — there are two 2x2 pooling stages, and
            the decoder upsamples back to the original size.

    Returns:
        An uncompiled Keras ``Model`` whose output has the same spatial size
        as the input with a single sigmoid channel (per-pixel probability).
    """
    inputs = Input(input_shape)

    # Encoder: two conv blocks, each followed by 2x2 max-pooling.
    c1 = Conv2D(16, 3, activation='relu', padding='same')(inputs)
    c1 = Conv2D(16, 3, activation='relu', padding='same')(c1)
    p1 = MaxPooling2D()(c1)

    c2 = Conv2D(32, 3, activation='relu', padding='same')(p1)
    c2 = Conv2D(32, 3, activation='relu', padding='same')(c2)
    p2 = MaxPooling2D()(c2)

    # Bottleneck at 1/4 resolution.
    c3 = Conv2D(64, 3, activation='relu', padding='same')(p2)
    c3 = Conv2D(64, 3, activation='relu', padding='same')(c3)

    # Decoder: transpose convs upsample 2x; skip connections concatenate
    # the matching encoder features so fine detail is recovered.
    u4 = Conv2DTranspose(32, 2, strides=2, padding='same')(c3)
    u4 = concatenate([u4, c2])
    c4 = Conv2D(32, 3, activation='relu', padding='same')(u4)
    c4 = Conv2D(32, 3, activation='relu', padding='same')(c4)

    u5 = Conv2DTranspose(16, 2, strides=2, padding='same')(c4)
    u5 = concatenate([u5, c1])
    c5 = Conv2D(16, 3, activation='relu', padding='same')(u5)
    c5 = Conv2D(16, 3, activation='relu', padding='same')(c5)

    # 1x1 conv + sigmoid -> one probability map per pixel.
    outputs = Conv2D(1, 1, activation='sigmoid')(c5)
    return Model(inputs, outputs)
# Build the model and compile it for binary per-pixel classification.
model = unet((64, 64, 1))
model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy'])

# Dummy dataset: 10 random 64x64 single-channel images; the "mask" for each
# image simply thresholds the pixel values at 0.5.
x_train = np.random.rand(10, 64, 64, 1).astype(np.float32)
y_train = (x_train > 0.5).astype(np.float32)  # dummy masks

# Train quietly for 2 epochs, then run inference on a single image.
history = model.fit(x_train, y_train, epochs=2, batch_size=2, verbose=0)
pred = model.predict(x_train[:1])

# Report the last recorded metrics and the output tensor shape.
print(f"Loss after 2 epochs: {history.history['loss'][-1]:.4f}")
print(f"Accuracy after 2 epochs: {history.history['accuracy'][-1]:.4f}")
print(f"Prediction shape: {pred.shape}")