import numpy as np
# Sigmoid activation function
def sigmoid(x):
    """Element-wise logistic sigmoid, 1 / (1 + e^-x).

    Computed as exp(-logaddexp(0, -x)), which is algebraically identical
    but numerically stable: the naive 1 / (1 + np.exp(-x)) overflows in
    np.exp for large negative x and emits a RuntimeWarning.

    Parameters
    ----------
    x : array_like
        Scalar or ndarray of real values.

    Returns
    -------
    ndarray or scalar
        Same shape as ``x``, values in (0, 1).
    """
    return np.exp(-np.logaddexp(0, -x))
# Forward propagation function
def forward_propagation(X, parameters):
    """Run one forward pass through the two-layer sigmoid network.

    Parameters
    ----------
    X : ndarray, shape (n_samples, n_features)
        Input batch, one sample per row.
    parameters : dict
        Must contain 'W1', 'b1', 'W2', 'b2' with shapes compatible with X.

    Returns
    -------
    (A2, cache) : tuple
        ``A2`` is the output-layer activation (the prediction); ``cache``
        maps 'Z1', 'A1', 'Z2', 'A2' to the intermediate values, as needed
        for a later backward pass.
    """
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']

    # Hidden layer: affine transform followed by sigmoid activation.
    Z1 = X @ W1 + b1
    A1 = sigmoid(Z1)

    # Output layer: same pattern, producing the prediction.
    Z2 = A1 @ W2 + b2
    A2 = sigmoid(Z2)

    return A2, {'Z1': Z1, 'A1': A1, 'Z2': Z2, 'A2': A2}
# Example dataset (4 samples, 2 features)
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
# Example labels
Y = np.array([[0], [1], [1], [0]])

# Initialize parameters: small random weights, zero biases.
np.random.seed(1)  # fixed seed so every run produces the same weights
W1 = np.random.randn(2, 3) * 0.01  # input -> hidden weights
b1 = np.zeros((1, 3))              # hidden biases
W2 = np.random.randn(3, 1) * 0.01  # hidden -> output weights
b2 = np.zeros((1, 1))              # output bias
parameters = {
    'W1': W1,
    'b1': b1,
    'W2': W2,
    'b2': b2,
}

# Forward propagation: one pass over the whole batch.
A2, cache = forward_propagation(X, parameters)

# Convert the sigmoid outputs to hard 0/1 predictions at threshold 0.5.
predictions = (A2 > 0.5).astype(int)

# Percentage of samples whose thresholded prediction matches the label.
# NOTE(review): no training loop runs anywhere in this script, so this
# reports the accuracy of the randomly-initialized (untrained) network.
accuracy = np.mean(predictions == Y) * 100
print(f"Predictions:\n{predictions}")
print(f"Training accuracy: {accuracy:.2f}%")