import tensorflow as tf
from tensorflow.keras import layers, models
import tensorflow_privacy
# Load the MNIST dataset for demonstration purposes.
# Pixels arrive as uint8 in [0, 255]; cast to float32 *before* scaling so the
# arrays end up float32 (TF/Keras' default dtype) instead of the float64 that
# plain `uint8_array / 255.0` would produce — half the memory, no silent
# dtype casts inside the model.
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
X_train = X_train.reshape(-1, 28 * 28).astype("float32") / 255.0
X_test = X_test.reshape(-1, 28 * 28).astype("float32") / 255.0
# A small fully-connected classifier: 784 -> 128 (ReLU) -> 10 (softmax).
model = models.Sequential()
model.add(layers.InputLayer(input_shape=(28*28,)))
model.add(layers.Dense(128, activation='relu'))
# Dropout between the hidden and output layers to reduce overfitting.
model.add(layers.Dropout(0.3))
model.add(layers.Dense(10, activation='softmax'))
# Use a differentially-private optimizer from tensorflow_privacy.
from tensorflow_privacy.privacy.optimizers.dp_optimizer_keras import DPKerasAdamOptimizer

optimizer = DPKerasAdamOptimizer(
    l2_norm_clip=1.0,       # clip per-example gradients to bound sensitivity
    noise_multiplier=1.1,   # Gaussian noise scale relative to the clip norm
    num_microbatches=256,   # must evenly divide every training batch size
    learning_rate=0.001,
)

# BUG FIX: DP optimizers need the *per-example* loss vector so they can split
# it into microbatches and clip each microbatch gradient independently. The
# string loss 'sparse_categorical_crossentropy' reduces to a scalar first,
# which silently defeats per-example clipping (and DP optimizers reject it in
# recent tensorflow_privacy versions). Use an unreduced loss instead.
loss = tf.keras.losses.SparseCategoricalCrossentropy(
    reduction=tf.losses.Reduction.NONE)

model.compile(optimizer=optimizer,
              loss=loss,
              metrics=['accuracy'])
# Train the model with differential privacy.
# BUG FIX: num_microbatches (256) must evenly divide every training batch.
# With validation_split=0.2 Keras trains on 48000 samples, so the final batch
# has only 128 examples and the DP optimizer raises at runtime. Carve out an
# explicit validation set instead, trimming the training partition down to a
# multiple of the batch size so every batch is full.
batch_size = 256
split = int(len(X_train) * 0.8)
split -= split % batch_size  # every training batch is exactly batch_size
history = model.fit(X_train[:split], y_train[:split],
                    epochs=15,
                    batch_size=batch_size,
                    validation_data=(X_train[split:], y_train[split:]),
                    verbose=2)
# Measure held-out performance on the untouched test split.
evaluation = model.evaluate(X_test, y_test, verbose=0)
test_loss, test_acc = evaluation
print(f'Test accuracy: {test_acc:.4f}')