import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, GlobalAveragePooling1D, Dense, Dropout
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.callbacks import EarlyStopping
# Toy corpus: eight one-line reviews with binary sentiment labels (1 = positive).
texts = ["I love this movie", "This movie is terrible", "Amazing film", "Worst movie ever", "I enjoyed it", "Not good", "Fantastic acting", "Bad plot"]
labels = [1, 0, 1, 0, 1, 0, 1, 0]

# Tokenization caps: vocabulary size and padded sequence length.
max_words = 1000  # keep only the 1000 most frequent tokens
max_len = 10      # pad / truncate every review to 10 token ids

# Fit the vocabulary on the corpus, then turn each review into a fixed-length
# integer sequence (zero-padded on the left by default).
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
data = pad_sequences(sequences, maxlen=max_len)

# Hand Keras a tensor for the targets (equivalent to tf.convert_to_tensor here).
labels = tf.constant(labels)
# Small text classifier: embedding -> mean-pool over time -> 2-layer MLP head.
# Dropout after the pooled features and the hidden layer regularizes the tiny
# dataset; the final sigmoid unit emits P(positive).
model = Sequential()
model.add(Embedding(max_words, 16, input_length=max_len))
model.add(GlobalAveragePooling1D())
model.add(Dropout(0.5))
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

# Binary cross-entropy pairs with the sigmoid output; track accuracy as well.
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Halt training once val_loss has not improved for 2 consecutive epochs, and
# roll the model back to the weights from the best epoch seen.
early_stop = EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True)

# Train on 75% of the samples; the trailing 25% (validation_split takes the
# tail of the arrays, unshuffled) serves as the validation set.
history = model.fit(
    data,
    labels,
    epochs=10,
    batch_size=2,
    validation_split=0.25,
    callbacks=[early_stop],
)
# Report final metrics.
#
# Because EarlyStopping uses restore_best_weights=True, the model in memory
# holds the weights from the epoch with the LOWEST val_loss — not necessarily
# the last epoch run. Printing history[...][-1] (as before) could therefore
# report metrics that do not match the restored model. Report the best epoch's
# entries instead, so the numbers describe the weights actually kept.
hist = history.history
best_epoch = min(range(len(hist['val_loss'])), key=lambda i: hist['val_loss'][i])
train_acc = hist['accuracy'][best_epoch] * 100
val_acc = hist['val_accuracy'][best_epoch] * 100
train_loss = hist['loss'][best_epoch]
val_loss = hist['val_loss'][best_epoch]
print(f"Training accuracy: {train_acc:.2f}%, Validation accuracy: {val_acc:.2f}%")
print(f"Training loss: {train_loss:.3f}, Validation loss: {val_loss:.3f}")