import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Embedding, GlobalAveragePooling1D
from tensorflow.keras.models import Sequential
# --- Hyperparameters ---
vocab_size = 10000    # tokenizer vocabulary size (token ids in [0, vocab_size))
max_length = 100      # padded sequence length of each input sample
embedding_dim = 100   # reduced from 300 to shrink the embedding table

# Small bag-of-words-style binary text classifier: token embeddings are
# averaged over the sequence, then passed through a tiny MLP head.
# NOTE: the `input_length` argument was dropped from Embedding — it was
# removed in Keras 3 (TF >= 2.16 rejects it) and was only ever advisory
# (used to pre-compute shapes for model.summary()).
model = Sequential([
    Embedding(input_dim=vocab_size, output_dim=embedding_dim),
    GlobalAveragePooling1D(),
    Dense(16, activation='relu'),
    Dense(1, activation='sigmoid'),  # single sigmoid unit -> binary label
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Assume X_train, y_train, X_val, y_val are preloaded datasets.
# For demonstration, generate *reproducible* dummy data with a seeded
# Generator instead of the legacy unseeded global np.random state.
rng = np.random.default_rng(seed=42)
X_train = rng.integers(0, vocab_size, size=(1000, max_length))
y_train = rng.integers(0, 2, size=(1000,))
X_val = rng.integers(0, vocab_size, size=(200, max_length))
y_val = rng.integers(0, 2, size=(200,))

history = model.fit(
    X_train,
    y_train,
    epochs=10,
    batch_size=32,
    validation_data=(X_val, y_val),
)