import tensorflow as tf
# Walk through tensors of increasing rank, then show a reduction op.

# Rank 0: a single number.
rank0 = tf.constant(7)
print(f"Scalar (0D tensor): {rank0}, shape: {rank0.shape}")

# Rank 1: a list of numbers.
rank1 = tf.constant([1, 2, 3])
print(f"Vector (1D tensor): {rank1}, shape: {rank1.shape}")

# Rank 2: a table of numbers.
rank2 = tf.constant([[1, 2, 3], [4, 5, 6]])
print(f"Matrix (2D tensor):\n{rank2}\nshape: {rank2.shape}")

# Rank 4: a batch of 2 images, each 2x3 pixels with 1 color channel
# (shape is batch x height x width x channels).
rank4 = tf.constant([
    [[[1], [2], [3]], [[4], [5], [6]]],
    [[[7], [8], [9]], [[10], [11], [12]]],
])
print(f"4D tensor (batch of images):\n{rank4}\nshape: {rank4.shape}")

# Operations apply to a whole tensor at once.
total = tf.reduce_sum(rank1)
print(f"Sum of vector elements: {total}")

# Closing explanation.
print("\nTensors can hold data in many dimensions, making them perfect for all kinds of data like numbers, lists, tables, images, and batches.")
print("TensorFlow uses tensors because they allow fast math operations on these data structures, which is essential for machine learning.")