1. Feed Forward Network (FFNW)
    1. #Layer initialization
       #Define a simple model with one hidden layer and an output layer
       from tensorflow.keras.models import Sequential
       from tensorflow.keras.layers import Dense
       model = Sequential([
           Dense(64, activation='relu', input_shape=(784,)),
           Dense(10, activation='softmax')
       ])
       (How many parameters in a FFNW? weights + biases = (i*h + h*o) + (h + o), where i = input size, h = hidden units, o = output units; see the worked count below)
      1. 1. model.weights is populated once the model is built (i.e. after input_shape is given or the model has seen input) 2. model.summary() is a nice way to see layer names, output shapes, and parameter counts
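         A quick check of the parameter-count formula on the 784 -> 64 -> 10 model above (a minimal sketch; model.count_params() and model.summary() are standard Keras calls):

            import tensorflow as tf
            from tensorflow.keras.models import Sequential
            from tensorflow.keras.layers import Dense

            model = Sequential([
                Dense(64, activation='relu', input_shape=(784,)),   # weights: 784*64, biases: 64
                Dense(10, activation='softmax')                      # weights: 64*10,  biases: 10
            ])

            # (i*h + h*o) + (h + o) = (784*64 + 64*10) + (64 + 10) = 50176 + 640 + 74 = 50890
            manual_count = (784*64 + 64*10) + (64 + 10)
            print(manual_count)             # 50890
            print(model.count_params())     # 50890, matches the manual count
            model.summary()                 # per-layer output shapes and parameter counts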
    2. #Define/Build a CNN model
       from tensorflow.keras.models import Sequential
       from tensorflow.keras.layers import Flatten, Dense, Conv2D, MaxPooling2D
       model = Sequential([
           Conv2D(16, (3,3), activation='relu', input_shape=(28,28,1), name='conv_layer'),
           MaxPooling2D((3,3), name='max_pool'),
           Flatten(name='flatten'),
           Dense(10, activation='softmax', name='output_layer')
       ])
      1. #Compile the model
         #Weights are initialized when the model is built; compile sets the optimizer, loss function, and metrics
         #Multi-class classification here, so sparse categorical crossentropy
         opt = tf.keras.optimizers.Adam(learning_rate=0.005)
         acc = tf.keras.metrics.SparseCategoricalAccuracy()
         mae = tf.keras.metrics.MeanAbsoluteError()
         model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=[acc, mae])
        1. #Train (fit) the model
           #An epoch is one complete pass through the training data; each epoch is processed in batches of batch_size samples
           model.fit(X_train, y_train, epochs=10, batch_size=16)
           #model.fit returns a Keras History object
           history = model.fit(X_train, y_train, epochs=10, batch_size=16)
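           A minimal end-to-end sketch of compile + fit for the CNN above and what the History object holds; the random X_train/y_train here are placeholder data (an assumption), not a real dataset:

              import numpy as np
              import tensorflow as tf
              from tensorflow.keras.models import Sequential
              from tensorflow.keras.layers import Flatten, Dense, Conv2D, MaxPooling2D

              model = Sequential([
                  Conv2D(16, (3,3), activation='relu', input_shape=(28,28,1)),
                  MaxPooling2D((3,3)),
                  Flatten(),
                  Dense(10, activation='softmax')
              ])
              model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.005),
                            loss='sparse_categorical_crossentropy',
                            metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

              # placeholder data: 100 random 28x28 grayscale "images" with labels 0-9
              X_train = np.random.rand(100, 28, 28, 1)
              y_train = np.random.randint(0, 10, size=(100,))

              history = model.fit(X_train, y_train, epochs=3, batch_size=16)
              print(history.history.keys())   # e.g. dict_keys(['loss', 'sparse_categorical_accuracy'])
              print(history.history['loss'])  # one loss value per epoch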
  2. Apply the FFNW/CNN knowledge above in a Fashion-MNIST dataset project
    1. #Step A
       #Import the relevant libraries
       import tensorflow as tf
       from tensorflow.keras.preprocessing import image
       import matplotlib.pyplot as plt
       import numpy as np
       import pandas as pd
      1. Define CNN model
      2. Compile the model
      3. #Load the Fashion-MNIST dataset
         fashion_mnist_data = tf.keras.datasets.fashion_mnist
         (train_images, train_labels), (test_images, test_labels) = fashion_mnist_data.load_data()
        1. #Rescale pixel values to [0, 1] to make learning faster and more stable
           train_images = train_images / 255.
           test_images = test_images / 255.
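           A minimal sketch of the load + rescale steps with shape checks; Fashion-MNIST images are 28x28 grayscale, so a channel axis is added with np.newaxis before feeding the Conv2D layer (as done in the fit call below):

              import numpy as np
              import tensorflow as tf

              # same load and rescale steps as above
              (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.fashion_mnist.load_data()
              train_images = train_images / 255.
              test_images = test_images / 255.

              print(train_images.shape)                    # (60000, 28, 28)
              print(test_images.shape)                     # (10000, 28, 28)
              # Conv2D expects a channel axis, so add one before training:
              print(train_images[..., np.newaxis].shape)   # (60000, 28, 28, 1)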
      4. #Fit (train) the model (a larger batch size reduces computation time per epoch)
         history = model.fit(train_images[..., np.newaxis], train_labels, epochs=2, batch_size=256)
         #history.history is a dictionary containing the loss (and metric) values after each epoch
        1. #Ideally the training loss should decrease each epoch. Once training is done and the training loss has been studied, we can check how the model is doing on the test dataset
           model.evaluate(test_images[..., np.newaxis], test_labels)
          1. #Now let's make a prediction on a single test image
             test_image = test_images[0]
             pred = model.predict(test_image[np.newaxis, ..., np.newaxis])
             #The output layer uses softmax, so take the index with the largest probability
             np.argmax(pred)
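             To turn the predicted index into a readable label, the standard Fashion-MNIST class order can be used (this list is an addition, not from the notes above):

                # standard Fashion-MNIST label order (0-9)
                class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
                               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
                predicted_class = np.argmax(pred)          # integer 0-9
                print(class_names[predicted_class])
                plt.imshow(test_image, cmap='gray')        # visually compare with the image
                plt.show()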
        2. #Compute the test loss and metrics (returned in the same order as the compiled metrics)
           test_loss, test_accuracy, test_mae = model.evaluate(test_images[..., np.newaxis], test_labels)
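           Since history.history holds one loss value per epoch (see the fit step above), the training curve can be studied with the matplotlib import from Step A; a short sketch, most useful when training runs for more than a couple of epochs:

              epochs_ran = range(1, len(history.history['loss']) + 1)
              plt.plot(epochs_ran, history.history['loss'], marker='o')
              plt.xlabel('Epoch')
              plt.ylabel('Training loss')
              plt.title('Training loss per epoch')   # ideally this curve goes down
              plt.show()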
  3. Learning Direction ->