In [ ]:
# Fashion Image classification (fashion_mnist) using a LeNet Architecture in Python 

def Snippet_364():
    """Train and evaluate a LeNet-style CNN on the fashion_mnist dataset.

    Loads fashion_mnist via Keras, builds a LeNet architecture
    (two CONV->RELU->POOL stages followed by dense layers), trains for a
    few epochs with a validation split, reports test accuracy, saves the
    model architecture/weights to disk, and plots the training curves.
    """
    print()
    print(format('Fashion Image classification (fashion_mnist) using a LeNet Architecture in Python', '*^102'))

    import time
    start_time = time.time()

    from keras.datasets import fashion_mnist
    from keras.utils import np_utils
    from keras.models import Sequential
    from keras.layers.core import Dense, Activation, Flatten
    from keras.layers.convolutional import Conv2D, MaxPooling2D
    from keras.optimizers import RMSprop
    import matplotlib.pyplot as plt

    # fashion_mnist is a set of 70K grayscale images, 28x28 pixels each
    IMG_CHANNELS = 1
    IMG_ROWS = 28
    IMG_COLS = 28

    # Training constants
    BATCH_SIZE = 128
    NB_EPOCH = 5
    NB_CLASSES = 10
    VERBOSE = 1
    VALIDATION_SPLIT = 0.2

    # Load dataset (downloaded on first use)
    (X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()

    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # Reshape to (samples, rows, cols, channels) as expected by Conv2D
    X_train = X_train.reshape(X_train.shape[0], IMG_ROWS, IMG_COLS, IMG_CHANNELS)
    X_test = X_test.reshape(X_test.shape[0], IMG_ROWS, IMG_COLS, IMG_CHANNELS)

    # One-hot encode the integer class labels
    Y_train = np_utils.to_categorical(y_train, NB_CLASSES)
    Y_test = np_utils.to_categorical(y_test, NB_CLASSES)

    # Convert to float and normalize pixel values into [0, 1]
    X_train = X_train.astype('float32') / 255
    X_test = X_test.astype('float32') / 255

    # --------------------------------------
    # LeNet network architecture in Keras
    # --------------------------------------
    model = Sequential()

    # CONV => RELU => POOL
    model.add(Conv2D(filters=20, kernel_size=5, padding="same",
                     input_shape=(IMG_ROWS, IMG_COLS, IMG_CHANNELS)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # CONV => RELU => POOL
    model.add(Conv2D(50, kernel_size=5, padding="same"))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Flatten => RELU layers
    model.add(Flatten())
    model.add(Dense(units=500))
    model.add(Activation("relu"))

    # Softmax classifier over the 10 clothing classes
    model.add(Dense(NB_CLASSES))
    model.add(Activation("softmax"))

    model.summary()

    # Train
    model.compile(loss='categorical_crossentropy', optimizer=RMSprop(),
                  metrics=['accuracy'])

    history = model.fit(X_train, Y_train, batch_size=BATCH_SIZE,
                        epochs=NB_EPOCH, validation_split=VALIDATION_SPLIT,
                        verbose=VERBOSE)

    print('Testing...')
    score = model.evaluate(X_test, Y_test, batch_size=BATCH_SIZE, verbose=VERBOSE)
    print("\nTest score:", score[0])
    print('Test accuracy:', score[1])

    # Save model architecture and weights.
    # Fix: use a context manager so the file handle is always closed.
    with open('mnist_architecture.json', 'w') as json_file:
        json_file.write(model.to_json())
    model.save_weights('mnist_weights.h5', overwrite=True)

    # List all data recorded in the training history
    print(history.history.keys())
    print(history.history)

    # Keras 2.3+ logs the metric as 'accuracy'; older versions used 'acc'.
    acc_key = 'accuracy' if 'accuracy' in history.history else 'acc'
    val_acc_key = 'val_' + acc_key

    # Summarize history for accuracy.
    # Note: the second curve comes from validation_split, not the test set.
    plt.plot(history.history[acc_key])
    plt.plot(history.history[val_acc_key])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()

    # Summarize history for loss
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()

    print()
    print("Execution Time %s seconds: " % (time.time() - start_time))

Snippet_364()
**********Fashion Image classification (fashion_mnist) using a LeNet Architecture in Python***********
Using TensorFlow backend.
X_train shape: (60000, 28, 28)
60000 train samples
10000 test samples
Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_1 (Conv2D)            (None, 28, 28, 20)        520       
_________________________________________________________________
activation_1 (Activation)    (None, 28, 28, 20)        0         
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 14, 14, 20)        0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 14, 14, 50)        25050     
_________________________________________________________________
activation_2 (Activation)    (None, 14, 14, 50)        0         
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 7, 7, 50)          0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 2450)              0         
_________________________________________________________________
dense_1 (Dense)              (None, 500)               1225500   
_________________________________________________________________
activation_3 (Activation)    (None, 500)               0         
_________________________________________________________________
dense_2 (Dense)              (None, 10)                5010      
_________________________________________________________________
activation_4 (Activation)    (None, 10)                0         
=================================================================
Total params: 1,256,080
Trainable params: 1,256,080
Non-trainable params: 0
_________________________________________________________________
Train on 48000 samples, validate on 12000 samples
Epoch 1/5
48000/48000 [==============================] - 43s 890us/step - loss: 0.4942 - accuracy: 0.8199 - val_loss: 0.3358 - val_accuracy: 0.8802
Epoch 2/5
48000/48000 [==============================] - 38s 788us/step - loss: 0.2944 - accuracy: 0.8927 - val_loss: 0.2885 - val_accuracy: 0.8955
Epoch 3/5
48000/48000 [==============================] - 65s 1ms/step - loss: 0.2436 - accuracy: 0.9107 - val_loss: 0.2820 - val_accuracy: 0.9012
Epoch 4/5
34944/48000 [====================>.........] - ETA: 11s - loss: 0.2101 - accuracy: 0.9236