# For more projects visit: https://setscholars.net
# Suppress warnings in Jupyter Notebooks
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
import tensorflow_datasets as tfds
from sklearn.model_selection import train_test_split
import autokeras as ak
# Print the AutoKeras version so the notebook run is reproducible.
print(ak.__version__)
import logging
# Silence TensorFlow's informational log spam; only errors are shown.
tf.get_logger().setLevel(logging.ERROR)
1.0.16
# OpenML Dataset ID
whichDataset = 6 # provide dataset id
import openml
from openml.datasets import get_dataset

# Download the dataset from OpenML (cached locally after the first call).
dataset = openml.datasets.get_dataset(whichDataset)

# Print a short summary of what was fetched.
summary = (
    f"This is dataset '{dataset.name}', the target feature is "
    f"'{dataset.default_target_attribute}'"
)
print(summary)
print(f"URL: {dataset.url}")
print(dataset.description)
This is dataset 'letter', the target feature is 'class' URL: https://www.openml.org/data/v1/download/6/letter.arff **Author**: David J. Slate **Source**: [UCI](https://archive.ics.uci.edu/ml/datasets/Letter+Recognition) - 01-01-1991 **Please cite**: P. W. Frey and D. J. Slate. "Letter Recognition Using Holland-style Adaptive Classifiers". Machine Learning 6(2), 1991 1. TITLE: Letter Image Recognition Data The objective is to identify each of a large number of black-and-white rectangular pixel displays as one of the 26 capital letters in the English alphabet. The character images were based on 20 different fonts and each letter within these 20 fonts was randomly distorted to produce a file of 20,000 unique stimuli. Each stimulus was converted into 16 primitive numerical attributes (statistical moments and edge counts) which were then scaled to fit into a range of integer values from 0 through 15. We typically train on the first 16000 items and then use the resulting model to predict the letter category for the remaining 4000. See the article cited above for more details.
# Extract the data from the OpenML object as numpy arrays.
X, y, categorical_indicator, attribute_names = dataset.get_data(
    dataset_format="array", target=dataset.default_target_attribute)

# NOTE: `dataset` is rebound here from the OpenML object to a DataFrame;
# every later cell relies on it being the DataFrame.
dataset = pd.DataFrame(X, columns=attribute_names)
dataset["target"] = y

# Quick look at shape, first rows and column names.
for info in (dataset.shape, dataset.head(), dataset.columns.values):
    print()
    print(info)
(20000, 17) x-box y-box width high onpix x-bar y-bar x2bar y2bar xybar x2ybr \ 0 2.0 4.0 4.0 3.0 2.0 7.0 8.0 2.0 9.0 11.0 7.0 1 4.0 7.0 5.0 5.0 5.0 5.0 9.0 6.0 4.0 8.0 7.0 2 7.0 10.0 8.0 7.0 4.0 8.0 8.0 5.0 10.0 11.0 2.0 3 4.0 9.0 5.0 7.0 4.0 7.0 7.0 13.0 1.0 7.0 6.0 4 6.0 7.0 8.0 5.0 4.0 7.0 6.0 3.0 7.0 10.0 7.0 xy2br x-ege xegvy y-ege yegvx target 0 7.0 1.0 8.0 5.0 6.0 25 1 9.0 2.0 9.0 7.0 10.0 15 2 8.0 2.0 5.0 5.0 10.0 18 3 8.0 3.0 8.0 0.0 8.0 7 4 9.0 3.0 8.0 3.0 7.0 7 ['x-box' 'y-box' 'width' 'high' 'onpix' 'x-bar' 'y-bar' 'x2bar' 'y2bar' 'xybar' 'x2ybr' 'xy2br' 'x-ege' 'xegvy' 'y-ege' 'yegvx' 'target']
# Total count of missing values across the whole frame (0 means clean data).
missing_total = dataset.isnull().sum().sum()
print()
print(missing_total)
# group by 'target'
#print()
#print(dataset.groupby('target').count())
0
# Hold out 25% of rows as truly unseen data; the remaining 75% is for modeling.
data = dataset.sample(frac=0.75, random_state=1234)
data_unseen = dataset.drop(data.index)
# Give both frames clean 0..n-1 indices.
for frame in (data, data_unseen):
    frame.reset_index(inplace=True, drop=True)
print(f'Data for Modeling: {data.shape}')
print(f'Unseen Data For Predictions: {data_unseen.shape}')
Data for Modeling: (15000, 17) Unseen Data For Predictions: (5000, 17)
import pandas_profiling
#df.profile_report()
#import sweetviz as sv
#sweet_report = sv.analyze(df)
#sweet_report.show_notebook(layout='vertical', w=880, h=1000,scale=0.8)
# Separate features and target. pop() removes the 'target' column from
# `dataset` in place, so X (an alias of the same frame) holds features only.
y = dataset.pop('target')
X = dataset
for arr in (y, X):
    print(arr.shape)
(20000,) (20000, 16)
# 80/20 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.20, random_state=42)

# Show shape and head of each split, in the same order as before.
for title, features, labels in (("Training Dataset:", X_train, y_train),
                                ("\n\nTesting Dataset:", X_test, y_test)):
    print(); print(title)
    print(); print(features.shape)
    print(); print(features.head())
    print(); print(labels.shape)
    print(); print(labels.head())
Training Dataset: (16000, 16) x-box y-box width high onpix x-bar y-bar x2bar y2bar xybar \ 5894 4.0 10.0 4.0 7.0 3.0 7.0 6.0 15.0 2.0 7.0 3728 2.0 3.0 4.0 5.0 1.0 8.0 8.0 4.0 2.0 6.0 8958 10.0 15.0 10.0 8.0 5.0 8.0 6.0 5.0 6.0 10.0 7671 6.0 8.0 9.0 6.0 6.0 10.0 6.0 3.0 6.0 10.0 5999 3.0 6.0 6.0 4.0 5.0 5.0 9.0 2.0 3.0 10.0 x2ybr xy2br x-ege xegvy y-ege yegvx 5894 8.0 8.0 3.0 8.0 0.0 8.0 3728 13.0 8.0 3.0 10.0 0.0 8.0 8958 3.0 6.0 6.0 6.0 6.0 10.0 7671 3.0 8.0 4.0 8.0 5.0 10.0 5999 8.0 7.0 5.0 9.0 3.0 4.0 (16000,) 5894 7 3728 21 8958 3 7671 7 5999 5 Name: target, dtype: int64 Testing Dataset: (4000, 16) x-box y-box width high onpix x-bar y-bar x2bar y2bar xybar \ 10650 2.0 1.0 2.0 2.0 1.0 6.0 7.0 4.0 8.0 7.0 2041 1.0 9.0 0.0 6.0 0.0 7.0 7.0 4.0 4.0 7.0 8668 2.0 8.0 3.0 6.0 1.0 7.0 7.0 0.0 8.0 14.0 1114 5.0 11.0 7.0 8.0 6.0 7.0 7.0 8.0 5.0 6.0 13902 1.0 0.0 2.0 1.0 0.0 7.0 14.0 1.0 4.0 7.0 x2ybr xy2br x-ege xegvy y-ege yegvx 10650 6.0 11.0 3.0 8.0 5.0 9.0 2041 6.0 8.0 0.0 8.0 0.0 8.0 8668 6.0 9.0 0.0 8.0 1.0 8.0 1114 7.0 11.0 6.0 7.0 5.0 7.0 13902 10.0 8.0 0.0 8.0 0.0 8.0 (4000,) 10650 10 2041 8 8668 8 1114 14 13902 19 Name: target, dtype: int64
# Labels are still integer class codes at this point.
for labels in (y_train, y_test):
    print(labels.dtype)
int64 int64
# Cast the integer class codes to strings so AutoKeras treats the task
# as classification over categorical labels.
y_train, y_test = y_train.astype('str'), y_test.astype('str')
print(y_train.dtype)
print(y_test.dtype)
object object
# It tries 10 different models.
clf = ak.StructuredDataClassifier(overwrite=True, max_trials=10)
#clf = ak.StructuredDataRegressor(overwrite=True, max_trials=10)
# Feed the structured data classifier with training data; 15% of the
# training rows are held out as a validation set during the search.
clf.fit(
    X_train,
    y_train,
    validation_split=0.15,
    epochs=100,
    batch_size=32,
    verbose=1,
)
print()
print("Model training is complete ... ... ...")
Trial 10 Complete [00h 01m 32s] val_accuracy: 0.7204166650772095 Best val_accuracy So Far: 0.7770833373069763 Total elapsed time: 00h 13m 27s Epoch 1/100 500/500 [==============================] - 1s 2ms/step - loss: 2.9102 - accuracy: 0.1626 Epoch 2/100 500/500 [==============================] - 1s 2ms/step - loss: 1.7481 - accuracy: 0.4964 Epoch 3/100 500/500 [==============================] - 1s 2ms/step - loss: 1.5004 - accuracy: 0.5695 Epoch 4/100 500/500 [==============================] - 1s 2ms/step - loss: 1.3599 - accuracy: 0.6053 Epoch 5/100 500/500 [==============================] - 1s 2ms/step - loss: 1.2580 - accuracy: 0.6328 Epoch 6/100 500/500 [==============================] - 1s 2ms/step - loss: 1.1840 - accuracy: 0.6545 Epoch 7/100 500/500 [==============================] - 1s 2ms/step - loss: 1.1264 - accuracy: 0.6685 Epoch 8/100 500/500 [==============================] - 1s 2ms/step - loss: 1.0789 - accuracy: 0.6827 Epoch 9/100 500/500 [==============================] - 1s 2ms/step - loss: 1.0401 - accuracy: 0.6930 Epoch 10/100 500/500 [==============================] - 1s 2ms/step - loss: 1.0046 - accuracy: 0.7049 Epoch 11/100 500/500 [==============================] - 1s 2ms/step - loss: 0.9722 - accuracy: 0.7139 Epoch 12/100 500/500 [==============================] - 1s 2ms/step - loss: 0.9448 - accuracy: 0.7216 Epoch 13/100 500/500 [==============================] - 1s 2ms/step - loss: 0.9205 - accuracy: 0.7290 Epoch 14/100 500/500 [==============================] - 1s 2ms/step - loss: 0.8982 - accuracy: 0.7373 Epoch 15/100 500/500 [==============================] - 1s 2ms/step - loss: 0.8790 - accuracy: 0.7394 Epoch 16/100 500/500 [==============================] - 1s 2ms/step - loss: 0.8618 - accuracy: 0.7451 Epoch 17/100 500/500 [==============================] - 1s 2ms/step - loss: 0.8458 - accuracy: 0.7479 Epoch 18/100 500/500 [==============================] - 1s 2ms/step - loss: 0.8308 - accuracy: 0.7527 Epoch 19/100 500/500 
[==============================] - 1s 2ms/step - loss: 0.8179 - accuracy: 0.7568 Epoch 20/100 500/500 [==============================] - 1s 2ms/step - loss: 0.8060 - accuracy: 0.7600 Epoch 21/100 500/500 [==============================] - 1s 2ms/step - loss: 0.7937 - accuracy: 0.7638 Epoch 22/100 500/500 [==============================] - 1s 2ms/step - loss: 0.7828 - accuracy: 0.7673 Epoch 23/100 500/500 [==============================] - 1s 2ms/step - loss: 0.7726 - accuracy: 0.7696 Epoch 24/100 500/500 [==============================] - 1s 2ms/step - loss: 0.7627 - accuracy: 0.7727 Epoch 25/100 500/500 [==============================] - 1s 2ms/step - loss: 0.7539 - accuracy: 0.7744 Epoch 26/100 500/500 [==============================] - 1s 2ms/step - loss: 0.7444 - accuracy: 0.7781 Epoch 27/100 500/500 [==============================] - 1s 2ms/step - loss: 0.7352 - accuracy: 0.7789 Epoch 28/100 500/500 [==============================] - 1s 2ms/step - loss: 0.7273 - accuracy: 0.7817 Epoch 29/100 500/500 [==============================] - 1s 2ms/step - loss: 0.7191 - accuracy: 0.7816 Epoch 30/100 500/500 [==============================] - 1s 2ms/step - loss: 0.7118 - accuracy: 0.7849 Epoch 31/100 500/500 [==============================] - 1s 2ms/step - loss: 0.7046 - accuracy: 0.7871 Epoch 32/100 500/500 [==============================] - 1s 2ms/step - loss: 0.6976 - accuracy: 0.7889 Epoch 33/100 500/500 [==============================] - 1s 2ms/step - loss: 0.6913 - accuracy: 0.7906 Epoch 34/100 500/500 [==============================] - 1s 2ms/step - loss: 0.6855 - accuracy: 0.7943 Epoch 35/100 500/500 [==============================] - 1s 2ms/step - loss: 0.6798 - accuracy: 0.7948 Epoch 36/100 500/500 [==============================] - 1s 2ms/step - loss: 0.6742 - accuracy: 0.7974 Epoch 37/100 500/500 [==============================] - 1s 2ms/step - loss: 0.6688 - accuracy: 0.7973 Epoch 38/100 500/500 [==============================] - 1s 2ms/step - loss: 0.6637 
- accuracy: 0.7995 Epoch 39/100 500/500 [==============================] - 1s 2ms/step - loss: 0.6584 - accuracy: 0.8017 Epoch 40/100 500/500 [==============================] - 1s 2ms/step - loss: 0.6534 - accuracy: 0.8020 Epoch 41/100 500/500 [==============================] - 1s 2ms/step - loss: 0.6477 - accuracy: 0.8037 Epoch 42/100 500/500 [==============================] - 1s 2ms/step - loss: 0.6437 - accuracy: 0.8039 Epoch 43/100 500/500 [==============================] - 1s 2ms/step - loss: 0.6388 - accuracy: 0.8058 Epoch 44/100 500/500 [==============================] - 1s 2ms/step - loss: 0.6341 - accuracy: 0.8060 Epoch 45/100 500/500 [==============================] - 1s 2ms/step - loss: 0.6301 - accuracy: 0.8083 Epoch 46/100 500/500 [==============================] - 1s 2ms/step - loss: 0.6257 - accuracy: 0.8089 Epoch 47/100 500/500 [==============================] - 1s 2ms/step - loss: 0.6212 - accuracy: 0.8098 Epoch 48/100 500/500 [==============================] - 1s 2ms/step - loss: 0.6181 - accuracy: 0.8099 Epoch 49/100 500/500 [==============================] - 1s 2ms/step - loss: 0.6143 - accuracy: 0.8115 Epoch 50/100 500/500 [==============================] - 1s 2ms/step - loss: 0.6109 - accuracy: 0.8112 Epoch 51/100 500/500 [==============================] - 1s 1ms/step - loss: 0.6075 - accuracy: 0.8131 Epoch 52/100 500/500 [==============================] - 1s 2ms/step - loss: 0.6037 - accuracy: 0.8143 Epoch 53/100 500/500 [==============================] - 1s 1ms/step - loss: 0.6004 - accuracy: 0.8150 Epoch 54/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5969 - accuracy: 0.8161 Epoch 55/100 500/500 [==============================] - 1s 2ms/step - loss: 0.5941 - accuracy: 0.8178 Epoch 56/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5911 - accuracy: 0.8188 Epoch 57/100 500/500 [==============================] - 1s 2ms/step - loss: 0.5881 - accuracy: 0.8194 Epoch 58/100 500/500 
[==============================] - 1s 1ms/step - loss: 0.5850 - accuracy: 0.8203 Epoch 59/100 500/500 [==============================] - 1s 2ms/step - loss: 0.5820 - accuracy: 0.8205 Epoch 60/100 500/500 [==============================] - 1s 2ms/step - loss: 0.5799 - accuracy: 0.8222 Epoch 61/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5772 - accuracy: 0.8228 Epoch 62/100 500/500 [==============================] - 1s 2ms/step - loss: 0.5747 - accuracy: 0.8226 Epoch 63/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5716 - accuracy: 0.8233 Epoch 64/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5688 - accuracy: 0.8234 Epoch 65/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5663 - accuracy: 0.8256 Epoch 66/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5640 - accuracy: 0.8252 Epoch 67/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5617 - accuracy: 0.8269 Epoch 68/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5596 - accuracy: 0.8271 Epoch 69/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5574 - accuracy: 0.8282 Epoch 70/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5550 - accuracy: 0.8282 Epoch 71/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5528 - accuracy: 0.8301 Epoch 72/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5502 - accuracy: 0.8316 Epoch 73/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5484 - accuracy: 0.8333 Epoch 74/100 500/500 [==============================] - 1s 2ms/step - loss: 0.5463 - accuracy: 0.8335 Epoch 75/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5439 - accuracy: 0.8337 Epoch 76/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5421 - accuracy: 0.8343 Epoch 77/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5405 
- accuracy: 0.8347 Epoch 78/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5388 - accuracy: 0.8342 Epoch 79/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5370 - accuracy: 0.8359 Epoch 80/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5354 - accuracy: 0.8359 Epoch 81/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5338 - accuracy: 0.8363 Epoch 82/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5324 - accuracy: 0.8363 Epoch 83/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5313 - accuracy: 0.8364 Epoch 84/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5298 - accuracy: 0.8367 Epoch 85/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5282 - accuracy: 0.8373 Epoch 86/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5268 - accuracy: 0.8369 Epoch 87/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5249 - accuracy: 0.8385 Epoch 88/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5237 - accuracy: 0.8385 Epoch 89/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5216 - accuracy: 0.8390 Epoch 90/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5201 - accuracy: 0.8390 Epoch 91/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5190 - accuracy: 0.8402 Epoch 92/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5170 - accuracy: 0.8405 Epoch 93/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5155 - accuracy: 0.8411 Epoch 94/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5141 - accuracy: 0.8420 Epoch 95/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5133 - accuracy: 0.8412 Epoch 96/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5120 - accuracy: 0.8418 Epoch 97/100 500/500 
[==============================] - 1s 1ms/step - loss: 0.5114 - accuracy: 0.8418 Epoch 98/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5099 - accuracy: 0.8416 Epoch 99/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5089 - accuracy: 0.8421 Epoch 100/100 500/500 [==============================] - 1s 1ms/step - loss: 0.5072 - accuracy: 0.8426
2022-03-24 04:11:02.007844: W tensorflow/python/util/util.cc:348] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.
Model training is complete ... ... ...
# Bare expression: in a notebook this displays the repr of the fitted
# AutoKeras classifier; as a plain script line it has no effect.
clf
<autokeras.tasks.structured_data.StructuredDataClassifier at 0x7f497a496890>
# Evaluate the best model with testing data; returns [loss, accuracy].
print(); print();
eval_results = clf.evaluate(X_test, y_test, verbose=1)
print(eval_results)
125/125 [==============================] - 0s 1ms/step - loss: 0.7690 - accuracy: 0.7815 [0.7690320611000061, 0.781499981880188]
# For Classification
import scikitplot as skplt
from sklearn.metrics import accuracy_score, classification_report
from sklearn.metrics import cohen_kappa_score, confusion_matrix

# Predict with the best model found by the search.
predicted_y = clf.predict(X_test, verbose=0)

# Score the predictions against the held-out labels.
accuracy = accuracy_score(y_test, predicted_y)
report = classification_report(y_test, predicted_y)
cmatrix = confusion_matrix(y_test, predicted_y)

print(); print('Testing Results of the trained model: ')
print(); print('Accuracy : ', accuracy)
#print(); print('Confusion Matrix :\n', cmatrix)
print(); print('Classification Report :\n', report)

# Confusion matrix plot.
skplt.metrics.plot_confusion_matrix(y_test, predicted_y, figsize=(15, 15))
plt.show()
Testing Results of the trained model: Accuracy : 0.7815 Classification Report : precision recall f1-score support 0 0.92 0.86 0.89 153 1 0.58 0.78 0.67 139 10 0.66 0.58 0.62 153 11 0.81 0.89 0.84 166 12 0.87 0.89 0.88 156 13 0.81 0.79 0.80 143 14 0.70 0.73 0.72 158 15 0.80 0.88 0.84 173 16 0.71 0.69 0.70 161 17 0.76 0.65 0.70 144 18 0.74 0.74 0.74 157 19 0.82 0.85 0.83 169 2 0.81 0.81 0.81 142 20 0.72 0.89 0.79 156 21 0.87 0.81 0.84 155 22 0.84 0.90 0.87 143 23 0.73 0.81 0.77 153 24 0.87 0.80 0.83 173 25 0.92 0.86 0.89 142 3 0.74 0.63 0.68 166 4 0.83 0.74 0.78 154 5 0.75 0.74 0.74 156 6 0.68 0.66 0.67 145 7 0.79 0.68 0.74 146 8 0.93 0.85 0.89 165 9 0.75 0.78 0.76 132 accuracy 0.78 4000 macro avg 0.78 0.78 0.78 4000 weighted avg 0.79 0.78 0.78 4000
# For Regression
#print("********************************************************************")
#print(format(' Validation using Validation dataset ','*^65'))
#print("********************************************************************")
#import numpy as np
#import seaborn as sns
#import matplotlib.pyplot as plt
#import scikitplot as skplt
#from sklearn.metrics import r2_score, mean_squared_error
# Predict with the best model
#predicted_val = clf.predict(X_test, verbose = 0)
# Evaluate the skill of the Trained model
#r2 = r2_score(y_test, predicted_val)
#rmse = mean_squared_error(y_test, predicted_val)
#print(); print('Testing Results of the trained model: ')
#print(); print('R Squared (r2) Score : ', r2)
#print(); print('RMSE Score : ', np.sqrt(rmse))
#print()
#plt.figure(figsize = (8,8))
#sns.regplot(y_test, predicted_val, marker = "+")
#plt.show()
# Export the best pipeline found by AutoKeras as a standard Keras model
# and print its layer-by-layer summary.
model = clf.export_model()
model.summary()
Model: "model" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) [(None, 16)] 0 _________________________________________________________________ multi_category_encoding (Mul (None, 16) 0 _________________________________________________________________ normalization (Normalization (None, 16) 33 _________________________________________________________________ dense (Dense) (None, 32) 544 _________________________________________________________________ re_lu (ReLU) (None, 32) 0 _________________________________________________________________ dense_1 (Dense) (None, 32) 1056 _________________________________________________________________ re_lu_1 (ReLU) (None, 32) 0 _________________________________________________________________ dense_2 (Dense) (None, 32) 1056 _________________________________________________________________ re_lu_2 (ReLU) (None, 32) 0 _________________________________________________________________ dense_3 (Dense) (None, 26) 858 _________________________________________________________________ classification_head_1 (Softm (None, 26) 0 ================================================================= Total params: 3,547 Trainable params: 3,514 Non-trainable params: 33 _________________________________________________________________
from tensorflow.keras.utils import plot_model
# Render the model graph (displayed inline in a notebook).
plot_model(model)
print(type(model))
# Bug fix: the try/except bodies had lost their indentation, which is a
# SyntaxError in Python. Prefer the TensorFlow SavedModel format; fall back
# to HDF5 if the installed TF/Keras version rejects it (broad except is a
# deliberate best-effort fallback here).
try:
    model.save("best_keras_model", save_format="tf")
except Exception:
    model.save("best_keras_model.h5")
<class 'tensorflow.python.keras.engine.functional.Functional'>
# Reload the saved model. AutoKeras custom layers (e.g. MultiCategoryEncoding)
# must be registered via ak.CUSTOM_OBJECTS for deserialization to succeed.
model = tf.keras.models.load_model('best_keras_model', custom_objects=ak.CUSTOM_OBJECTS)
model.summary()
Model: "model" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) [(None, 16)] 0 _________________________________________________________________ multi_category_encoding (Mul (None, 16) 0 _________________________________________________________________ normalization (Normalization (None, 16) 33 _________________________________________________________________ dense (Dense) (None, 32) 544 _________________________________________________________________ re_lu (ReLU) (None, 32) 0 _________________________________________________________________ dense_1 (Dense) (None, 32) 1056 _________________________________________________________________ re_lu_1 (ReLU) (None, 32) 0 _________________________________________________________________ dense_2 (Dense) (None, 32) 1056 _________________________________________________________________ re_lu_2 (ReLU) (None, 32) 0 _________________________________________________________________ dense_3 (Dense) (None, 26) 858 _________________________________________________________________ classification_head_1 (Softm (None, 26) 0 ================================================================= Total params: 3,547 Trainable params: 3,514 Non-trainable params: 33 _________________________________________________________________
# In this coding recipe, we discussed how to build a deep learning model in Python with AutoKeras.
# Specifically, we learned the following: