# Reference: "How to Use the Keras Functional API for Deep Learning"
# Example using a tf.keras Sequential model.
import matplotlib as pl
import matplotlib.pyplot as plt
# %matplotlib inline  # IPython/Jupyter magic; invalid in a plain .py script, so commented out
import scikitplot as skplt
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
from tensorflow import keras
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
# Report interpreter and library versions for reproducibility.
print(tf.__version__)
print(sys.version_info)
for lib in (np, pd, sklearn, tf, keras):
    print(f"{lib.__name__} {lib.__version__}")
# Load Fashion-MNIST and carve the first 5000 training examples off
# as a validation split; the remaining 55000 are used for training.
fashion_mnist = keras.datasets.fashion_mnist
(x_train_all, y_train_all), (x_test, y_test) = fashion_mnist.load_data()

x_valid, y_valid = x_train_all[:5000], y_train_all[:5000]
x_train, y_train = x_train_all[5000:], y_train_all[5000:]

# Sanity-check the split shapes.
for xs, ys in ((x_valid, y_valid), (x_train, y_train), (x_test, y_test)):
    print(xs.shape, ys.shape)
# Standardize pixel values with statistics computed on the TRAINING
# set only; validation and test reuse the fitted mean/variance.
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()


def _scale(images, fit=False):
    """Flatten [N, 28, 28] images to a single column, standardize,
    and restore the [N, 28, 28] shape. `fit=True` fits the scaler."""
    flat = images.astype(np.float32).reshape(-1, 1)
    scaled = scaler.fit_transform(flat) if fit else scaler.transform(flat)
    return scaled.reshape(-1, 28, 28)


x_train_scaled = _scale(x_train, fit=True)
x_valid_scaled = _scale(x_valid)
x_test_scaled = _scale(x_test)
# MLP classifier: Flatten -> 5 x Dense(100, relu)
# -> BatchNormalization -> Dense(10, softmax).
# NOTE(review): a single BatchNormalization after the hidden stack
# reproduces the flattened original; per-layer BN may have been the
# intent before the notebook's indentation was lost — confirm.
hidden = [keras.layers.Dense(100, activation="relu") for _ in range(5)]
model = keras.models.Sequential(
    [keras.layers.Flatten(input_shape=[28, 28])]
    + hidden
    + [keras.layers.BatchNormalization(),
       keras.layers.Dense(10, activation="softmax")]
)

# Integer labels -> sparse categorical cross-entropy; plain SGD, lr=0.001.
model.compile(loss="sparse_categorical_crossentropy",
              optimizer=keras.optimizers.SGD(0.001),
              metrics=["accuracy"])
model.summary()
# Directory for TensorBoard logs and the best-model checkpoint.
logdir = './saved'
# FIX: makedirs(exist_ok=True) replaces the exists()+mkdir pair —
# it is race-free and also creates intermediate directories.
os.makedirs(logdir, exist_ok=True)

# Callbacks: TensorBoard logging, checkpoint of the best model seen
# so far (by validation loss), and early stopping once val_loss fails
# to improve by at least 1e-3 for 5 consecutive epochs.
output_model_file = os.path.join(logdir, "fashion_mnist_model.h5")
callbacks = [
    keras.callbacks.TensorBoard(logdir),
    keras.callbacks.ModelCheckpoint(output_model_file,
                                    save_best_only=True),
    keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3),
]

# Train on the standardized images, validating each epoch.
history = model.fit(x_train_scaled, y_train, epochs=10,
                    validation_data=(x_valid_scaled, y_valid),
                    callbacks=callbacks)
# Predict class probabilities for the test set.
# BUG FIX: predict on x_test_scaled, not raw x_test — the model was
# trained on standardized inputs, so raw pixel values (0-255) would
# produce meaningless predictions.
y_probs = model.predict(x_test_scaled, verbose=0)
# Collapse per-class probabilities into hard class labels.
ypred_classes = np.argmax(y_probs, axis=1)

# BUG FIX: sklearn's confusion_matrix signature is (y_true, y_pred);
# the original passed them reversed, transposing the matrix. Same for
# scikit-plot's plot_confusion_matrix.
matrix = confusion_matrix(y_test, ypred_classes)
print(matrix)
skplt.metrics.plot_confusion_matrix(y_test, ypred_classes, normalize=True)
plt.savefig('./saved/confusion_matrix.png', format='png')
plt.show()
# Scalar test-set metrics. With micro-averaging over all 10 classes,
# precision == recall == f1 == accuracy, but all four are reported
# for completeness.
# accuracy:  (tp + tn) / (p + n)
# precision: tp / (tp + fp)
# recall:    tp / (tp + fn)
# f1:        2*tp / (2*tp + fp + fn)
accuracy = accuracy_score(y_test, ypred_classes)
precision = precision_score(y_test, ypred_classes, average='micro')
recall = recall_score(y_test, ypred_classes, average='micro')
f1 = f1_score(y_test, ypred_classes, average='micro')

print('Accuracy: %f' % accuracy)
print('Precision:', precision)
print('Recall:', recall)
print('F1 score:', f1)