本文不做多餘解釋,主要是銜接上一章TensorFlow-2.x-03-從0開始的多分類邏輯迴歸內容,使用TF-2.x來快速實現多分類邏輯迴歸。
1、獲取/讀取數據集,歸一化數據
# 1. Load Fashion-MNIST and normalize the pixel values.
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
# Scale pixels from [0, 255] to [0, 1] — the section heading promises
# normalization, but the original snippet omitted these two lines
# (they do appear in the full source at the end of the post).
x_train = x_train / 255.0
x_test = x_test / 255.0
print(x_train.shape, y_train.shape)
2、定義及初始化模型
def net():
    """Build the multi-class softmax-regression model.

    Flattens each 28x28 image into a 784-vector, then applies a single
    Dense layer with 10 output units (one per class) and a softmax
    activation so the outputs form a probability distribution.

    Returns:
        tf.keras.Sequential: the uncompiled model.
    """
    # The original snippet lost its indentation (flush-left body) and
    # shadowed the function name with the local variable; both fixed here.
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Flatten(input_shape=(28, 28)))
    # 10 output neurons; softmax is used for multi-class classification.
    model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
    return model
3、定義優化器
optimizer = tf.keras.optimizers.SGD(0.1)
4、模型訓練
# Instantiate the model first — the original walkthrough snippet used
# `model` without ever creating it (only the full source did).
model = net()
model.compile(optimizer=optimizer,
              loss='sparse_categorical_crossentropy',  # cross-entropy loss for integer labels
              metrics=['accuracy'])                    # report accuracy during training
# Train for 5 epochs with mini-batches of 256 samples.
model.fit(x_train, y_train, epochs=5, batch_size=256)
訓練結果:
#5、模型驗證
test_loss, test_acc = model.evaluate(x_test, y_test)
print('Test Acc:',test_acc)
驗證結果:
附上所有源碼:
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from tensorflow import data as tfdata
from tensorflow.keras.datasets import fashion_mnist
# 1. Fetch the dataset and rescale pixel intensities to [0, 1].
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
print(x_train.shape, y_train.shape)
# 2. Define and initialize the model.
def net():
    """Build the multi-class softmax-regression model.

    Flattens each 28x28 image into a 784-vector, then applies a single
    Dense layer with 10 output units (one per class) and a softmax
    activation so the outputs form a probability distribution.

    Returns:
        tf.keras.Sequential: the uncompiled model.
    """
    # The pasted source lost its indentation (flush-left body) and
    # shadowed the function name with the local variable; both fixed here.
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Flatten(input_shape=(28, 28)))
    # 10 output neurons; softmax is used for multi-class classification.
    model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
    return model
# Build the model and print a layer-by-layer summary (output shapes
# and parameter counts) for inspection.
model=net()
model.summary()
# 3. Optimizer: plain stochastic gradient descent, learning rate 0.1.
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
# 4. Compile the model and train it.
model.compile(
    optimizer=optimizer,
    loss='sparse_categorical_crossentropy',  # cross-entropy on integer class labels
    metrics=['accuracy'],                    # track accuracy each epoch
)
# 5 passes over the training set, 256 samples per gradient step.
model.fit(x_train, y_train, epochs=5, batch_size=256)
# 5. Evaluate loss and accuracy on the held-out test set.
scores = model.evaluate(x_test, y_test)
test_loss, test_acc = scores
print('Test Acc:', test_acc)