TensorFlow 2.0 Notes 15: Gradient Computation in Convolutional Networks, plus CIFAR100 and VGG13 in Practice!

Understanding differentiation in convolutional neural networks!

1. Computing Gradients (gradient)

  • For a convolutional neural network, how is the gradient actually computed?
  • TensorFlow provides an automatic differentiation tool, and the theory above shows it can do exactly what we need. Differentiating an entire stack of convolutional layers is therefore entirely feasible and not complicated: TensorFlow does all of that work for us. All we need is to understand, in principle, why the layers are differentiable, having already worked through the simplest example by hand above. See the sketch below.
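  • A minimal sketch (not from the original notes; the shapes and the toy loss are illustrative only) of tf.GradientTape differentiating through a single conv layer:
import tensorflow as tf

# tf.GradientTape differentiates a Conv2D layer just like a dense layer.
x = tf.random.normal([1, 5, 5, 3])                # one 5x5 RGB input
layer = tf.keras.layers.Conv2D(4, kernel_size=3)  # 4 filters, 3x3 kernel

with tf.GradientTape() as tape:
    out = layer(x)                 # the forward pass is recorded on the tape
    loss = tf.reduce_mean(out**2)  # toy scalar loss

# Gradients w.r.t. the kernel and the bias, computed automatically.
grads = tape.gradient(loss, layer.trainable_variables)
for g, v in zip(grads, layer.trainable_variables):
    print(v.name, g.shape)         # kernel: (3, 3, 3, 4), bias: (4,)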

2. Pooling Layers

2.1 Down-sampling

2.2 Up-sampling

  • Implementing up-sampling in TensorFlow; see the sketch after this list.
  • A brief supplement on ReLU, which comes up when visualizing feature maps.
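  • A minimal sketch (not from the original notes) of down-sampling with MaxPool2D, up-sampling with UpSampling2D, and a ReLU applied on top:
import tensorflow as tf

x = tf.random.normal([1, 4, 4, 3])

# Down-sampling: 2x2 max pooling halves the spatial resolution.
pooled = tf.keras.layers.MaxPool2D(pool_size=2, strides=2)(x)
print(pooled.shape)          # (1, 2, 2, 3)

# Up-sampling: UpSampling2D repeats rows/columns to double the resolution.
up = tf.keras.layers.UpSampling2D(size=2)(pooled)
print(up.shape)              # (1, 4, 4, 3)

# ReLU clips negative activations to zero; the shape is unchanged.
print(tf.nn.relu(up).shape)  # (1, 4, 4, 3)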

3. CIFAR100 and VGG13 in Practice

3.1 Program Flow

  • A 13-layer network (VGG13) is used.
  • First simulate some data for a quick smoke test:
import tensorflow as tf
from tensorflow.keras import layers, optimizers, datasets, Sequential
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
tf.random.set_seed(2345)

# Recall that the Sequential container takes a list of layers; assemble the 13-layer list first.
conv_layers = [  # 5 units of conv + max pooling
    # unit 1 -- 10 conv layers in total (counting only the conv layers)
    layers.Conv2D(64, kernel_size=[3, 3],padding='same', activation='relu'),
    layers.Conv2D(64, kernel_size=[3, 3], padding='same', activation='relu'),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),
    # unit 2
    layers.Conv2D(128, kernel_size=[3, 3], padding='same', activation='relu'),
    layers.Conv2D(128, kernel_size=[3, 3], padding='same', activation='relu'),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),
    # unit 3
    layers.Conv2D(256, kernel_size=[3, 3], padding='same', activation='relu'),
    layers.Conv2D(256, kernel_size=[3, 3], padding='same', activation='relu'),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),
    # unit 4
    layers.Conv2D(512, kernel_size=[3, 3], padding='same', activation='relu'),
    layers.Conv2D(512, kernel_size=[3, 3], padding='same', activation='relu'),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),
    # unit 5
    layers.Conv2D(512, kernel_size=[3, 3], padding='same', activation='relu'),
    layers.Conv2D(512, kernel_size=[3, 3], padding='same', activation='relu'),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

]

def main():
    # Put the layers into the Sequential container
    # input: [b, 32, 32, 3] => output: [b, 1, 1, 512]
    conv_net = Sequential(conv_layers)
    conv_net.build(input_shape=[None, 32, 32, 3])
    x = tf.random.normal([4, 32, 32, 3])
    out = conv_net(x)
    print(out.shape)

if __name__ == '__main__':
    main()
C:\Anaconda3\envs\tf2\python.exe E:/Codes/MyCodes/TF2/TF2_2/cifar100_train.py
(4, 1, 1, 512)

Process finished with exit code 0
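The output confirms that the input [4, 32, 32, 3] is reduced to [4, 1, 1, 512]: each of the five max-pool layers halves the 32x32 spatial size, 32 => 16 => 8 => 4 => 2 => 1. A quick sketch (appended to the script above, so conv_layers and layers are already defined) verifies this unit by unit:
# Trace the spatial shape through the stack, printing after every pool.
x = tf.random.normal([4, 32, 32, 3])
for layer in conv_layers:
    x = layer(x)
    if isinstance(layer, layers.MaxPool2D):
        print(layer.name, x.shape)  # the spatial size halves after each pool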

3.2 Code, Part 1: Computing the Loss

  • Test program
import tensorflow as tf
from tensorflow.keras import layers, optimizers, datasets,Sequential
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
tf.random.set_seed(2345)


# Preprocessing: only a type conversion plus scaling to [0, 1]
def preprocess(x, y):
    x = tf.cast(x, dtype=tf.float32) / 255.
    y = tf.cast(y, dtype=tf.int32)
    return x, y

# Load the dataset
(x, y), (x_test, y_test) = datasets.cifar100.load_data()
# Squeeze the labels from [b, 1] to [b] BEFORE building the datasets,
# so that tf.one_hot below produces [b, 100] rather than [b, 1, 100].
y = tf.squeeze(y)            # or tf.squeeze(y, axis=1)
y_test = tf.squeeze(y_test)
print(x.shape, y.shape, x_test.shape, y_test.shape)

train_db = tf.data.Dataset.from_tensor_slices((x, y))
train_db = train_db.shuffle(1000).map(preprocess).batch(128)

test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_db = test_db.map(preprocess).batch(64)

# Check the shape and value range of one sample batch.
sample = next(iter(train_db))
print('sample:', sample[0].shape, sample[1].shape,
      tf.reduce_min(sample[0]).numpy(), tf.reduce_max(sample[0]).numpy())  # values lie in [0, 1]

# Sequential takes the 13-layer list; this is the first (convolutional) part of the network.
conv_layers = [ # 5 units of conv + max pooling
    # unit 1
    layers.Conv2D(64, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(64, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 2
    layers.Conv2D(128, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(128, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 3
    layers.Conv2D(256, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(256, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 4
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 5
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same')

]


def main():
    # Put the layers into the Sequential container
    # input: [b, 32, 32, 3] => output: [b, 1, 1, 512]
    conv_net = Sequential(conv_layers)
    #conv_net.build(input_shape=[None, 32, 32, 3])
    # x = tf.random.normal([4, 32, 32, 3])
    # out = conv_net(x)
    # print(out.shape)

    # Build the fully connected part; its input is the conv part's output.
    fc_net = Sequential([
        layers.Dense(256, activation=tf.nn.relu),
        layers.Dense(128, activation=tf.nn.relu),
        layers.Dense(100, activation=None),
    ])

    # The single network is effectively written as two parts here,
    conv_net.build(input_shape=[None, 32, 32, 3])
    fc_net.build(input_shape=[None, 512])
    # Create an optimizer
    optimizer = optimizers.Adam(learning_rate=1e-4)  # learning_rate rather than the deprecated lr alias
    conv_net.summary()
    fc_net.summary()
    # The + below concatenates two Python lists into one,
    # e.g. [1, 2] + [3, 4] => [1, 2, 3, 4]
    variables = conv_net.trainable_variables + fc_net.trainable_variables

    for epoch in range(50):

        for step, (x, y) in enumerate(train_db):
            with tf.GradientTape() as tape:
                # [b, 32, 32, 3] => [b, 1, 1, 512]
                out = conv_net(x)
                # then squeeze or reshape to flatten it
                out = tf.reshape(out, [-1, 512])
                # feed it into the fully connected part to get the logits
                # [b, 512] => [b, 100]
                logits = fc_net(out)
                # [b] => [b, 100]: convert to one-hot encoding.
                y_onehot = tf.one_hot(y, depth=100)
                # compute loss
                # [b]
                loss = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)
                loss = tf.reduce_mean(loss)
            # compute the gradients
            grads = tape.gradient(loss, variables)
            # apply the gradient update
            optimizer.apply_gradients(zip(grads, variables))
            if step % 100 == 0:
                print(epoch, step, 'loss: ', float(loss))

if __name__ == '__main__':
    main()
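  • Note that fc_net ends in a Dense layer with activation=None, so it outputs raw logits, and from_logits=True lets the loss apply softmax internally, which is the numerically stable route. A minimal sketch (toy numbers, not from the original run) showing the two forms agree:
import tensorflow as tf

logits = tf.constant([[2.0, 1.0, 0.1]])
y_onehot = tf.one_hot([0], depth=3)

# Preferred: hand raw logits to the loss; it applies softmax internally.
loss_from_logits = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)

# Equivalent but less stable: apply softmax first, keep the default from_logits=False.
probs = tf.nn.softmax(logits)
loss_from_probs = tf.losses.categorical_crossentropy(y_onehot, probs)

print(float(loss_from_logits), float(loss_from_probs))  # the two values match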
  • Test results (only part of the run output is shown):
(50000, 32, 32, 3) (50000,) (10000, 32, 32, 3) (10000,)
sample: (128, 32, 32, 3) (128,) tf.Tensor(0.0, shape=(), dtype=float32) tf.Tensor(1.0, shape=(), dtype=float32)
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              multiple                  1792      
_________________________________________________________________
conv2d_1 (Conv2D)            multiple                  36928     
_________________________________________________________________
max_pooling2d (MaxPooling2D) multiple                  0         
_________________________________________________________________
conv2d_2 (Conv2D)            multiple                  73856     
_________________________________________________________________
conv2d_3 (Conv2D)            multiple                  147584    
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 multiple                  0         
_________________________________________________________________
conv2d_4 (Conv2D)            multiple                  295168    
_________________________________________________________________
conv2d_5 (Conv2D)            multiple                  590080    
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 multiple                  0         
_________________________________________________________________
conv2d_6 (Conv2D)            multiple                  1180160   
_________________________________________________________________
conv2d_7 (Conv2D)            multiple                  2359808   
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 multiple                  0         
_________________________________________________________________
conv2d_8 (Conv2D)            multiple                  2359808   
_________________________________________________________________
conv2d_9 (Conv2D)            multiple                  2359808   
_________________________________________________________________
max_pooling2d_4 (MaxPooling2 multiple                  0         
=================================================================
Total params: 9,404,992
Trainable params: 9,404,992
Non-trainable params: 0
_________________________________________________________________
Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense (Dense)                multiple                  131328    
_________________________________________________________________
dense_1 (Dense)              multiple                  32896     
_________________________________________________________________
dense_2 (Dense)              multiple                  12900     
=================================================================
Total params: 177,124
Trainable params: 177,124
Non-trainable params: 0
_________________________________________________________________
0 0 loss:  4.605030059814453
0 100 loss:  4.578884601593018
0 200 loss:  4.312553405761719
0 300 loss:  4.257984161376953
1 0 loss:  4.129902362823486
1 100 loss:  3.9750022888183594
1 200 loss:  3.9673705101013184
1 300 loss:  3.909595251083374
2 0 loss:  3.803706407546997
2 100 loss:  3.644826889038086
2 200 loss:  3.641634464263916
2 300 loss:  3.663766860961914
3 0 loss:  3.563760280609131
3 100 loss:  3.371027946472168
3 200 loss:  3.4057083129882812
3 300 loss:  3.4570555686950684
4 0 loss:  3.3294296264648438
4 100 loss:  3.088827133178711
4 200 loss:  3.2294821739196777
4 300 loss:  3.3365983963012695
5 0 loss:  3.099618911743164
5 100 loss:  2.874321460723877
5 200 loss:  3.0241520404815674
5 300 loss:  3.1384060382843018
6 0 loss:  2.960221290588379
6 100 loss:  2.6793859004974365
6 200 loss:  2.801387310028076
6 300 loss:  2.914591073989868
7 0 loss:  2.8104686737060547
7 100 loss:  2.4934065341949463
7 200 loss:  2.6673338413238525
7 300 loss:  2.7357730865478516
8 0 loss:  2.6788723468780518
8 100 loss:  2.3706564903259277
8 200 loss:  2.561516046524048
8 300 loss:  2.5908799171447754
9 0 loss:  2.6021549701690674
9 100 loss:  2.3007476329803467
9 200 loss:  2.3651485443115234
9 300 loss:  2.3753061294555664
10 0 loss:  2.4877257347106934
10 100 loss:  2.154949188232422
10 200 loss:  2.177879810333252
10 300 loss:  2.2868564128875732
11 0 loss:  2.4435768127441406
11 100 loss:  2.0976648330688477
11 200 loss:  2.083190441131592
11 300 loss:  2.255058526992798
12 0 loss:  2.197577714920044
12 100 loss:  1.991114616394043
12 200 loss:  2.0137827396392822
12 300 loss:  2.2239105701446533
13 0 loss:  2.0630240440368652
13 100 loss:  1.8681172132492065
13 200 loss:  1.8188812732696533
13 300 loss:  1.9735732078552246
14 0 loss:  1.974722981452942
14 100 loss:  1.745096206665039
14 200 loss:  1.850353479385376
14 300 loss:  1.7462306022644043
15 0 loss:  1.8928546905517578
15 100 loss:  1.6059191226959229
15 200 loss:  1.8848950862884521
15 300 loss:  1.6559256315231323
16 0 loss:  1.9011915922164917
16 100 loss:  1.6085309982299805
16 200 loss:  1.7225948572158813
16 300 loss:  1.2713980674743652
17 0 loss:  1.7368441820144653
17 100 loss:  1.604355812072754
17 200 loss:  1.3732177019119263
17 300 loss:  1.2418514490127563
18 0 loss:  1.5340590476989746
18 100 loss:  1.3081270456314087
18 200 loss:  1.1169211864471436
18 300 loss:  1.2649688720703125
19 0 loss:  1.3473944664001465
19 100 loss:  1.0636374950408936
19 200 loss:  1.0549581050872803
19 300 loss:  1.2627246379852295
20 0 loss:  1.1263091564178467
20 100 loss:  0.8297935724258423
20 200 loss:  0.7956967353820801
20 300 loss:  1.1769788265228271
21 0 loss:  0.7848083972930908
21 100 loss:  0.8844348192214966
21 200 loss:  1.052968144416809
21 300 loss:  0.8638783693313599
22 0 loss:  1.0461111068725586
22 100 loss:  0.4385879635810852
22 200 loss:  1.7429406642913818
22 300 loss:  0.8668215274810791
23 0 loss:  0.7342086434364319
23 100 loss:  0.5434688329696655
23 200 loss:  0.881458044052124
23 300 loss:  0.9320999383926392
24 0 loss:  0.435170441865921
24 100 loss:  0.5420732498168945
24 200 loss:  0.8091937303543091
24 300 loss:  0.6172211766242981
25 0 loss:  0.6360988020896912
25 100 loss:  0.512031614780426
25 200 loss:  0.8759192228317261
25 300 loss:  0.890602171421051
26 0 loss:  0.35719141364097595
26 100 loss:  0.20831003785133362
26 200 loss:  0.5368062257766724
26 300 loss:  0.43766921758651733
27 0 loss:  0.11938989162445068
27 100 loss:  0.15537163615226746
27 200 loss:  0.3271629810333252
27 300 loss:  0.26201626658439636
28 0 loss:  0.17002826929092407
28 100 loss:  0.2737042009830475
28 200 loss:  0.23919501900672913
28 300 loss:  0.250547856092453
29 0 loss:  0.07753895968198776
29 100 loss:  0.44712144136428833
29 200 loss:  0.26453500986099243
29 300 loss:  0.34964966773986816
30 0 loss:  0.12882404029369354
30 100 loss:  0.40466374158859253
30 200 loss:  0.130931094288826
30 300 loss:  0.21530690789222717
31 0 loss:  0.13348370790481567
31 100 loss:  0.3387177586555481
31 200 loss:  0.14717939496040344
31 300 loss:  0.12074695527553558
32 0 loss:  0.17923486232757568
32 100 loss:  0.1532021164894104
32 200 loss:  0.14622066915035248
32 300 loss:  0.18624472618103027
33 0 loss:  0.07511971890926361
33 100 loss:  0.1993311047554016
33 200 loss:  0.06216352432966232
33 300 loss:  0.1094805970788002
34 0 loss:  0.04453448951244354
34 100 loss:  0.1695408821105957
34 200 loss:  0.11287301033735275
34 300 loss:  0.10849308967590332
35 0 loss:  0.08756822347640991
35 100 loss:  0.1913621425628662
35 200 loss:  0.04925493150949478
35 300 loss:  0.15203890204429626
36 0 loss:  0.09226275980472565
36 100 loss:  0.06079081445932388
36 200 loss:  0.12670479714870453
36 300 loss:  0.07804378867149353
37 0 loss:  0.08540204167366028
37 100 loss:  0.1706801950931549
37 200 loss:  0.11071468889713287
37 300 loss:  0.06934467703104019
38 0 loss:  0.11754795908927917
38 100 loss:  0.07747013121843338
38 200 loss:  0.13458500802516937
38 300 loss:  0.11098429560661316
39 0 loss:  0.07163702696561813
39 100 loss:  0.06907875090837479
39 200 loss:  0.0505969263613224
39 300 loss:  0.06996821612119675
40 0 loss:  0.08378084748983383
40 100 loss:  0.11908353865146637
40 200 loss:  0.05944759398698807
40 300 loss:  0.08038626611232758
41 0 loss:  0.18636730313301086
41 100 loss:  0.061627257615327835
41 200 loss:  0.07231491059064865
41 300 loss:  0.07569274306297302
42 0 loss:  0.07175879180431366
42 100 loss:  0.10492654144763947
42 200 loss:  0.18038496375083923
42 300 loss:  0.055353861302137375
43 0 loss:  0.060211874544620514
43 100 loss:  0.1705894023180008
43 200 loss:  0.07525692135095596
43 300 loss:  0.13717079162597656
44 0 loss:  0.1381378471851349
44 100 loss:  0.08847591280937195
44 200 loss:  0.05310193821787834
44 300 loss:  0.15411436557769775
45 0 loss:  0.10627441108226776
45 100 loss:  0.20189404487609863
45 200 loss:  0.07906632125377655
45 300 loss:  0.030647413805127144
46 0 loss:  0.07269147783517838
46 100 loss:  0.0759992003440857
46 200 loss:  0.0839959979057312
46 300 loss:  0.06977201253175735
47 0 loss:  0.1642918586730957
47 100 loss:  0.15189684927463531
47 200 loss:  0.23271062970161438
47 300 loss:  0.08028055727481842
48 0 loss:  0.029950547963380814
48 100 loss:  0.09120866656303406
48 200 loss:  0.13170400261878967
48 300 loss:  0.11029703170061111
49 0 loss:  0.04037533327937126
49 100 loss:  0.14999404549598694
49 200 loss:  0.07472165673971176
49 300 loss:  0.09982065856456757

Process finished with exit code 0


3.3 Code, Part 2: Test Accuracy

  • The test pass here runs after each full iteration over the training set. Its placement is up to us: we could just as well evaluate after every epoch, or after every 100 steps, as suits the task.
import  tensorflow as tf
from    tensorflow.keras import layers, optimizers, datasets, Sequential
import  os

os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
tf.random.set_seed(2345)

# Sequential takes the 13-layer list; this is the first (convolutional) part of the network.
conv_layers = [ # 5 units of conv + max pooling
    # unit 1
    layers.Conv2D(64, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(64, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 2
    layers.Conv2D(128, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(128, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 3
    layers.Conv2D(256, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(256, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 4
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 5
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same')

]

# Preprocessing: only a type conversion plus scaling to [0, 1]
def preprocess(x, y):
    x = tf.cast(x, dtype=tf.float32) / 255.
    y = tf.cast(y, dtype=tf.int32)
    return x, y

# Load the dataset; squeeze the labels from [b, 1] to [b] before building the datasets
(x, y), (x_test, y_test) = datasets.cifar100.load_data()
y = tf.squeeze(y)            # or tf.squeeze(y, axis=1)
y_test = tf.squeeze(y_test)
print(x.shape, y.shape, x_test.shape, y_test.shape)

train_db = tf.data.Dataset.from_tensor_slices((x, y))
train_db = train_db.shuffle(1000).map(preprocess).batch(128)

test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_db = test_db.map(preprocess).batch(128)

# Check the shape and value range of one sample batch.
sample = next(iter(train_db))
print('sample:', sample[0].shape, sample[1].shape,
      tf.reduce_min(sample[0]), tf.reduce_max(sample[0]))  # values lie in [0, 1]

def main():

    # Put the layers into the Sequential container
    # input: [b, 32, 32, 3] => output: [b, 1, 1, 512]
    conv_net = Sequential(conv_layers)
    #conv_net.build(input_shape=[None, 32, 32, 3])
    # x = tf.random.normal([4, 32, 32, 3])
    # out = conv_net(x)
    # print(out.shape)

    # Build the fully connected part; its input is the conv part's output.
    fc_net = Sequential([
        layers.Dense(256, activation=tf.nn.relu),
        layers.Dense(128, activation=tf.nn.relu),
        layers.Dense(100, activation=None),
    ])

    # The single network is effectively written as two parts here,
    conv_net.build(input_shape=[None, 32, 32, 3])
    fc_net.build(input_shape=[None, 512])
    # Create an optimizer
    optimizer = optimizers.Adam(learning_rate=1e-4)
    conv_net.summary()
    fc_net.summary()

    # The + below concatenates two Python lists into one,
    # e.g. [1, 2] + [3, 4] => [1, 2, 3, 4]
    variables = conv_net.trainable_variables + fc_net.trainable_variables
    for epoch in range(50):

        for step, (x, y) in enumerate(train_db):
            with tf.GradientTape() as tape:
                # [b, 32, 32, 3] => [b, 1, 1, 512]
                out = conv_net(x)
                # then squeeze or reshape to flatten: => [b, 512]
                out = tf.reshape(out, [-1, 512])
                # feed it into the fully connected part to get the logits
                # [b, 512] => [b, 100]
                logits = fc_net(out)
                # [b] => [b, 100]: convert to one-hot encoding.
                y_onehot = tf.one_hot(y, depth=100)
                # compute loss; the result has shape [b]
                loss = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)
                loss = tf.reduce_mean(loss)

            # compute the gradients
            grads = tape.gradient(loss, variables)
            # apply the gradient update
            optimizer.apply_gradients(zip(grads, variables))

            if step % 100 == 0:
                print(epoch, step, 'loss:', float(loss))
                
        # run the test pass
        total_num = 0
        total_correct = 0
        for x,y in test_db:

            out = conv_net(x)
            out = tf.reshape(out, [-1, 512])
            logits = fc_net(out)
            # predicted probabilities
            prob = tf.nn.softmax(logits, axis=1)
            pred = tf.argmax(prob, axis=1)      # recall that pred is int64 and needs a cast
            pred = tf.cast(pred, dtype=tf.int32)

            # compare the predictions pred against the ground-truth labels.
            correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
            correct = tf.reduce_sum(correct)

            total_num += x.shape[0]
            total_correct += int(correct)  # convert the tensor to a Python int

        acc = total_correct / total_num
        print(epoch, 'acc:', acc)


if __name__ == '__main__':
    main()
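  • As an aside, the same accuracy bookkeeping can be done with a Keras metric object. A minimal sketch (not part of the original program; assumes conv_net, fc_net, and test_db from above):
# tf.keras.metrics.Accuracy accumulates correct/total counts across batches.
acc_meter = tf.keras.metrics.Accuracy()
for x, y in test_db:
    out = tf.reshape(conv_net(x), [-1, 512])
    pred = tf.argmax(fc_net(out), axis=1)  # softmax is monotonic, so argmax on logits suffices
    acc_meter.update_state(y, pred)        # compare predictions against the true labels
print('acc:', acc_meter.result().numpy())
acc_meter.reset_states()                   # clear the counters before the next evaluation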
  • Test results (run on a server with epochs set to 50):
(50000, 32, 32, 3) (50000,) (10000, 32, 32, 3) (10000,)
sample: (128, 32, 32, 3) (128,) tf.Tensor(0.0, shape=(), dtype=float32) tf.Tensor(1.0, shape=(), dtype=float32)
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              multiple                  1792      
_________________________________________________________________
conv2d_1 (Conv2D)            multiple                  36928     
_________________________________________________________________
max_pooling2d (MaxPooling2D) multiple                  0         
_________________________________________________________________
conv2d_2 (Conv2D)            multiple                  73856     
_________________________________________________________________
conv2d_3 (Conv2D)            multiple                  147584    
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 multiple                  0         
_________________________________________________________________
conv2d_4 (Conv2D)            multiple                  295168    
_________________________________________________________________
conv2d_5 (Conv2D)            multiple                  590080    
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 multiple                  0         
_________________________________________________________________
conv2d_6 (Conv2D)            multiple                  1180160   
_________________________________________________________________
conv2d_7 (Conv2D)            multiple                  2359808   
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 multiple                  0         
_________________________________________________________________
conv2d_8 (Conv2D)            multiple                  2359808   
_________________________________________________________________
conv2d_9 (Conv2D)            multiple                  2359808   
_________________________________________________________________
max_pooling2d_4 (MaxPooling2 multiple                  0         
=================================================================
Total params: 9,404,992
Trainable params: 9,404,992
Non-trainable params: 0
_________________________________________________________________
Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense (Dense)                multiple                  131328    
_________________________________________________________________
dense_1 (Dense)              multiple                  32896     
_________________________________________________________________
dense_2 (Dense)              multiple                  12900     
=================================================================
Total params: 177,124
Trainable params: 177,124
Non-trainable params: 0
_________________________________________________________________
0 0 loss: 4.605029582977295
0 100 loss: 4.580860137939453
0 200 loss: 4.370520114898682
0 300 loss: 4.2247443199157715
0 acc: 0.0631
1 0 loss: 4.131040573120117
1 100 loss: 3.9154152870178223
1 200 loss: 3.958664894104004
1 300 loss: 3.886810302734375
1 acc: 0.1146
2 0 loss: 3.8142406940460205
2 100 loss: 3.6943631172180176
2 200 loss: 3.622744560241699
2 300 loss: 3.6687121391296387
2 acc: 0.1636
3 0 loss: 3.540069580078125
3 100 loss: 3.3891937732696533
3 200 loss: 3.380049467086792
3 300 loss: 3.5331950187683105
3 acc: 0.2009
4 0 loss: 3.3657617568969727
4 100 loss: 3.064152240753174
4 200 loss: 3.154707908630371
4 300 loss: 3.3195037841796875
4 acc: 0.2324
5 0 loss: 3.1132378578186035
5 100 loss: 2.8564047813415527
5 200 loss: 2.991230010986328
5 300 loss: 3.123786449432373
5 acc: 0.2572
6 0 loss: 2.9249258041381836
6 100 loss: 2.6801297664642334
6 200 loss: 2.8119258880615234
6 300 loss: 2.8652963638305664
6 acc: 0.2711
7 0 loss: 2.741483211517334
7 100 loss: 2.5057296752929688
7 200 loss: 2.620211601257324
7 300 loss: 2.752559185028076
8 0 loss: 2.6997134685516357
8 100 loss: 2.371732711791992
8 200 loss: 2.5046632289886475
8 300 loss: 2.596170425415039
8 acc: 0.2781
9 0 loss: 2.6151163578033447
9 100 loss: 2.255403757095337
9 200 loss: 2.3547332286834717
9 300 loss: 2.5324296951293945
9 acc: 0.2977
10 0 loss: 2.510648488998413
10 100 loss: 2.2354989051818848
10 200 loss: 2.2526674270629883
10 300 loss: 2.285651683807373
10 acc: 0.3057
11 0 loss: 2.319749355316162
11 100 loss: 1.9863653182983398
11 200 loss: 2.173501491546631
11 300 loss: 2.1503279209136963
11 acc: 0.3074
12 0 loss: 2.106405019760132
12 100 loss: 1.8547744750976562
12 200 loss: 2.030120611190796
12 300 loss: 2.016551971435547
12 acc: 0.306
13 0 loss: 1.921705961227417
13 100 loss: 1.6969841718673706
13 200 loss: 1.7384164333343506
13 300 loss: 1.6263337135314941
13 acc: 0.3022
14 0 loss: 1.8699954748153687
14 100 loss: 1.5835696458816528
14 200 loss: 1.6531994342803955
14 300 loss: 1.8208564519882202
14 acc: 0.3018
15 0 loss: 1.614569902420044
15 100 loss: 1.2630252838134766
15 200 loss: 1.63907790184021
15 300 loss: 1.5227338075637817
15 acc: 0.3016
16 0 loss: 1.5800502300262451
16 100 loss: 1.1279842853546143
16 200 loss: 1.6005079746246338
16 300 loss: 1.3861875534057617
16 acc: 0.2849
17 0 loss: 1.6881680488586426
17 100 loss: 0.9800716638565063
17 200 loss: 1.3455613851547241
17 300 loss: 1.0564796924591064
17 acc: 0.2763
18 0 loss: 1.2472107410430908
18 100 loss: 0.7723194360733032
18 200 loss: 1.1151444911956787
18 300 loss: 0.788750171661377
18 acc: 0.2831
19 0 loss: 0.9305314421653748
19 100 loss: 0.7008259296417236
19 200 loss: 0.8505905866622925
19 300 loss: 0.9073967933654785
19 acc: 0.2723
20 0 loss: 1.1224623918533325
20 100 loss: 0.6999978423118591
20 200 loss: 0.7115190625190735
20 300 loss: 0.9408319592475891
20 acc: 0.25
21 0 loss: 1.2477564811706543
21 100 loss: 0.674743115901947
21 200 loss: 0.3369559943675995
21 300 loss: 0.7261745929718018
21 acc: 0.2654
22 0 loss: 0.7425245046615601
22 100 loss: 0.3677942454814911
22 200 loss: 0.40949755907058716
22 300 loss: 0.6454076766967773
22 acc: 0.2744
23 0 loss: 0.6252273321151733
23 100 loss: 0.3592641353607178
23 200 loss: 0.35914134979248047
23 300 loss: 0.5348892211914062
23 acc: 0.2772
24 0 loss: 0.6438977122306824
24 100 loss: 0.4346071481704712
24 200 loss: 0.3179160952568054
24 300 loss: 0.38192832469940186
24 acc: 0.2796
25 0 loss: 0.30675286054611206
25 100 loss: 0.1806555688381195
25 200 loss: 0.32784852385520935
25 300 loss: 0.3345670700073242
25 acc: 0.2797
26 0 loss: 0.15493153035640717
26 100 loss: 0.26428651809692383
26 200 loss: 0.24280449748039246
26 300 loss: 0.21679618954658508
26 acc: 0.2865
27 0 loss: 0.11476347595453262
27 100 loss: 0.24579459428787231
27 200 loss: 0.15920010209083557
27 300 loss: 0.17341411113739014
27 acc: 0.2838
28 0 loss: 0.10326501727104187
28 100 loss: 0.20288777351379395
28 200 loss: 0.16056469082832336
28 300 loss: 0.3395807147026062
28 acc: 0.2931
29 0 loss: 0.24492518603801727
29 100 loss: 0.1633157730102539
29 200 loss: 0.14137093722820282
29 300 loss: 0.20660582184791565
29 acc: 0.2926
30 0 loss: 0.13092076778411865
30 100 loss: 0.13318170607089996
30 200 loss: 0.11129891127347946
30 300 loss: 0.14153015613555908
30 acc: 0.2916
31 0 loss: 0.1296483874320984
31 100 loss: 0.1882035732269287
31 200 loss: 0.1775294989347458
31 300 loss: 0.14510029554367065
31 acc: 0.2964
32 0 loss: 0.18870742619037628
32 100 loss: 0.12179140746593475
32 200 loss: 0.06527263671159744
32 300 loss: 0.21518485248088837
32 acc: 0.2963
33 0 loss: 0.14098259806632996
33 100 loss: 0.07932665199041367
33 200 loss: 0.05747982859611511
33 300 loss: 0.12328965216875076
33 acc: 0.2907
34 0 loss: 0.17796023190021515
34 100 loss: 0.06805160641670227
34 200 loss: 0.10021108388900757
34 300 loss: 0.15585386753082275
34 acc: 0.2934
35 0 loss: 0.16877952218055725
35 100 loss: 0.1032438576221466
35 200 loss: 0.20572063326835632
35 300 loss: 0.11727580428123474
35 acc: 0.2911
36 0 loss: 0.09185600280761719
36 100 loss: 0.1325276792049408
36 200 loss: 0.10882830619812012
36 300 loss: 0.1802942305803299
36 acc: 0.289
37 0 loss: 0.04685244336724281
37 100 loss: 0.19100454449653625
37 200 loss: 0.02111639827489853
37 300 loss: 0.08410722762346268
37 acc: 0.29
38 0 loss: 0.06354247033596039
38 100 loss: 0.2206110656261444
38 200 loss: 0.06790764629840851
38 300 loss: 0.0827399417757988
38 acc: 0.2858
39 0 loss: 0.181129589676857
39 100 loss: 0.08104170113801956
39 200 loss: 0.10692848265171051
39 300 loss: 0.05646608769893646
39 acc: 0.2885
40 0 loss: 0.14817684888839722
40 100 loss: 0.06977212429046631
40 200 loss: 0.14923086762428284
40 300 loss: 0.08255875110626221
40 acc: 0.2878
41 0 loss: 0.11980663239955902
41 100 loss: 0.08682526648044586
41 200 loss: 0.060800135135650635
41 300 loss: 0.1181347519159317
41 acc: 0.2874
42 0 loss: 0.10507871955633163
42 100 loss: 0.05973637104034424
42 200 loss: 0.2080913484096527
42 300 loss: 0.11824189871549606
42 acc: 0.2842
43 0 loss: 0.09621529281139374
43 100 loss: 0.11187084764242172
43 200 loss: 0.05400624871253967
43 300 loss: 0.13920530676841736
43 acc: 0.2822
44 0 loss: 0.11675436049699783
44 100 loss: 0.0705123245716095
44 200 loss: 0.04682890698313713
44 300 loss: 0.23516617715358734
44 acc: 0.2757
45 0 loss: 0.13031496107578278
45 100 loss: 0.0411326140165329
45 200 loss: 0.07050048559904099
45 300 loss: 0.14312365651130676
45 acc: 0.2841
46 0 loss: 0.09100533276796341
46 100 loss: 0.06072338670492172
46 200 loss: 0.18048375844955444
46 300 loss: 0.17678526043891907
46 acc: 0.2883
47 0 loss: 0.0403500534594059
47 100 loss: 0.12216447293758392
47 200 loss: 0.10483959317207336
47 300 loss: 0.17612355947494507
47 acc: 0.282
48 0 loss: 0.19901448488235474
48 100 loss: 0.0947040468454361
48 200 loss: 0.02527277171611786
48 300 loss: 0.09630846977233887
48 acc: 0.2926
49 0 loss: 0.09839979559183121
49 100 loss: 0.06749995797872543
49 200 loss: 0.10177887976169586
49 300 loss: 0.0753551721572876
49 acc: 0.2921
