Cats and Dogs in TensorFlow - Part 1: Building the Model


# -*- coding: utf-8 -*-
import tensorflow as tf
def inference(image,batch_size,n_classes):
    #conv1: image is a tensor of shape [batch_size, image_w, image_h, channels]; a 256x256 input is used as the running example
    with tf.variable_scope('conv1') as scope:
        # 3x3x3 convolution kernels, 16 output channels (filters)
        weights = tf.get_variable('weights',shape = [3,3,3,16],
                                  dtype = tf.float32,
                                  initializer = tf.truncated_normal_initializer(stddev = 0.1,dtype = tf.float32))
        biases = tf.get_variable('biases',shape = [16],dtype = tf.float32,initializer = tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(image,weights,strides=[1,1,1,1],padding = 'SAME')
        pre_activation = tf.nn.bias_add(conv,biases)
        conv1 = tf.nn.relu(pre_activation,name = scope.name)
        #print("image的shape是",image.get_shape())
        #print("conv1的shape是",conv1.get_shape())
    #pooling and norm1
    with tf.variable_scope('pool1_norm1') as scope:
        pool1 = tf.nn.max_pool(conv1,ksize = [1,3,3,1],strides = [1,2,2,1],padding = 'SAME',name = 'pooling1')
        norm1 = tf.nn.lrn(pool1,depth_radius = 4,bias = 1.0,alpha = 0.001/9.0,beta = 0.75,name = 'norm1')
    #conv2
    with tf.variable_scope('conv2') as scope:
        weights = tf.get_variable('weights',shape = [3,3,16,16],
                                  dtype = tf.float32,
                                  initializer = tf.truncated_normal_initializer(stddev = 0.1,dtype = tf.float32))
        biases = tf.get_variable('biases',shape = [16],dtype = tf.float32,initializer = tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(norm1,weights,strides=[1,1,1,1],padding = 'SAME')
        pre_activation = tf.nn.bias_add(conv,biases)
        conv2 = tf.nn.relu(pre_activation,name = scope.name)
        print("conv2的shape是",conv2.get_shape())
    #norm2 and pooling2
    with tf.variable_scope('pool2_norm2') as scope:
        norm2 = tf.nn.lrn(conv2,depth_radius = 4,bias = 1.0,alpha = 0.001/9.0,beta = 0.75,name = 'norm2')
        pool2 = tf.nn.max_pool(norm2,ksize = [1,3,3,1],strides = [1,2,2,1],padding = 'SAME',name = 'pooling2')
        print("pool2的shape是",pool2.get_shape())
        
    #local1   
    with tf.variable_scope('local1') as scope:
        
        # Compute the per-image feature dimension from pool2's static shape.
        dim = 1
        for d in pool2.get_shape()[1:].as_list():
            dim *= d   # if pool2 has shape [batch_size, 64, 64, 16], then dim = 64*64*16 = 65536
        reshape = tf.reshape(pool2, [batch_size, -1]) # flatten pool2 to [batch_size, dim], e.g. [8, 65536] with batch_size = 8
        # Note: get_shape() can only be called on a Tensor and returns a TensorShape;
        # use x.get_shape().as_list() to convert it to a Python list.
        # tf.shape(), by contrast, also accepts lists and arrays, and returns a
        # Tensor whose value is only known when the graph runs.
#        reshape = tf.reshape(pool2,[batch_size,65536])
#        print(reshape.get_shape().as_list())
#        dim = reshape.get_shape().as_list()
#        print(dim[1])
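        # Illustration (assuming 256x256 inputs and the two stride-2 pools above):
        #   pool2.get_shape().as_list() -> [8, 64, 64, 16]   (static shape, known at graph-build time)
        #   tf.shape(pool2)             -> 1-D int32 Tensor, evaluated only at run time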
        weights = tf.get_variable('weights',shape = [dim,128],
                                  dtype = tf.float32,
                                  initializer = tf.truncated_normal_initializer(stddev = 0.1,dtype = tf.float32))
        biases = tf.get_variable('biases',shape = [128],dtype = tf.float32,initializer = tf.constant_initializer(0.1))
        local1 = tf.nn.relu(tf.matmul(reshape,weights)+biases,name = scope.name)
    #local2
    with tf.variable_scope('local2') as scope:
        weights = tf.get_variable('weights',shape = [128,128],
                                  dtype = tf.float32,
                                  initializer = tf.truncated_normal_initializer(stddev = 0.005,dtype = tf.float32))
        biases = tf.get_variable('biases',shape = [128],dtype = tf.float32,initializer = tf.constant_initializer(0.1))
        local2 = tf.nn.relu(tf.matmul(local1,weights)+biases,name = scope.name)
    #softmax
    with tf.variable_scope('softmax_linear') as scope:
        weights = tf.get_variable('weights',shape = [128,n_classes],
                                  dtype = tf.float32,
                                  initializer = tf.truncated_normal_initializer(stddev = 0.005,dtype = tf.float32))
        biases = tf.get_variable('biases',shape = [n_classes],dtype = tf.float32,initializer = tf.constant_initializer(0.1))
        softmax_linear = tf.add(tf.matmul(local2,weights),biases,name = 'softmax')
        # No activation here: the loss op (sparse_softmax_cross_entropy_with_logits) applies the softmax internally, so it must not be applied again manually.
        return softmax_linear
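
As a quick sanity check, inference() can be wired to a placeholder like this. This is a standalone sketch: the 8x256x256x3 batch shape and the two classes (cat, dog) are assumptions chosen to match the comments above, not something the code requires.

import tensorflow as tf

BATCH_SIZE = 8
images = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 256, 256, 3])
logits = inference(images, batch_size=BATCH_SIZE, n_classes=2)
print(logits.get_shape())  # (8, 2): one raw (pre-softmax) score per class, per image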

def losses(logits,labels):
    with tf.variable_scope('loss') as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = logits,
                                                                       labels = labels,name = "xentropy_per_example") # one cross-entropy value per image in the batch
        loss = tf.reduce_mean(cross_entropy,name= 'loss') # average the per-example losses over the batch
    return  loss    
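
Note that tf.nn.sparse_softmax_cross_entropy_with_logits expects integer class IDs in [0, n_classes), not one-hot vectors, and applies the softmax itself, which is why inference() returns raw logits. A hypothetical call (the 0 = cat, 1 = dog encoding is an assumption):

example_labels = tf.constant([0, 1, 1, 0], dtype=tf.int64)  # integer class IDs, not one-hot
example_logits = tf.random_normal([4, 2])                   # stand-in logits: 4 images, 2 classes
example_loss = losses(example_logits, example_labels)       # scalar mean cross-entropy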

def trainning(loss,learning_rate):
    with tf.variable_scope('trainning') as scope:
        
        optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)
        global_step = tf.Variable(0,name = 'global_step',trainable = False) # global_step is a counter: it records how many batches (training steps) have been run
        train_op = optimizer.minimize(loss,global_step = global_step) 
        
    return train_op

def evaluation(logits,label):
    with tf.variable_scope('evaluation') as scope:
        
        correct = tf.nn.in_top_k(logits,label,1) # correct is a bool tensor, e.g. [True, False, ...]
        correct = tf.cast(correct,tf.float16) # cast to floats, e.g. [1., 0., ...]
        accuracy = tf.reduce_mean(correct)
        
        tf.summary.scalar(scope.name + '/accuracy',accuracy)
    
    return accuracy
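
Finally, a minimal sketch (in a fresh graph) of how the four functions fit together. Everything here - batch size, image size, learning rate, the random numpy batch - is an assumption for illustration, not prescribed by the code above.

import numpy as np
import tensorflow as tf

BATCH_SIZE, N_CLASSES = 8, 2

image_batch = tf.placeholder(tf.float32, [BATCH_SIZE, 256, 256, 3])
label_batch = tf.placeholder(tf.int64, [BATCH_SIZE])

train_logits = inference(image_batch, BATCH_SIZE, N_CLASSES)
train_loss = losses(train_logits, label_batch)
train_op = trainning(train_loss, learning_rate = 0.0001)
train_acc = evaluation(train_logits, label_batch)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Dummy batch; in practice this would come from the input pipeline
    # built in the data-loading part of this series.
    np_images = np.random.rand(BATCH_SIZE, 256, 256, 3).astype(np.float32)
    np_labels = np.random.randint(0, N_CLASSES, size=BATCH_SIZE)
    _, loss_val, acc_val = sess.run([train_op, train_loss, train_acc],
                                    feed_dict={image_batch: np_images,
                                               label_batch: np_labels})
    print("loss: %.4f, accuracy: %.4f" % (loss_val, acc_val))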