CNN: LeNet

LeNet Implementation (TensorFlow & PyTorch)

TensorFlow

import tensorflow as tf 

def LeNet(input_tensor,train,regularizer):
 
    #Layer 1: convolutional layer, 5×5 kernel, 1 input channel, 6 output channels, no zero padding (VALID), stride 1.
    #Size change: 32×32×1 -> 28×28×6
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable('weight',[5,5,1,6],initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable('bias',[6],initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor,conv1_weights,strides=[1,1,1,1],padding='VALID')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1,conv1_biases))
 
    #Layer 2: pooling layer, 2×2 filter, zero padding (SAME), stride 2.
    #Size change: 28×28×6 -> 14×14×6
    with tf.name_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
 
    #Layer 3: convolutional layer, 5×5 filter, depth 16, no zero padding (VALID), stride 1.
    #Size change: 14×14×6 -> 10×10×16
    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.get_variable('weight',[5,5,6,16],initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable('bias',[16],initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1,conv2_weights,strides=[1,1,1,1],padding='VALID')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2,conv2_biases))
 
    #Layer 4: pooling layer, 2×2 filter, zero padding (SAME), stride 2.
    #Size change: 10×10×16 -> 5×5×16
    with tf.variable_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
 
    #Reshape the output of layer 4 (pooling) into the input format of layer 5 (fully connected).

    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1]*pool_shape[2]*pool_shape[3]
    reshaped = tf.reshape(pool2,[-1,nodes])
 
    #Layer 5: fully connected layer, nodes = 5×5×16 = 400, a 400 -> 120 fully connected mapping.
    #Size change: with a training batch of 64, the size changes from 64×400 -> 64×120

    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable('weight',[nodes,120],initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses',regularizer(fc1_weights))
        fc1_biases = tf.get_variable('bias',[120],initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped,fc1_weights) + fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1,0.5)
 
    #Layer 6: fully connected layer, a 120 -> 84 fully connected mapping.
    #Size change: with a training batch of 64, the size changes from 64×120 -> 64×84
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable('weight',[120,84],initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses',regularizer(fc2_weights))
        fc2_biases = tf.get_variable('bias',[84],initializer=tf.truncated_normal_initializer(stddev=0.1))
        fc2 = tf.nn.relu(tf.matmul(fc1,fc2_weights) + fc2_biases)
        if train:
            fc2 = tf.nn.dropout(fc2,0.5)
 
    #Layer 7: fully connected layer (an approximate stand-in for LeNet's original output layer), an 84 -> 10 fully connected mapping.
    #Size change: with a training batch of 64, the size changes from 64×84 -> 64×10. Finally, passing the 64×10 matrix through softmax
    #gives the probability of each of the 64 images belonging to each digit class, i.e. the final classification result.
    with tf.variable_scope('layer7-fc3'):
        fc3_weights = tf.get_variable('weight',[84,10],initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses',regularizer(fc3_weights))
        fc3_biases = tf.get_variable('bias',[10],initializer=tf.truncated_normal_initializer(stddev=0.1))
        logit = tf.matmul(fc2,fc3_weights) + fc3_biases

    return logit
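
A minimal usage sketch (assuming the TensorFlow 1.x graph-mode API used above; the 0.0001 regularization factor is an arbitrary example value):

x = tf.placeholder(tf.float32,[None,32,32,1],name='x-input')  # batch of 32x32 grayscale images
regularizer = tf.contrib.layers.l2_regularizer(0.0001)        # example L2 weight-decay term
logits = LeNet(x,train=True,regularizer=regularizer)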

PyTorch

import torch
import torch.nn.functional as F


class LeNet(torch.nn.Module):
	#Constructor: defines the network structure
	def __init__(self):
		super(LeNet,self).__init__()
		#Convolutional layer 1: 1 input channel, 6 output channels, 5x5 kernel
		#(padding=2 lets 28x28 MNIST inputs stand in for LeNet's original 32x32 input)
		self.conv1=torch.nn.Conv2d(1,6,kernel_size=5,padding=2)
		#Convolutional layer 2: 6 input channels, 16 output channels, 5x5 kernel
		self.conv2=torch.nn.Conv2d(6,16,5)
		#Fully connected layers
		self.fc1=torch.nn.Linear(16*5*5,120)
		self.fc2=torch.nn.Linear(120,84)
		self.fc3=torch.nn.Linear(84,10)

	#Forward pass
	def forward(self,x):
		#Convolution -> activation -> max pooling
		x=F.max_pool2d(F.relu(self.conv1(x)),(2,2))
		#Convolution -> activation -> max pooling
		x=F.max_pool2d(F.relu(self.conv2(x)),(2,2))
		#Flatten to one feature vector per sample
		x=x.view(x.size(0),-1)
		x=F.relu(self.fc1(x))
		x=F.relu(self.fc2(x))
		x=self.fc3(x)

		return x
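
A minimal usage sketch: instantiate the network and run a dummy batch through it. Because conv1 uses padding=2, a 28×28 MNIST-sized input already yields the 16×5×5 feature map that fc1 expects:

net = LeNet()
dummy = torch.randn(4,1,28,28)  # batch of 4 single-channel 28x28 images
out = net(dummy)
print(out.shape)                # torch.Size([4, 10])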
