Basic TensorFlow operations: fully connected, convolutional, pooling, unidirectional LSTM, and bidirectional LSTM layers

First, import the packages:

import tensorflow as tf
import tensorflow.contrib as contrib

# tf.layers and tf.nn are attributes of the tf module; aliasing them is portable
# across TF 1.x releases, where "import tensorflow.layers" can fail outright
layer = tf.layers
nn = tf.nn

Fully connected layer

batch = 8
data_dim = 10
out_dim = 15
data = tf.Variable(tf.random_uniform([batch, data_dim]))
fc_out = contrib.layers.fully_connected(data, out_dim)  # contrib wrapper, ReLU activation by default
dense_out = layer.dense(data, out_dim)                  # tf.layers wrapper, linear by default
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
out = sess.run(dense_out)
# print(out)
print(out.shape)  # (8, 15)
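Both wrappers reduce to a matrix multiply plus a bias; note that contrib.layers.fully_connected applies ReLU by default while tf.layers.dense is linear by default. A minimal hand-rolled sketch of the same computation (the names W, b, and manual_out are illustrative, not part of either API):

W = tf.Variable(tf.random_uniform([data_dim, out_dim]))  # weight matrix
b = tf.Variable(tf.zeros([out_dim]))                     # bias vector
manual_out = tf.matmul(data, W) + b                      # shape: (8, 15), no activation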

Convolutional and pooling layers

batch = 8
image_h = 10
image_w = 10
image_channel = 1
out_channel = 5
cnn_kernel_size_h = 3
cnn_kernel_size_w = 3
cnn_stride = 1
pool_size_h = 2
pool_size_w = 2
pool_stride = 1
images = tf.Variable(tf.random_uniform([batch, image_h, image_w, image_channel]))  # NHWC layout
cnn_out = contrib.layers.conv2d(images, out_channel, [cnn_kernel_size_h, cnn_kernel_size_w], cnn_stride, padding='SAME')
# cnn_out = layer.conv2d(images, out_channel, [cnn_kernel_size_h, cnn_kernel_size_w], cnn_stride, padding='SAME')
pool_out = contrib.layers.max_pool2d(cnn_out, [pool_size_h, pool_size_w], stride=pool_stride, padding='SAME')
# pool_out = contrib.layers.avg_pool2d(cnn_out, [pool_size_h, pool_size_w], stride=pool_stride, padding='SAME')
# pool_out = tf.nn.max_pool(cnn_out, [1, pool_size_h, pool_size_w, 1], strides=[1, pool_stride, pool_stride, 1],
#                           padding='SAME')
# pool_out = tf.nn.avg_pool(cnn_out, [1, pool_size_h, pool_size_w, 1], strides=[1, pool_stride, pool_stride, 1],
#                           padding='SAME')
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
out = sess.run(pool_out)
# print(out)
print(out.shape)  # (8, 10, 10, 5)
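With 'SAME' padding the output spatial size is ceil(input_size / stride), so with stride 1 both layers keep the 10x10 spatial size and pool_out comes out as (8, 10, 10, 5). For comparison, a sketch of the same convolution through the low-level tf.nn.conv2d, which takes an explicit filter variable (the name kernel is illustrative):

# Filter layout for tf.nn.conv2d: [kernel_h, kernel_w, in_channels, out_channels]
kernel = tf.Variable(tf.random_uniform([cnn_kernel_size_h, cnn_kernel_size_w, image_channel, out_channel]))
raw_conv = tf.nn.conv2d(images, kernel, strides=[1, cnn_stride, cnn_stride, 1], padding='SAME')
# Unlike the wrappers above, this applies no bias and no activation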

Unidirectional LSTM (batch is the first dimension of data)

batch = 8
step = 10
vector = 50
hidden = 7
data = tf.Variable(tf.random_uniform([batch, step, vector]))
lstm_cell = contrib.rnn.BasicLSTMCell(hidden, forget_bias=1.0, state_is_tuple=False)
# lstm_cell = nn.rnn_cell.BasicLSTMCell(hidden, forget_bias=1.0, state_is_tuple=False)
# zero_state takes the batch size; data is batch-major, so time_major=False
init_state = lstm_cell.zero_state(batch, dtype=tf.float32)
rnn_out, final_state = nn.dynamic_rnn(lstm_cell, data, initial_state=init_state, time_major=False)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
out, state = sess.run([rnn_out, final_state])
# print(out, state)
print(out.shape, state.shape)  # (8, 10, 7) (8, 14)
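rnn_out keeps every time step, so its shape is (8, 10, 7); with state_is_tuple=False the final state packs the cell state c and the hidden state h side by side, giving (8, 14). A common follow-up is to keep only the last time step as a fixed-size summary of the sequence (last_out is an illustrative name):

last_out = rnn_out[:, -1, :]  # output at the final time step, shape: (8, 7)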

Bidirectional LSTM (batch is the first dimension of data)

batch = 8
step = 10
vector = 50
hidden = 7
data = tf.Variable(tf.random_uniform([batch, step, vector]))
# Use separate cells for the two directions; reusing one cell object makes both
# directions share weights and can trigger variable-scope errors on some TF 1.x versions
fw_cell = contrib.rnn.BasicLSTMCell(hidden, forget_bias=1.0, state_is_tuple=False)
bw_cell = contrib.rnn.BasicLSTMCell(hidden, forget_bias=1.0, state_is_tuple=False)
# fw_cell = nn.rnn_cell.BasicLSTMCell(hidden, forget_bias=1.0, state_is_tuple=False)
# zero_state takes the batch size; data is batch-major, so time_major=False
init_fw = fw_cell.zero_state(batch, dtype=tf.float32)
init_bw = bw_cell.zero_state(batch, dtype=tf.float32)
rnn_out, final_state = nn.bidirectional_dynamic_rnn(fw_cell, bw_cell, data, initial_state_fw=init_fw,
                                                    initial_state_bw=init_bw, time_major=False)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
# Merge the (forward, backward) pairs in the graph, on the feature axis, before running
merged_out = tf.concat(rnn_out, 2)        # [batch, step, 2 * hidden]
merged_state = tf.concat(final_state, 1)  # [batch, 4 * hidden]: each direction packs c and h
out, state = sess.run([merged_out, merged_state])
# print(out, state)
print(out.shape, state.shape)  # (8, 10, 14) (8, 28)
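If the two directions are needed separately, the un-merged final_state is a (forward, backward) pair, and with state_is_tuple=False each element packs c first and h second along axis 1. A sketch under those assumptions (the state_fw/h_fw names are illustrative):

state_fw, state_bw = final_state  # each (8, 14), laid out as [c, h]
h_fw = state_fw[:, hidden:]       # forward hidden state, (8, 7)
h_bw = state_bw[:, hidden:]       # backward hidden state, (8, 7)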

 
