TensorFlow Implementation of an AutoEncoder

# encoding: utf-8
'''
AutoEncoder: a neural network that extracts features from its own input and
whose output is trained to match that input. Borrowing the idea of sparse
coding, the goal is to recombine a sparse set of high-level features to
reconstruct the input, rather than simply copying pixel values.
Key points: (1) the input and output are expected to match; (2) the input is
reconstructed from high-level features, not copied pixel by pixel.
The implementation below is a denoising variant: additive Gaussian noise is
applied to the input before encoding.
'''

import numpy as np 
import sklearn.preprocessing as prep 
import tensorflow as tf 
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST dataset
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)


# Xavier initializer based on a uniform distribution.
# fan_in: number of input nodes; fan_out: number of output nodes.
def xavier_init(fan_in, fan_out, constant=1):
	low = -constant * np.sqrt(6.0/(fan_in+fan_out))
	high = constant * np.sqrt(6.0/(fan_in+fan_out))
	return tf.random_uniform((fan_in,fan_out), minval=low, maxval=high, dtype=tf.float32)
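
# Why these bounds (a standard fact about Xavier/Glorot initialization, not
# spelled out in the original post): a uniform distribution U(-r, r) has
# variance r^2/3, so r = sqrt(6/(fan_in+fan_out)) gives a weight variance of
# 2/(fan_in+fan_out), which keeps the scale of activations and gradients
# roughly constant across layers.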


class AdditiveGaussianNoiseAutoencoder(object):
	
	def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, 
					optimizer=tf.train.AdamOptimizer(), scale=0.1):

		self.n_input = n_input                   # number of input units
		self.n_hidden = n_hidden                 # number of hidden units
		self.transfer = transfer_function        # hidden-layer activation function, softplus by default
		self.scale = tf.placeholder(tf.float32)  # noise level, fed in at run time
		self.training_scale = scale              # Gaussian noise coefficient used during training
		network_weights = self._initialize_weights()  # parameter initialization
		self.weights = network_weights

		# Define the network structure
		self.x = tf.placeholder(tf.float32, [None, self.n_input])
		# Add Gaussian noise to the input x, multiply the noisy input by the
		# hidden-layer weights w1, add the hidden-layer bias b1, and apply the
		# activation function. The noise level must come from the self.scale
		# placeholder (fed via feed_dict) so it can be varied at run time.
		self.hidden = self.transfer(tf.add(tf.matmul(self.x + self.scale*tf.random_normal((n_input,)),
									self.weights['w1']), self.weights['b1']))
		# Reconstruct the data at the output layer from the hidden representation
		self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])
		
		# Loss: squared reconstruction error, cost = 0.5 * sum((reconstruction - x)^2)
		self.cost = 0.5*tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
		# Define the optimizer
		self.optimizer = optimizer.minimize(self.cost)
		init = tf.global_variables_initializer()
		self.sess = tf.Session()
		self.sess.run(init)

	# Parameter initialization: only w1 needs Xavier initialization because the
	# hidden layer has a softplus nonlinearity; the output layer is linear, so
	# w2 and b2 can simply be initialized to zero.
	def _initialize_weights(self):
		all_weights = {}
		all_weights['w1'] = tf.Variable(xavier_init(self.n_input,self.n_hidden))
		all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
		all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
		all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
		return all_weights

	# Training step: run one batch through the optimizer and return the batch cost
	def partial_fit(self, X):
		cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict = {self.x:X, self.scale:self.training_scale})
		return cost

	# Compute the total cost on a dataset; used after training to evaluate the
	# model on the test set
	def calc_total_cost(self, X):
		return self.sess.run(self.cost, feed_dict = {self.x:X, self.scale:self.training_scale})

	# transform: return the hidden-layer output, i.e. the high-level features
	# learned from the data
	def transform(self, X):
		return self.sess.run(self.hidden, feed_dict = {self.x:X, self.scale:self.training_scale})

	# generate: take hidden-layer output as input and reconstruct the original
	# data through the output layer
	def generate(self, hidden=None):
		if hidden is None:
			# Sample a random hidden code if none is given
			hidden = np.random.normal(size=(1, self.n_hidden))

		return self.sess.run(self.reconstruction, feed_dict = {self.hidden:hidden})

	# reconstruct: run the whole pipeline at once, i.e. extract the high-level
	# features and then restore the data from them
	def reconstruct(self, X):
		return self.sess.run(self.reconstruction, feed_dict = {self.x:X, self.scale:self.training_scale})

	# Return the learned hidden-layer weights w1
	def getWeights(self):
		return self.sess.run(self.weights['w1'])
		
	# Return the learned hidden-layer biases b1
	def getBiases(self):
		return self.sess.run(self.weights['b1'])


# Standardize the training and test data to zero mean and unit standard deviation
def standard_scale(X_train, X_test):
	preprocessor = prep.StandardScaler().fit(X_train)
	X_train = preprocessor.transform(X_train)
	X_test = preprocessor.transform(X_test)
	
	return X_train, X_test
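
# Note (not in the original post): the scaler is fit on the training set only
# and then applied to both splits, so the test data is standardized with the
# training statistics and no test-set information leaks into preprocessing.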

# Draw a random block of data: pick a random integer in [0, len(data)-batch_size)
# as the block's start index and take batch_size consecutive rows. Blocks from
# successive calls may overlap, so across batches this is sampling with replacement.
def get_random_block_from_data(data, batch_size):
	start_index = np.random.randint(0, len(data)-batch_size)
	return data[start_index:(start_index+batch_size)]

# Standardize the data
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)

n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1

autoencoder = AdditiveGaussianNoiseAutoencoder(n_input = 784, n_hidden=200,
												transfer_function=tf.nn.softplus,
												optimizer = tf.train.AdamOptimizer(learning_rate=0.001),
												scale=0.01)

for epoch in range(training_epochs):
	avg_cost = 0.
	total_batch = int(n_samples/batch_size)
	for i in range(total_batch):
		batch_xs = get_random_block_from_data(X_train, batch_size)

		cost = autoencoder.partial_fit(batch_xs)
		avg_cost += cost / n_samples * batch_size  # accumulates the average per-batch cost over the epoch

	if epoch%display_step == 0:
		print('Epoch:', '%04d' % (epoch+1), 'cost=','{:.9f}'.format(avg_cost))

print('Total cost: ' + str(autoencoder.calc_total_cost(X_test)))
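
# A minimal follow-up sketch (not from the book): use the trained model's own
# transform/reconstruct methods to inspect what it has learned. The slice size
# of 10 below is purely illustrative.
sample = X_test[:10]                      # 10 standardized test images, shape (10, 784)
features = autoencoder.transform(sample)  # hidden-layer codes, shape (10, 200)
recon = autoencoder.reconstruct(sample)   # reconstructions, shape (10, 784)
print('hidden features:', features.shape, 'reconstructions:', recon.shape)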


An autoencoder is an unsupervised learning method whose goal is to extract the most useful, most frequently occurring high-level features from the data and then reconstruct the data from those features.

Content adapted from 《Tensorflow實戰》 (TensorFlow in Practice).

