TensorFlow: Fitting a Function with a Neural Network (Python)

A video of the result can be seen on my Qzone: https://user.qzone.qq.com/707101557

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import math
 
def add_layer(inputs, in_size, out_size, activation_function=None):
    # add one more layer and return the output of this layer
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]))
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
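# For reference: add_layer computes activation(inputs @ Weights + biases), with
# Weights drawn from a standard normal and biases initialised to zero. In TF1,
# the built-in tf.layers.dense(inputs, out_size, activation=activation_function)
# would build an equivalent fully connected layer.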
 
# Make up some real data
x_data = np.linspace(-math.pi,math.pi,300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
# Target function: y = 0.7*x^3 + 0.5*x^2 + 1.7*x + 0.7 (written in Horner form), plus noise
y_data = ((0.7 * x_data + 0.5) * x_data + 1.7) * x_data + 0.7 + noise
print(x_data.shape)
print(y_data.shape)
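# Both prints report (300, 1): 300 samples, one feature each.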

# Alternative hand-collected dataset (14 points), kept commented out:
#x_data = np.array([97, 105, 113, 129, 137, 153, 177, 209, 217, 249,
#                   265, 281, 289, 305], dtype=float)[:, np.newaxis]
#y_data = np.array([1.60470, 1.69560, 2.77090, 6.04320, 2.04390, 5.90250, 5.61030,
#                   6.04320, 5.56600, 3.80150, 3.57560, 2.11480, 2.07970, 1.57680])[:, np.newaxis]

# Normalize the targets: center them, then scale into (-1, 1) so the tanh
# output layer can reach every target value.
_mean = y_data.mean()
y_data -= _mean
_max = 1.1 * np.abs(y_data).max()
y_data /= _max
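# (Predictions come out in this normalized scale; multiply by _max and add
#  _mean to map them back to the original units.)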



# define placeholders for the network inputs; [None, 1] accepts any batch size
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
# add hidden layer
#l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
l1 = add_layer(xs, 1, 100, activation_function=tf.nn.relu)
# add output layer
prediction = add_layer(l1, 100, 1, activation_function=tf.nn.tanh)
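# Architecture: 1 -> 100 (ReLU) -> 1 (tanh); tanh bounds the output to (-1, 1),
# which matches the normalized targets prepared above.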
 
loss = tf.reduce_mean(tf.square(ys - prediction))
train_step = tf.train.GradientDescentOptimizer(0.05).minimize(loss)
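# The loss above is mean squared error; training uses plain gradient descent
# with learning rate 0.05.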
 
init = tf.global_variables_initializer()  # tf.initialize_all_variables() is deprecated
sess = tf.Session()
sess.run(init)
 
# plot the real data
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.scatter(x_data, y_data)
# Interactive mode on
plt.ion()
plt.show()
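# plt.ion() turns on interactive mode so plt.show() does not block and the
# figure can be redrawn inside the training loop.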
 
for i in range(50000):
    # training
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        # to visualize the result, remove the previously drawn prediction line, if any
        try:
            lines[0].remove()
        except NameError:
            pass  # first iteration: no line has been drawn yet
        prediction_value = sess.run(prediction, feed_dict={xs: x_data})
        # plot the prediction
        lines = ax.plot(x_data, prediction_value, 'r-', lw=5)
        plt.pause(0.05)
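For readers on a current TensorFlow install, here is a minimal TF2/Keras sketch of the same model (an addition of mine, not part of the original script; it assumes TensorFlow 2.x and reuses x_data and y_data from above):

import tensorflow as tf

# Same architecture and training setup: 1 -> 100 (ReLU) -> 1 (tanh), MSE loss,
# full-batch gradient descent with learning rate 0.05.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation='relu', input_shape=(1,)),
    tf.keras.layers.Dense(1, activation='tanh'),
])
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.05), loss='mse')
model.fit(x_data, y_data, batch_size=len(x_data), epochs=2000, verbose=0)
prediction_value = model.predict(x_data)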

The following is an advertisement; you are welcome to purchase my Python machine learning course:

https://study.163.com/course/courseMain.htm?courseId=1005735048&_trace_c_p_k2_=92e5b9d612de4e53b80b8883588909f3
