import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

"""Simple tutorial using code from the TensorFlow example for Regression.
Parag K. Mital, Jan. 2016"""
# pip3 install --upgrade
# https://storage.googleapis.com/tensorflow/mac/tensorflow-0.6.0-py3-none-any.whl

# %%
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
import numpy as np
import matplotlib.pyplot as plt
# %% get the classic mnist dataset
# one-hot means a sparse vector for every observation where only
# the class label is 1, and every other class is 0.
# more info here:
# https://www.tensorflow.org/versions/0.6.0/tutorials/mnist/download/index.html#dataset-object
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)

# %% mnist is now a DataSet with accessors for:
# 'train', 'test', and 'validation'.
# within each, we can access:
# images, labels, and num_examples
print(mnist.train.num_examples,
      mnist.test.num_examples,
      mnist.validation.num_examples)

# %% the images are stored as an
# n_observations x n_features tensor (n-dim array)
# the labels are stored as n_observations x n_labels,
# where each observation is a one-hot vector.
print(mnist.train.images.shape, mnist.train.labels.shape)

# %% the range of the values of the images is from 0-1
print(np.min(mnist.train.images), np.max(mnist.train.images))

# %% we can visualize any one of the images by reshaping it to a 28x28 image
plt.imshow(np.reshape(mnist.train.images[100, :], (28, 28)), cmap='gray')
# plt.show()

# Console output:
# 55000 10000 5000
# (55000, 784) (55000, 10)
# 0.0 1.0
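# %% To make the one-hot encoding concrete, we can look at a single label
# vector: it is a length-10 array with a 1 at the index of the digit class
# and 0 everywhere else (a minimal sketch using the same `mnist` object
# loaded above; image index 100 is an arbitrary choice).
print(mnist.train.labels[100])             # one-hot vector, e.g. [0. 0. ... 1. ... 0.]
print(np.argmax(mnist.train.labels[100]))  # the integer class label it encodes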
# %% We can create a container for an input image using tensorflow's graph:
# We allow the first dimension to be None, since this will eventually
# represent our mini-batches, or how many images we feed into a network
# at a time during training/validation/testing.
# The second dimension is the number of features that the image has.
n_input = 784
n_output = 10
net_input = tf.placeholder(tf.float32, [None, n_input])

# %% We can write a simple regression (y = W*x + b) as:
W = tf.Variable(tf.zeros([n_input, n_output]))
b = tf.Variable(tf.zeros([n_output]))
net_output = tf.nn.softmax(tf.matmul(net_input, W) + b)
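# %% For intuition, this is what the softmax above computes, written out in
# plain NumPy (a sketch only -- the graph itself uses tf.nn.softmax): each
# vector of scores is exponentiated and normalized so it sums to 1 and can
# be read as class probabilities.
def softmax_np(z):
    e = np.exp(z - np.max(z))  # subtract the max for numerical stability
    return e / np.sum(e)

print(softmax_np(np.array([1.0, 2.0, 3.0])))  # three probabilities summing to 1.0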
# %% We'll create a placeholder for the true output of the network
y_true = tf.placeholder(tf.float32, [None, 10])

# %% And then write our loss function:
cross_entropy = -tf.reduce_sum(y_true * tf.log(net_output))
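# %% Note that tf.log(net_output) can produce NaNs if any predicted
# probability underflows to exactly 0. A common, more numerically stable
# alternative (not used in this tutorial) is to let TensorFlow combine the
# softmax and the cross-entropy, working from the raw logits -- a sketch:
logits = tf.matmul(net_input, W) + b  # pre-softmax scores
stable_cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=logits))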
# %% This would equate each label in our one-hot vector between the
# prediction and actual using the argmax as the predicted label
correct_prediction = tf.equal(tf.argmax(net_output, 1),
                              tf.argmax(y_true, 1))

# %% And now we can look at the mean of our network's correct guesses
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

# %% We can tell the tensorflow graph to train w/ gradient descent using
# our loss function and an input learning rate
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
# %% We now create a new session to actually perform the initialization of the variables:
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# %% Now actually do some training:
batch_size = 100
n_epochs = 10
for epoch_i in range(n_epochs):
    for batch_i in range(mnist.train.num_examples // batch_size):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(optimizer, feed_dict={
            net_input: batch_xs,
            y_true: batch_ys
        })
    print(sess.run(accuracy,
                   feed_dict={
                       net_input: mnist.validation.images,
                       y_true: mnist.validation.labels
                   }))

# %% Print final test accuracy:
print(sess.run(accuracy,
               feed_dict={
                   net_input: mnist.test.images,
                   y_true: mnist.test.labels
               }))

# Console output (validation accuracy per epoch, then final test accuracy):
# 0.92 0.919 0.922 0.918 0.917 0.9254 0.9226 0.925 0.9222 0.919
# 0.9171
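# %% As a usage sketch (not part of the original tutorial), we can also ask
# the trained graph for its prediction on a single test image by feeding a
# 1 x 784 batch and taking the argmax of the softmax output; index 0 is an
# arbitrary choice here.
pred = sess.run(net_output, feed_dict={net_input: mnist.test.images[0:1]})
print(np.argmax(pred, 1), np.argmax(mnist.test.labels[0:1], 1))  # predicted vs. true class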
# We could do the same thing w/ Keras like so:
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.optimizers import SGD

model = Sequential()
model.add(Dense(output_dim=10, input_dim=784, init='zero'))
model.add(Activation("softmax"))
model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=0.01))  # same 0.01 learning rate as the graph above
model.fit(mnist.train.images, mnist.train.labels, nb_epoch=n_epochs,
          batch_size=batch_size, show_accuracy=True)
objective_score = model.evaluate(mnist.test.images, mnist.test.labels,
                                 batch_size=100, show_accuracy=True)
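# A sketch of how this result might be reported (assuming this older Keras
# API returns [loss, accuracy] from evaluate() when show_accuracy=True):
print(objective_score)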
"""A more recent Keras multi-layer perceptron approach to MNIST recognition:

import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop

batch_size = 128
num_classes = 10
epochs = 20

# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])

history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))

score = model.evaluate(x_test, y_test, verbose=0)
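# A sketch of how the result would typically be reported: with
# metrics=['accuracy'] passed to compile(), evaluate() returns [loss, accuracy].
print('Test loss:', score[0])
print('Test accuracy:', score[1])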
---------------------
Author: Klay Ye
Source: CSDN
Original: https://blog.csdn.net/weixin_34275246/article/details/88970278
Copyright notice: this is the blogger's original article; please include a link to the post when reposting.
"""