Machine Learning: Ensemble Learning

I. Problem Description

Use SVM, KNN, and a BP neural network in an ensemble, training and testing handwritten-digit recognition on the MNIST dataset.

II. Core Idea of the Algorithm

Ensemble learning combines several weak classifiers into a stronger one. Here, an SVM, a KNN classifier, and a BP neural network are trained separately, and their predictions are combined by majority vote to produce the ensemble's final output.
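
As a sketch of the voting step, a hard majority vote over several base classifiers can be written as follows (the models list and its predict interface are illustrative names, not the exact classes used later in this post):

from collections import Counter

def majority_vote(models, x):
    # collect one predicted label from each base classifier
    votes = [m.predict(x) for m in models]
    # the most frequent label wins; ties resolve to the first encountered
    label, count = Counter(votes).most_common(1)[0]
    return label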

III. Approach

First, read the MNIST dataset and train each classifier separately. At test time, put the three classifiers' predictions to a vote; the winning label is the ensemble's output. The SVM uses LibSVM.

IV. Code and Results

mnist_reader.py

# -*- coding: utf-8 -*-
import numpy as np
import struct
import matplotlib.pyplot as plt

# training set image file
train_images_idx3_ubyte_file = 'mnist/train-images.idx3-ubyte'
# training set label file
train_labels_idx1_ubyte_file = 'mnist/train-labels.idx1-ubyte'

# test set image file
test_images_idx3_ubyte_file = 'mnist/t10k-images.idx3-ubyte'
# test set label file
test_labels_idx1_ubyte_file = 'mnist/t10k-labels.idx1-ubyte'


def decode_idx3_ubyte(idx3_ubyte_file):
    """
    解析idx3文件的通用函數
    :param idx3_ubyte_file: idx3文件路徑
    :return: 數據集
    """
    # 讀取二進制數據
    bin_data = open(idx3_ubyte_file, 'rb').read()

    # 解析文件頭信息,依次爲魔數、圖片數量、每張圖片高、每張圖片寬
    offset = 0
    fmt_header = '>iiii'
    magic_number, num_images, num_rows, num_cols = struct.unpack_from(fmt_header, bin_data, offset)
    print('魔數:%d, 圖片數量: %d張, 圖片大小: %d*%d' % (magic_number, num_images, num_rows, num_cols))

    # parse the image data
    image_size = num_rows * num_cols
    offset += struct.calcsize(fmt_header)
    fmt_image = '>' + str(image_size) + 'B'
    images = np.empty((num_images, num_rows, num_cols))
    for i in range(num_images):
        if (i + 1) % 10000 == 0:
            print('parsed %d images' % (i + 1))
        images[i] = np.array(struct.unpack_from(fmt_image, bin_data, offset)).reshape((num_rows, num_cols))
        offset += struct.calcsize(fmt_image)
    return images


def decode_idx1_ubyte(idx1_ubyte_file):
    """
    解析idx1文件的通用函數
    :param idx1_ubyte_file: idx1文件路徑
    :return: 數據集
    """
    # 讀取二進制數據
    bin_data = open(idx1_ubyte_file, 'rb').read()

    # 解析文件頭信息,依次爲魔數和標籤數
    offset = 0
    fmt_header = '>ii'
    magic_number, num_images = struct.unpack_from(fmt_header, bin_data, offset)
    print('魔數:%d, 圖片數量: %d張' % (magic_number, num_images))

    # parse the label data
    offset += struct.calcsize(fmt_header)
    fmt_image = '>B'
    labels = np.empty(num_images)
    for i in range(num_images):
        if (i + 1) % 10000 == 0:
            print('parsed %d labels' % (i + 1))
        labels[i] = struct.unpack_from(fmt_image, bin_data, offset)[0]
        offset += struct.calcsize(fmt_image)
    return labels


def load_train_images(idx_ubyte_file=train_images_idx3_ubyte_file):
    """
    TRAINING SET IMAGE FILE (train-images-idx3-ubyte):
    [offset] [type]          [value]          [description]
    0000     32 bit integer  0x00000803(2051) magic number
    0004     32 bit integer  60000            number of images
    0008     32 bit integer  28               number of rows
    0012     32 bit integer  28               number of columns
    0016     unsigned byte   ??               pixel
    0017     unsigned byte   ??               pixel
    ........
    xxxx     unsigned byte   ??               pixel
    Pixels are organized row-wise. Pixel values are 0 to 255. 0 means background (white), 255 means foreground (black).

    :param idx_ubyte_file: path to the idx file
    :return: n*row*col np.array, where n is the number of images
    """
    return decode_idx3_ubyte(idx_ubyte_file)


def load_train_labels(idx_ubyte_file=train_labels_idx1_ubyte_file):
    """
    TRAINING SET LABEL FILE (train-labels-idx1-ubyte):
    [offset] [type]          [value]          [description]
    0000     32 bit integer  0x00000801(2049) magic number (MSB first)
    0004     32 bit integer  60000            number of items
    0008     unsigned byte   ??               label
    0009     unsigned byte   ??               label
    ........
    xxxx     unsigned byte   ??               label
    The labels values are 0 to 9.

    :param idx_ubyte_file: path to the idx file
    :return: n*1 np.array, where n is the number of images
    """
    return decode_idx1_ubyte(idx_ubyte_file)


def load_test_images(idx_ubyte_file=test_images_idx3_ubyte_file):
    """
    TEST SET IMAGE FILE (t10k-images-idx3-ubyte):
    [offset] [type]          [value]          [description]
    0000     32 bit integer  0x00000803(2051) magic number
    0004     32 bit integer  10000            number of images
    0008     32 bit integer  28               number of rows
    0012     32 bit integer  28               number of columns
    0016     unsigned byte   ??               pixel
    0017     unsigned byte   ??               pixel
    ........
    xxxx     unsigned byte   ??               pixel
    Pixels are organized row-wise. Pixel values are 0 to 255. 0 means background (white), 255 means foreground (black).

    :param idx_ubyte_file: path to the idx file
    :return: n*row*col np.array, where n is the number of images
    """
    return decode_idx3_ubyte(idx_ubyte_file)


def load_test_labels(idx_ubyte_file=test_labels_idx1_ubyte_file):
    """
    TEST SET LABEL FILE (t10k-labels-idx1-ubyte):
    [offset] [type]          [value]          [description]
    0000     32 bit integer  0x00000801(2049) magic number (MSB first)
    0004     32 bit integer  10000            number of items
    0008     unsigned byte   ??               label
    0009     unsigned byte   ??               label
    ........
    xxxx     unsigned byte   ??               label
    The labels values are 0 to 9.

    :param idx_ubyte_file: path to the idx file
    :return: n*1 np.array, where n is the number of images
    """
    return decode_idx1_ubyte(idx_ubyte_file)


def run():
    train_images = load_train_images()
    train_labels = load_train_labels()
    # test_images = load_test_images()
    # test_labels = load_test_labels()

    # inspect the first ten samples and their labels to verify the data was read correctly
    n = 10
    for i in range(n):
        print("label\n", train_labels[i])
        print("image\n", train_images[i])
        plt.subplot((n - 1) // 5 + 1, 5, i + 1)
        plt.imshow(train_images[i], cmap='gray')
        # plt.show()
    plt.show()
    print('done')


def getdataset():
    X_train = load_train_images()
    y_train = load_train_labels()
    X_test = load_test_images()
    y_test = load_test_labels()
    return X_train, y_train, X_test, y_test


def show_mnist():
    y_train = load_train_labels()
    X_train = load_train_images()
    # show a few samples from each class
    classes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    num_classes = len(classes)
    samples_per_class = 7
    for y, cls in enumerate(classes):
        idxs = np.flatnonzero(y_train == y)
        # pick a few samples from this class at random
        idxs = np.random.choice(idxs, samples_per_class, replace=False)
        for i, idx in enumerate(idxs):
            plt_idx = i * num_classes + y + 1
            plt.subplot(samples_per_class, num_classes, plt_idx)
            plt.imshow(X_train[idx].astype('uint8'))
            plt.axis('off')
            if i == 0:
                plt.title(cls)
    plt.show()


if __name__ == '__main__':
    # run()
    show_mnist()

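A quick smoke test for the reader, assuming the four idx files sit in the mnist/ directory configured at the top of the file:

import mnist_reader as reader

# expected shapes: (60000, 28, 28), (60000,), (10000, 28, 28), (10000,)
X_train, y_train, X_test, y_test = reader.getdataset()
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
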
svm.py

from libsvm.python.svmutil import *
from libsvm.python.svm import *
import mnist_reader as reader
import numpy as np


class SVM(object):
    def __init__(self):
        self.X_train, self.y_train, self.X_test, self.y_test = reader.getdataset()
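        # scale pixel values from [0, 255] into [0, 1)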
        self.X_train = self.X_train/256
        self.X_test = self.X_test/256
        self.y_train = self.y_train.astype(int)
        self.y_test = self.y_test.astype(int)

    def dropsomepicture(self, trainm=5000, testm=500):
        # subsample the dataset
        num_training = trainm
        mask = range(num_training)
        self.X_train = self.X_train[mask]
        self.y_train = self.y_train[mask]

        num_test = testm
        mask = range(num_test)
        self.X_test = self.X_test[mask]
        self.y_test = self.y_test[mask]

        # preprocessing: flatten each image into a single vector
        self.X_train = np.reshape(self.X_train, (self.X_train.shape[0], -1))
        self.X_test = np.reshape(self.X_test, (self.X_test.shape[0], -1))

    def train(self):
        train_images = self.X_train.tolist()
        train_labels = self.y_train.tolist()

        self.model = svm_train(train_labels, train_images)

    def test(self):
        test_images = self.X_test.tolist()
        test_labels = self.y_test.tolist()
        p_label, p_acc, p_val = svm_predict(test_labels, test_images, self.model)
        print(p_acc)

    def predict(self, label, image, model):
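        # svm_predict uses the true labels only to report accuracy; p_label holds the predictions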
        p_label, p_acc, p_val = svm_predict(label, image, model)
        return int(p_label[0])


if __name__ == '__main__':
    svm = SVM()
    # svm.dropsomepicture(5000, 10000)
    # svm.train()
    # svm.test()

    ## predict
    svm.dropsomepicture(5000, 10)
    svm.train()
    test_images = svm.X_test.tolist()
    test_labels = svm.y_test.tolist()
    svm_label = [test_labels[0]]
    svm_image = [test_images[0]]
    print(svm_label)
    print(svm.predict(svm_label, svm_image, svm.model))

knn.py

from shaomingshan.classifiers.k_nearest_neighbor import KNearestNeighbor
import numpy as np
import matplotlib.pyplot as plt
import mnist_reader as reader
import time


def time_function(f, *args):
    tic = time.time()
    f(*args)
    toc = time.time()
    return toc - tic


class KNN(object):
    def __init__(self):
        self.X_train, self.y_train, self.X_test, self.y_test = reader.getdataset()
        self.y_train = self.y_train.astype(int)
        self.y_test = self.y_test.astype(int)

    def dropsomepicture(self, trainm=5000, testm=500):
        # subsample the dataset
        num_training = trainm
        mask = range(num_training)
        self.X_train = self.X_train[mask]
        self.y_train = self.y_train[mask]

        num_test = testm
        mask = range(num_test)
        self.X_test = self.X_test[mask]
        self.y_test = self.y_test[mask]

    def show(self):
        # load the dataset
        X_train, y_train, X_test, y_test = self.X_train, self.y_train, self.X_test, self.y_test

        print('Training data shape: ', X_train.shape)
        print('Training labels shape: ', y_train.shape)
        print('Test data shape: ', X_test.shape)
        print('Test labels shape: ', y_test.shape)

        # flatten each image into a single vector
        X_train = np.reshape(X_train, (X_train.shape[0], -1))
        X_test = np.reshape(X_test, (X_test.shape[0], -1))
        print(X_train.shape, X_test.shape)
        # instantiate the KNN classifier and "train" it (which just memorizes the samples)
        classifier = KNearestNeighbor()
        classifier.train(X_train, y_train)

        # fully vectorized (no-loop) distance computation, the fastest variant
        dists_two = classifier.compute_distances_no_loops(X_test)

        no_loop_time = time_function(classifier.compute_distances_no_loops, X_test)
        print('No loop version took %f seconds' % no_loop_time)

    # cross-validation to pick k
    def num_folds_cross(self):
        X_train, y_train, X_test, y_test = self.X_train, self.y_train, self.X_test, self.y_test
        X_test = np.reshape(X_test, (X_test.shape[0], -1))
        X_train = np.reshape(X_train, (X_train.shape[0], -1))
        num_folds = 5
        k_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100]

        X_train_folds = []
        y_train_folds = []

        idxes = range(len(X_train))
        idx_folds = np.array_split(idxes, num_folds)
        for idx in idx_folds:
            X_train_folds.append(X_train[idx])
            y_train_folds.append(y_train[idx])

        k_to_accuracies = {}

        import sys
        classifier = KNearestNeighbor()
        Verbose = False
        for k in k_choices:
            if Verbose:
                print("processing k=%f" % k)
            else:
                sys.stdout.write('.')
            k_to_accuracies[k] = list()
            for num in range(num_folds):
                if Verbose:
                    print("processing fold#%i/%i" % (num, num_folds))

                X_cv_train = np.vstack([X_train_folds[x] for x in range(num_folds) if x != num])
                y_cv_train = np.hstack([y_train_folds[x].T for x in range(num_folds) if x != num])

                X_cv_test = X_train_folds[num]
                y_cv_test = y_train_folds[num]

                # train the KNN classifier
                classifier.train(X_cv_train, y_cv_train)

                # compute distances between the held-out fold and the training folds
                dists = classifier.compute_distances_no_loops(X_cv_test)

                y_cv_test_pred = classifier.predict_labels(dists, k=k)
                # count correct predictions
                num_correct = np.sum(y_cv_test_pred == y_cv_test)
                k_to_accuracies[k].append(float(num_correct) / y_cv_test.shape[0])

        # report the computed accuracies
        for k in sorted(k_to_accuracies):
            for accuracy in k_to_accuracies[k]:
                print('k = %d, accuracy = %f' % (k, accuracy))
        for k in k_choices:
            accuracies = k_to_accuracies[k]
            plt.scatter([k] * len(accuracies), accuracies)

        accuracies_mean = np.array([np.mean(v) for k,v in sorted(k_to_accuracies.items())])
        accuracies_std = np.array([np.std(v) for k,v in sorted(k_to_accuracies.items())])
        plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std)
        plt.title('Cross-validation on k')
        plt.xlabel('k')
        plt.ylabel('Cross-validation accuracy')
        plt.show()

    def test(self, best_k=3):
        X_train, y_train, X_test, y_test = self.X_train, self.y_train, self.X_test, self.y_test
        X_test = np.reshape(X_test, (X_test.shape[0], -1))
        X_train = np.reshape(X_train, (X_train.shape[0], -1))
        classifier = KNearestNeighbor()
        classifier.train(X_train, y_train)
        y_test_pred = classifier.predict(X_test, k = best_k)

        # report accuracy
        num_correct = np.sum(y_test_pred == y_test)
        accuracy = float(num_correct) / len(X_test)
        print('Got %d / %d correct => accuracy: %f' % (num_correct, len(X_test), accuracy))

    def predictone(self, image, k=5):
        X_train, y_train, X_test, y_test = self.X_train, self.y_train, self.X_test, self.y_test
        X_test = np.array([image])
        X_test = np.reshape(X_test, (X_test.shape[0], -1))
        X_train = np.reshape(X_train, (X_train.shape[0], -1))
        classifier = KNearestNeighbor()
        classifier.train(X_train, y_train)
        return classifier.predict(X_test, k)[0]

    def predictdist(self, dist, k=5):
        X_train, y_train, X_test, y_test = self.X_train, self.y_train, self.X_test, self.y_test
        X_test = np.array(dist)
        X_test = np.reshape(X_test, (X_test.shape[0], -1))
        X_train = np.reshape(X_train, (X_train.shape[0], -1))
        classifier = KNearestNeighbor()
        classifier.train(X_train, y_train)
        return classifier.predict(X_test, k)


if __name__ == '__main__':
    k = KNN()
    k.dropsomepicture(5000, 500)
    k.show()
    # k.num_folds_cross()
    k.test(1)
    # print(k.predictdist(k.X_test[0:5], 5))
    # print(k.y_test[0:5])

mnist_loader.py

"""
mnist_loader
~~~~~~~~~~~~
A library to load the MNIST image data.  For details of the data
structures that are returned, see the doc strings for ``load_data``
and ``load_data_wrapper``.  In practice, ``load_data_wrapper`` is the
function usually called by our neural network code.
"""

# Libraries
# Standard library
import pickle
import gzip

# Third-party libraries
import numpy as np


def load_data():
    """Return the MNIST data as a tuple containing the training data,
    the validation data, and the test data.
    The ``training_data`` is returned as a tuple with two entries.
    The first entry contains the actual training images.  This is a
    numpy ndarray with 50,000 entries.  Each entry is, in turn, a
    numpy ndarray with 784 values, representing the 28 * 28 = 784
    pixels in a single MNIST image.
    The second entry in the ``training_data`` tuple is a numpy ndarray
    containing 50,000 entries.  Those entries are just the digit
    values (0...9) for the corresponding images contained in the first
    entry of the tuple.
    The ``validation_data`` and ``test_data`` are similar, except
    each contains only 10,000 images.
    This is a nice data format, but for use in neural networks it's
    helpful to modify the format of the ``training_data`` a little.
    That's done in the wrapper function ``load_data_wrapper()``, see
    below.
    """
    f = gzip.open('mnist.pkl.gz', 'rb')
    training_data, validation_data, test_data = pickle.load(f, encoding='bytes')
    f.close()
    return training_data, validation_data, test_data


def load_data_wrapper():
    """Return a tuple containing ``(training_data, validation_data,
    test_data)``. Based on ``load_data``, but the format is more
    convenient for use in our implementation of neural networks.
    In particular, ``training_data`` is a list containing 50,000
    2-tuples ``(x, y)``.  ``x`` is a 784-dimensional numpy.ndarray
    containing the input image.  ``y`` is a 10-dimensional
    numpy.ndarray representing the unit vector corresponding to the
    correct digit for ``x``.
    ``validation_data`` and ``test_data`` are lists containing 10,000
    2-tuples ``(x, y)``.  In each case, ``x`` is a 784-dimensional
    numpy.ndarray containing the input image, and ``y`` is the
    corresponding classification, i.e., the digit values (integers)
    corresponding to ``x``.
    Obviously, this means we're using slightly different formats for
    the training data and the validation / test data.  These formats
    turn out to be the most convenient for use in our neural network
    code."""
    tr_d, va_d, te_d = load_data()
    training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
    training_results = [vectorized_result(y) for y in tr_d[1]]
    training_data = zip(training_inputs, training_results)
    validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
    validation_data = zip(validation_inputs, va_d[1])
    test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
    test_data = zip(test_inputs, te_d[1])
    return training_data, validation_data, test_data


def vectorized_result(j):
    """Return a 10-dimensional unit vector with a 1.0 in the jth
    position and zeroes elsewhere.  This is used to convert a digit
    (0...9) into a corresponding desired output from the neural
    network."""
    e = np.zeros((10, 1))
    e[j] = 1.0
    return e
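
A minimal usage sketch, assuming mnist.pkl.gz sits in the working directory. Note that in Python 3 the wrappers return zip objects, which are single-use iterators; materialize them with list() before reusing them (Network.SGD below does exactly this):

import mnist_loader

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
training_data = list(training_data)  # 50000 (x, y) pairs
print(len(training_data))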

bp_network.py

import random
import numpy as np


class Network(object):

    def __init__(self, sizes):
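        # sizes lists the number of neurons per layer, e.g. [784, 30, 10];
        # biases and weights are initialized from a standard normal distribution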
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]

    def feedforward(self, a):
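        # return the network's output for input a (one forward pass)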
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a)+b)
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta,
            test_data=None):
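        # train the network using mini-batch stochastic gradient descent;
        # eta is the learning rate; if test_data is given, evaluate after each epoch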
        if test_data:
            test_data = list(test_data)
            n_test = len(test_data)
        training_data = list(training_data)
        n = len(training_data)
        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k+mini_batch_size]
                for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                print("Epoch {0}: {1} / {2}".format(j, self.evaluate(test_data), n_test))
            else:
                print("Epoch {0} complete".format(j))

    def update_mini_batch(self, mini_batch, eta):
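        # apply one gradient-descent step, using backprop on a single mini-batch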
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.weights = [w-(eta/len(mini_batch))*nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b-(eta/len(mini_batch))*nb
                       for b, nb in zip(self.biases, nabla_b)]

    def backprop(self, x, y):
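        # return (nabla_b, nabla_w), the layer-by-layer gradient of the cost for one sample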
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward
        activation = x
        activations = [x]  # list to store all the activations, layer by layer
        zs = []  # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation)+b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = self.cost_derivative(activations[-1], y) * \
            sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
        return nabla_b, nabla_w

    def evaluate(self, test_data):
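        # count test inputs whose most-activated output neuron matches the label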
        test_results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)

    def cost_derivative(self, output_activations, y):
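        # partial derivative of the quadratic cost with respect to the output activations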
        return output_activations-y


def sigmoid(z):
    return 1.0/(1.0+np.exp(-z))


def sigmoid_prime(z):
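    # derivative of the sigmoid: sigmoid(z) * (1 - sigmoid(z))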
    return sigmoid(z)*(1-sigmoid(z))

ensemble_learning.py

from svm import *
from knn import *
import bp_network
import mnist_reader as reader
import mnist_loader
import numpy as np


# SVM
svm = SVM()
svm.dropsomepicture(5000, 500)
svm.train()

# KNN
k = KNN()
k.dropsomepicture(5000, 500)

# bp network
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
net = bp_network.Network([784, 30, 10])
net.SGD(training_data, 30, 10, 3.0, test_data=test_data)

X_train, y_train, X_test, y_test = reader.getdataset()
mask = 10000
X_test = X_test[range(mask)]
y_test = y_test[range(mask)]
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
test_images = X_test.tolist()
test_labels = y_test.tolist()

right = 0
for i in range(len(test_images)):
    svm_label = [test_labels[i]]
    # the SVM was trained on pixels scaled into [0, 1), so scale its input the same way
    svm_image = [(X_test[i] / 256).tolist()]
    pre_svm = int(svm.predict(svm_label, svm_image, svm.model))
    pre_knn = int(k.predictone(test_images[i], 1))
    # the BP network was trained on mnist.pkl.gz inputs in [0, 1], so scale here as well
    pre_bp = int(np.argmax(net.feedforward((X_test[i] / 256).reshape((784, 1)))))

    # majority vote: any pair that agrees wins; on a three-way disagreement
    # fall back to the BP network, the strongest single classifier
    if pre_svm == pre_knn or pre_svm == pre_bp:
        result = pre_svm
    else:
        result = pre_bp

    if test_labels[i] == result:
        right += 1
        print(i, "true")
    else:
        print(i, "false")

print("Accuracy:", right/mask)

1. SVM

Accuracy: 90.86%

2. KNN

Cross-validation finds the best value of k to be 1.

Testing on 500 images gives an accuracy of 90.6%.

3. BP network

After 30 training epochs, accuracy is 94.98%.

4. Ensemble

Accuracy: 93.76%

V. Summary

In this experiment, the SVM, KNN, and neural-network classifiers each reach fairly high accuracy on their own, so simple majority voting brings no significant improvement; indeed, the ensemble (93.76%) scores slightly below the BP network alone (94.98%).


If there are any mistakes, please point them out.
