[Pattern Classification] Training a Three-Layer Neural Network on 3-D Gaussian Data for Classification

neuralNetwork.m

clear;
% Mean [0 0 0]
MU1 = [0 0 0];
% Covariance: identity matrix
SIGMA1 = [ 1 0 0; 0 1 0; 0 0 1 ];
DATA1 = mvnrnd(MU1, SIGMA1, 1000);

% scatter3(DATA1(:, 1), DATA1(:, 2), DATA1(:, 3))

% Mean [0 1 0]
MU2 = [0 1 0];
% Covariance: full (correlated) matrix
SIGMA2 = [ 1 0 1; 0 2 2; 1 2 5 ];
DATA2 = mvnrnd(MU2, SIGMA2, 1000);

% Mean [-1 0 1]
MU3 = [-1 0 1];
% Covariance: diagonal matrix
SIGMA3 = [ 2 0 0; 0 6 0; 0 0 1 ];
DATA3 = mvnrnd(MU3, SIGMA3, 1000);

% Mean [0 0.5 1]
MU4 = [0 0.5 1];
% Covariance: diagonal matrix
SIGMA4 = [ 2 0 0; 0 1 0; 0 0 3 ];
DATA4 = mvnrnd(MU4, SIGMA4, 1000);
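
% (Optional sketch) Plotting all four classes together shows how strongly
% the distributions overlap; uncomment the lines below to inspect the data.
% figure; hold on; grid on;
% plot3(DATA1(:, 1), DATA1(:, 2), DATA1(:, 3), '.');
% plot3(DATA2(:, 1), DATA2(:, 2), DATA2(:, 3), '.');
% plot3(DATA3(:, 1), DATA3(:, 2), DATA3(:, 3), '.');
% plot3(DATA4(:, 1), DATA4(:, 2), DATA4(:, 3), '.');
% legend('class 1', 'class 2', 'class 3', 'class 4');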

% Evaluate every sample of DATA1 under each of the four Gaussian densities
Y1 = probability(DATA1, MU1, SIGMA1);
Y2 = probability(DATA1, MU2, SIGMA2);
Y3 = probability(DATA1, MU3, SIGMA3);
Y4 = probability(DATA1, MU4, SIGMA4);
Y = [Y1 Y2 Y3 Y4];
Z = sum(Y, 2);
% Normalise each row of Y so it becomes the posterior probability of the
% four classes (equal class priors are assumed throughout)
Y = Y ./ repmat(Z, 1, 4);
[LABEL, LABELIDX] = max(Y, [], 2);
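% With equal priors, each row of Y is the Bayes posterior P(class = k | x),
% so LABELIDX holds the Bayes-optimal label that the network is trained
% to reproduce.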
% Initialise the weight matrix between the input layer and the hidden layer
W1 = 2*rand(3)-1;
% Initialise the weight matrix between the activated hidden layer and the output layer
W2 = 2*rand(3, 4)-1;
% Biases B1 and B2
B1 = 2*rand(1)-1;
B2 = 2*rand(1)-1;
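% Note: B1 and B2 are scalar biases shared by every neuron in their layer.
% A per-neuron bias vector (e.g. B1 = 2*rand(1, 3)-1, B2 = 2*rand(1, 4)-1)
% would be the more common choice; the scalar form is kept here to match
% the update rules below.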

learningrate = 0.01;
for m = 1:50
    for i = 1:length(DATA1)
        % Forward pass
        % The network uses a 3-3-4 structure with a softmax output layer
        NEURON1 = DATA1(i, :) * W1 + B1;
        NEURON1_ACTIVE = sigmoid(NEURON1);
        NEURON2 = NEURON1_ACTIVE * W2 + B2;
        OUTPUT = softmax(NEURON2);

        % Backward pass
        % The target is the Bayes posterior Y(i, :) computed above;
        % OUTPUT - Y(i, :) is the derivative of the cross-entropy loss
        % with respect to the network output
        loss = OUTPUT - Y(i, :);
        softmaxloss = softmax_loss(NEURON2) .* loss;
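        % Note: for softmax combined with cross-entropy the gradient with
        % respect to NEURON2 simplifies to OUTPUT - Y(i, :); this script
        % instead scales the error by the diagonal of the softmax Jacobian.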
        % Gradient of the loss w.r.t. W2: the outer product of the hidden
        % activations and the output error
        W2_LOSS = NEURON1_ACTIVE' * softmaxloss;
        % Backpropagate the error to the hidden layer
        NEURON_ACTIVE_LOSS = W2 * softmaxloss';
        NEURON1_LOSS = sigmoid_loss(NEURON1) .* NEURON_ACTIVE_LOSS';
        % Gradient of the loss w.r.t. W1: the outer product of the input
        % and the hidden-layer error
        W1_LOSS = DATA1(i, :)' * NEURON1_LOSS;

        % Gradient-descent updates
        B1 = B1 - sum(NEURON1_LOSS) * learningrate;
        W1 = W1 - W1_LOSS * learningrate;
        % B2 is added to every output unit, so its gradient is the sum
        % of the output error
        B2 = B2 - sum(softmaxloss) * learningrate;
        W2 = W2 - W2_LOSS * learningrate;
    end
end
% Count how many training samples the network assigns to the Bayes label
value = 0;
for i = 1:length(DATA1)
    % Forward pass
    NEURON1 = DATA1(i, :) * W1 + B1;
    NEURON1_ACTIVE = sigmoid(NEURON1);
    NEURON2 = NEURON1_ACTIVE * W2 + B2;
    OUTPUT = softmax(NEURON2);
    [tLABEL, tLABELIDX] = max(OUTPUT, [], 2);
    if tLABELIDX == LABELIDX(i)
        value = value + 1;
    end
end
RATE = value / length(DATA1);
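
Because the script trains and evaluates on the same 1000 samples from
DATA1, RATE only measures agreement with the Bayes labels on the training
set. A minimal sketch of an evaluation on fresh samples from all four
distributions (assuming the trained W1, W2, B1, B2 and the helper
functions below are still in scope; the 250-samples-per-class split is an
arbitrary choice):

TEST = [ mvnrnd(MU1, SIGMA1, 250); mvnrnd(MU2, SIGMA2, 250); ...
         mvnrnd(MU3, SIGMA3, 250); mvnrnd(MU4, SIGMA4, 250) ];
% Bayes labels of the test samples, computed as for the training data;
% row-wise normalisation does not change the argmax, so it is skipped
P = [ probability(TEST, MU1, SIGMA1) probability(TEST, MU2, SIGMA2) ...
      probability(TEST, MU3, SIGMA3) probability(TEST, MU4, SIGMA4) ];
[~, TRUE_IDX] = max(P, [], 2);
correct = 0;
for i = 1:size(TEST, 1)
    H = sigmoid(TEST(i, :) * W1 + B1);
    [~, idx] = max(softmax(H * W2 + B2), [], 2);
    correct = correct + (idx == TRUE_IDX(i));
end
TEST_RATE = correct / size(TEST, 1);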

probability.m

function output = probability( X, MU, SIGMA )
%PROBABILITY Multivariate normal density of every row of X.
%   Returns a column vector containing the value of the Gaussian pdf
%   with mean MU and covariance SIGMA at each sample (row) of X.
%   mvnpdf is provided by the Statistics and Machine Learning Toolbox,
%   the same toolbox that provides mvnrnd above.
output = mvnpdf(X, MU, SIGMA);
end

sigmoid.m

function output = sigmoid( x )
%SIGMOID Element-wise logistic function 1 ./ (1 + exp(-x)).
output = 1./(1+exp(-x));
end

sigmoid_loss.m

function output = sigmoid_loss( x )
%SIGMOID_LOSS Element-wise derivative of the sigmoid: s(x) .* (1 - s(x)).
s = 1./(1+exp(-x));
output = s .* (1 - s);
end

softmax.m

function output = softmax( x )
%SOFTMAX Softmax of a row vector; the max is subtracted before
%   exponentiating for numerical stability.
e = exp(x - max(x));
output = e ./ sum(e);
end
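
Subtracting max(x) before exponentiating leaves the result mathematically
unchanged but prevents overflow: without the shift, an input such as
[1000 0 0 0] evaluates exp(1000) = Inf and the function returns NaN.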

softmax_loss.m

function output = softmax_loss( x )
%SOFTMAX_LOSS Diagonal of the softmax Jacobian: s .* (1 - s),
%   where s = softmax(x).
s = exp(x - max(x));
s = s ./ sum(s);
output = s - s.^2;
end
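
Hand-written derivatives such as sigmoid_loss and softmax_loss are easy to
get wrong, so a quick finite-difference check is worth running once. A
minimal sketch (the step size 1e-6 and the 4-element test vector are
arbitrary choices):

x = randn(1, 4);
h = 1e-6;
numeric = zeros(1, 4);
for k = 1:4
    xp = x;  xp(k) = xp(k) + h;
    xm = x;  xm(k) = xm(k) - h;
    sp = softmax(xp);
    sm = softmax(xm);
    numeric(k) = (sp(k) - sm(k)) / (2 * h);   % d softmax_k / d x_k
end
max(abs(numeric - softmax_loss(x)))           % should be ~1e-9 or smaller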
