Implementing Recurrent Neural Networks from Scratch with Python and NumPy

1. Basic RNN

Let's take a look at the diagram of a recurrent neural network below.

(Figure: the unrolled recurrent neural network)

The implementation is done in two steps:

(1) Implement the computations needed for a single time step of the RNN.
(2) Implement a loop over the time steps so that the whole input sequence is processed.
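
All of the code below assumes that NumPy is imported as np and that sigmoid and softmax helpers are available; they are not defined in the original post, so here is a minimal sketch of what they could look like (the column-wise softmax over the output dimension is an assumption):

import numpy as np

def sigmoid(x):
    # Element-wise logistic function
    return 1 / (1 + np.exp(-x))

def softmax(x):
    # Column-wise softmax; subtracting the max keeps the exponentials numerically stable
    e_x = np.exp(x - np.max(x, axis=0, keepdims=True))
    return e_x / np.sum(e_x, axis=0, keepdims=True)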

1.1 RNN cell

A recurrent neural network can be seen as the repetition of a single cell, so the first thing to implement is the computation for one time step. The figure below describes the operations of an RNN cell for a single time step.

(Figure: operations of the RNN cell for a single time step)

def rnn_cell(xt, a_prev, parameters):
    # Retrieve the parameters from the dictionary
    Wax = parameters["Wax"]
    Waa = parameters["Waa"]
    Wya = parameters["Wya"]
    ba = parameters["ba"]
    by = parameters["by"]

    # Next hidden state and the prediction for this time step
    a_next = np.tanh(np.dot(Waa, a_prev) + np.dot(Wax, xt) + ba)
    yt_pred = softmax(np.dot(Wya, a_next) + by)

    # Store the values needed for backward propagation
    cache = (a_next, a_prev, xt, parameters)

    return a_next, yt_pred, cache
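
As a sanity check, the cell can be run on random data; the dimensions below (n_x = 3, n_a = 5, n_y = 2, m = 10) are arbitrary values chosen only for illustration:

np.random.seed(1)
xt = np.random.randn(3, 10)            # input at time t, shape (n_x, m)
a_prev = np.random.randn(5, 10)        # previous hidden state, shape (n_a, m)
parameters = {"Wax": np.random.randn(5, 3),
              "Waa": np.random.randn(5, 5),
              "Wya": np.random.randn(2, 5),
              "ba": np.random.randn(5, 1),
              "by": np.random.randn(2, 1)}
a_next, yt_pred, cache = rnn_cell(xt, a_prev, parameters)
print(a_next.shape, yt_pred.shape)     # (5, 10) (2, 10)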

1.2 Forward propagation

The RNN is a repeated chain of the cell we just built: if the input sequence spans 10 time steps, the RNN cell is applied 10 times.

def rnn_forward(x, a0, parameters):
    caches = []
    # x has shape (n_x, m, T_x): input features, batch size, number of time steps
    n_x, m, T_x = x.shape
    n_y, n_a = parameters["Wya"].shape

    # Initialize the hidden states and predictions for every time step
    a = np.zeros([n_a, m, T_x])
    y_pred = np.zeros([n_y, m, T_x])
    a_next = a0

    # Loop over the time steps, feeding each hidden state into the next step
    for t in range(T_x):
        a_next, yt_pred, cache = rnn_cell(x[:, :, t], a_next, parameters)

        a[:, :, t] = a_next
        y_pred[:, :, t] = yt_pred
        caches.append(cache)

    caches = (caches, x)

    return a, y_pred, caches
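
Reusing the toy parameters dictionary from the previous example, a forward pass over a sequence of T_x = 4 time steps would look like this (again, the shapes are illustrative only):

np.random.seed(1)
x = np.random.randn(3, 10, 4)          # inputs, shape (n_x, m, T_x)
a0 = np.random.randn(5, 10)            # initial hidden state, shape (n_a, m)
a, y_pred, caches = rnn_forward(x, a0, parameters)
print(a.shape, y_pred.shape)           # (5, 10, 4) (2, 10, 4)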

1.3 Backward propagation

(Figure: rnn_cell_backprop — backward propagation through a single RNN cell)

def rnn_cell_backward(da_next, cache):
    # Unpack the cache stored during the forward pass
    (a_next, a_prev, xt, parameters) = cache

    # Only the recurrent parameters are needed for this backward step
    Wax = parameters["Wax"]
    Waa = parameters["Waa"]
    ba = parameters["ba"]

    # Backprop through tanh: d(tanh(z))/dz = 1 - tanh(z)^2 = 1 - a_next^2
    dtanh = (1 - a_next**2) * da_next

    # Gradients with respect to the input and its weights
    dxt = np.dot(Wax.T, dtanh)
    dWax = np.dot(dtanh, xt.T)

    # Gradients with respect to the previous hidden state and the recurrent weights
    da_prev = np.dot(Waa.T, dtanh)
    dWaa = np.dot(dtanh, a_prev.T)

    # Gradient of the bias: sum over the batch dimension
    dba = np.sum(dtanh, axis=1, keepdims=True)

    gradients = {"dxt": dxt, "da_prev": da_prev, "dWax": dWax, "dWaa": dWaa, "dba": dba}

    return gradients

def rnn_backward(da, caches):
    # Unpack the caches; the first cache gives access to x1 for shape information
    (caches, x) = caches
    (a1, a0, x1, parameters) = caches[0]

    n_a, m, T_x = da.shape
    n_x, m = x1.shape

    # Initialize the gradients with zeros of the right shapes
    dx = np.zeros((n_x, m, T_x))
    dWax = np.zeros((n_a, n_x))
    dWaa = np.zeros((n_a, n_a))
    dba = np.zeros((n_a, 1))
    da0 = np.zeros((n_a, m))
    da_prevt = np.zeros((n_a, m))

    # Walk backwards through time, accumulating the parameter gradients
    for t in reversed(range(T_x)):
        # The gradient flowing into a<t> is the external gradient da[:,:,t]
        # plus the gradient coming back from the following time step
        gradients = rnn_cell_backward(da[:, :, t] + da_prevt, caches[t])
        dxt, da_prevt, dWaxt, dWaat, dbat = gradients["dxt"], gradients["da_prev"], gradients["dWax"], gradients["dWaa"], gradients["dba"]
        dx[:, :, t] = dxt
        dWax += dWaxt
        dWaa += dWaat
        dba += dbat

    # Gradient with respect to the initial hidden state
    da0 = da_prevt

    gradients = {"dx": dx, "da0": da0, "dWax": dWax, "dWaa": dWaa, "dba": dba}

    return gradients
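
Note that rnn_backward only propagates a gradient da arriving at the hidden states; it does not include the gradient flowing back through the predictions y. A minimal sketch of the call, reusing the forward pass above with a random da standing in for the upstream gradient:

da = np.random.randn(5, 10, 4)         # upstream gradient w.r.t. the hidden states
gradients = rnn_backward(da, caches)
print(gradients["dx"].shape)           # (3, 10, 4)
print(gradients["dWaa"].shape)         # (5, 5)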

2. GRU

GRU forward-propagation equations (simplified):
$$\hat{c}^{<t>}=\tanh(W_{cc}c^{<t-1>}+W_{cx}x^{<t>}+b_c)$$
$$\Gamma_u = sigmoid(\hat{c}^{<t>})$$
$$c^{<t>}=\Gamma_u*\hat{c}^{<t>}+(1-\Gamma_u)*c^{<t-1>}$$
GRU forward-propagation equations (full):

$$\Gamma_r=sigmoid(W_{rc}c^{<t-1>}+W_{rx}x^{<t>}+b_r)$$
$$\hat{c}^{<t>}=\tanh(W_{cc}(\Gamma_r*c^{<t-1>})+W_{cx}x^{<t>}+b_c)$$
$$\Gamma_u = sigmoid(W_{uc}c^{<t-1>}+W_{ux}x^{<t>}+b_u)$$
$$c^{<t>}=\Gamma_u*\hat{c}^{<t>}+(1-\Gamma_u)*c^{<t-1>}$$
$$a^{<t>}=c^{<t>}$$

2.1 GRU cell

def gru_cell(xt, c_prev, parameters):
    # Retrieve the parameters from the dictionary
    Wcx = parameters["Wcx"]
    Wcc = parameters["Wcc"]
    Wyc = parameters["Wyc"]
    bc = parameters["bc"]
    by = parameters["by"]

    # Candidate memory value and update gate (simplified GRU: the gate is
    # taken directly as the sigmoid of the candidate, as in the equations above)
    c_temp = np.tanh(np.dot(Wcc, c_prev) + np.dot(Wcx, xt) + bc)
    fu = sigmoid(c_temp)
    # Blend the candidate with the previous memory state
    c_next = fu * c_temp + (1 - fu) * c_prev
    yt_pred = softmax(np.dot(Wyc, c_next) + by)

    cache = (c_next, c_prev, xt, parameters)
    return c_next, yt_pred, cache
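
The cell above follows the simplified equations, so it has no separate reset or update-gate weights. A quick check with arbitrary toy dimensions (n_x = 3, n_c = 5, n_y = 2, m = 10), chosen only for illustration:

np.random.seed(1)
xt = np.random.randn(3, 10)
c_prev = np.random.randn(5, 10)
gru_parameters = {"Wcx": np.random.randn(5, 3),
                  "Wcc": np.random.randn(5, 5),
                  "Wyc": np.random.randn(2, 5),
                  "bc": np.random.randn(5, 1),
                  "by": np.random.randn(2, 1)}
c_next, yt_pred, cache = gru_cell(xt, c_prev, gru_parameters)
print(c_next.shape, yt_pred.shape)     # (5, 10) (2, 10)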

2.2 Forward propagation

def gru_forward(x, c0, parameters):
    caches = []

    n_x, m, T_x = x.shape
    n_y, n_c = parameters["Wyc"].shape

    # Memory states and predictions for every time step
    c = np.zeros([n_c, m, T_x])
    y_pred = np.zeros([n_y, m, T_x])

    c_next = c0

    # Loop over the time steps, carrying the memory state forward
    for t in range(T_x):
        c_next, yt_pred, cache = gru_cell(x[:, :, t], c_next, parameters)
        c[:, :, t] = c_next
        y_pred[:, :, t] = yt_pred
        caches.append(cache)

    caches = (caches, x)

    return c, y_pred, caches
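
And the full GRU forward pass, reusing gru_parameters from the previous example and starting from a zero memory state:

x = np.random.randn(3, 10, 4)          # inputs, shape (n_x, m, T_x)
c0 = np.zeros((5, 10))                 # initial memory state
c, y_pred, caches = gru_forward(x, c0, gru_parameters)
print(c.shape, y_pred.shape)           # (5, 10, 4) (2, 10, 4)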

3. LSTM

For more details on the LSTM, see https://blog.csdn.net/zhangbaoanhadoop/article/details/81952284

3.1 LSTM cell

(Figure: the LSTM cell)

def lstm_cell(xt, a_prev, c_prev, parameters):
    # Retrieve the parameters: each gate weight matrix has shape (n_a, n_a + n_x)
    Wf = parameters["Wf"]
    bf = parameters["bf"]
    Wi = parameters["Wi"]
    bi = parameters["bi"]
    Wc = parameters["Wc"]
    bc = parameters["bc"]
    Wo = parameters["Wo"]
    bo = parameters["bo"]
    Wy = parameters["Wy"]
    by = parameters["by"]

    n_x, m = xt.shape
    n_y, n_a = Wy.shape

    # Stack a_prev and xt into one matrix so each gate needs only one matrix product
    concat = np.zeros((n_a + n_x, m))
    concat[: n_a, :] = a_prev
    concat[n_a :, :] = xt

    # Forget gate, input (update) gate, candidate cell value
    ft = sigmoid(np.dot(Wf, concat) + bf)
    it = sigmoid(np.dot(Wi, concat) + bi)
    cct = np.tanh(np.dot(Wc, concat) + bc)
    # New cell state, output gate, new hidden state
    c_next = ft * c_prev + it * cct
    ot = sigmoid(np.dot(Wo, concat) + bo)
    a_next = ot * np.tanh(c_next)

    yt_pred = softmax(np.dot(Wy, a_next) + by)

    # Store the values needed for backward propagation
    cache = (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters)

    return a_next, c_next, yt_pred, cache
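
Because the gates operate on the concatenation of a_prev and xt, each gate weight matrix has shape (n_a, n_a + n_x). A sanity check with toy dimensions (n_x = 3, n_a = 5, n_y = 2, m = 10), chosen only for illustration:

np.random.seed(1)
xt = np.random.randn(3, 10)
a_prev = np.random.randn(5, 10)
c_prev = np.random.randn(5, 10)
lstm_parameters = {"Wf": np.random.randn(5, 5 + 3), "bf": np.random.randn(5, 1),
                   "Wi": np.random.randn(5, 5 + 3), "bi": np.random.randn(5, 1),
                   "Wc": np.random.randn(5, 5 + 3), "bc": np.random.randn(5, 1),
                   "Wo": np.random.randn(5, 5 + 3), "bo": np.random.randn(5, 1),
                   "Wy": np.random.randn(2, 5), "by": np.random.randn(2, 1)}
a_next, c_next, yt_pred, cache = lstm_cell(xt, a_prev, c_prev, lstm_parameters)
print(a_next.shape, c_next.shape, yt_pred.shape)   # (5, 10) (5, 10) (2, 10)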

3.2 Forward propagation

We have implemented the forward pass for a single LSTM time step; now we loop it over the sequence to compute the forward propagation of the whole LSTM network, which works much like before.

(Figure: LSTM forward propagation over the sequence)

def lstm_forward(x, a0, parameters):
    caches = []

    n_x, m, T_x = x.shape
    n_y, n_a = parameters["Wy"].shape

    # Hidden states, cell states, and predictions for every time step
    # (note: c must be its own array, not an alias of a)
    a = np.zeros((n_a, m, T_x))
    c = np.zeros((n_a, m, T_x))
    y = np.zeros((n_y, m, T_x))

    a_next = a0
    c_next = np.zeros(a_next.shape)

    for t in range(T_x):
        a_next, c_next, yt, cache = lstm_cell(x[:, :, t], a_next, c_next, parameters)
        a[:, :, t] = a_next
        y[:, :, t] = yt
        c[:, :, t] = c_next
        caches.append(cache)

    caches = (caches, x)

    return a, y, c, caches
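
Reusing lstm_parameters from the previous example, a forward pass over a sequence of 7 time steps:

x = np.random.randn(3, 10, 7)          # inputs, shape (n_x, m, T_x)
a0 = np.random.randn(5, 10)            # initial hidden state
a, y, c, caches = lstm_forward(x, a0, lstm_parameters)
print(a.shape, y.shape, c.shape)       # (5, 10, 7) (2, 10, 7) (5, 10, 7)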

3.3 Backward propagation

Gate derivatives:
$$d\Gamma_o^{\langle t \rangle} = da_{next}*\tanh(c_{next})*\Gamma_o^{\langle t \rangle}*(1-\Gamma_o^{\langle t \rangle})$$
$$d\tilde{c}^{\langle t \rangle} = \left(dc_{next}*\Gamma_u^{\langle t \rangle} + \Gamma_o^{\langle t \rangle}*(1-\tanh(c_{next})^2)*\Gamma_u^{\langle t \rangle}*da_{next}\right)*(1-(\tilde{c}^{\langle t \rangle})^2)$$
$$d\Gamma_u^{\langle t \rangle} = \left(dc_{next}*\tilde{c}^{\langle t \rangle} + \Gamma_o^{\langle t \rangle}*(1-\tanh(c_{next})^2)*\tilde{c}^{\langle t \rangle}*da_{next}\right)*\Gamma_u^{\langle t \rangle}*(1-\Gamma_u^{\langle t \rangle})$$
$$d\Gamma_f^{\langle t \rangle} = \left(dc_{next}*c_{prev} + \Gamma_o^{\langle t \rangle}*(1-\tanh(c_{next})^2)*c_{prev}*da_{next}\right)*\Gamma_f^{\langle t \rangle}*(1-\Gamma_f^{\langle t \rangle})$$
Parameter derivatives:
$$dW_f = d\Gamma_f^{\langle t \rangle} * \begin{pmatrix} a_{prev} \\ x_t \end{pmatrix}^T$$
$$dW_u = d\Gamma_u^{\langle t \rangle} * \begin{pmatrix} a_{prev} \\ x_t \end{pmatrix}^T$$
$$dW_c = d\tilde{c}^{\langle t \rangle} * \begin{pmatrix} a_{prev} \\ x_t \end{pmatrix}^T$$
$$dW_o = d\Gamma_o^{\langle t \rangle} * \begin{pmatrix} a_{prev} \\ x_t \end{pmatrix}^T$$

def lstm_cell_backward(da_next, dc_next, cache):
    # Retrieve information from the cache
    (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters) = cache

    # Retrieve the dimensions of xt and a_next
    n_x, m = xt.shape
    n_a, m = a_next.shape

    # Derivatives of the gates and of the candidate cell value
    dot = da_next * np.tanh(c_next) * ot * (1 - ot)
    dcct = (dc_next * it + ot * (1 - np.square(np.tanh(c_next))) * it * da_next) * (1 - np.square(cct))
    dit = (dc_next * cct + ot * (1 - np.square(np.tanh(c_next))) * cct * da_next) * it * (1 - it)
    dft = (dc_next * c_prev + ot * (1 - np.square(np.tanh(c_next))) * c_prev * da_next) * ft * (1 - ft)

    # Derivatives of the parameters
    concat = np.concatenate((a_prev, xt), axis=0).T
    dWf = np.dot(dft, concat)
    dWi = np.dot(dit, concat)
    dWc = np.dot(dcct, concat)
    dWo = np.dot(dot, concat)
    dbf = np.sum(dft, axis=1, keepdims=True)
    dbi = np.sum(dit, axis=1, keepdims=True)
    dbc = np.sum(dcct, axis=1, keepdims=True)
    dbo = np.sum(dot, axis=1, keepdims=True)

    # Derivatives with respect to the previous hidden state, previous memory state, and input
    da_prev = np.dot(parameters["Wf"][:, :n_a].T, dft) + np.dot(parameters["Wc"][:, :n_a].T, dcct) + np.dot(parameters["Wi"][:, :n_a].T, dit) + np.dot(parameters["Wo"][:, :n_a].T, dot)

    dc_prev = dc_next * ft + ot * (1 - np.square(np.tanh(c_next))) * ft * da_next

    dxt = np.dot(parameters["Wf"][:, n_a:].T, dft) + np.dot(parameters["Wc"][:, n_a:].T, dcct) + np.dot(parameters["Wi"][:, n_a:].T, dit) + np.dot(parameters["Wo"][:, n_a:].T, dot)

    # Save the gradients in a dictionary
    gradients = {"dxt": dxt, "da_prev": da_prev, "dc_prev": dc_prev, "dWf": dWf, "dbf": dbf, "dWi": dWi, "dbi": dbi,
                 "dWc": dWc, "dbc": dbc, "dWo": dWo, "dbo": dbo}

    return gradients

def lstm_backward(da, caches):
    # Retrieve the first cache (t=1) from caches
    caches, x = caches
    (a1, c1, a0, c0, f1, i1, cc1, o1, x1, parameters) = caches[0]

    # Retrieve the dimensions of da and x1
    n_a, m, T_x = da.shape
    n_x, m = x1.shape

    # Initialize the gradients
    dx = np.zeros([n_x, m, T_x])
    da0 = np.zeros([n_a, m])
    da_prevt = np.zeros([n_a, m])
    dc_prevt = np.zeros([n_a, m])
    dWf = np.zeros([n_a, n_a + n_x])
    dWi = np.zeros([n_a, n_a + n_x])
    dWc = np.zeros([n_a, n_a + n_x])
    dWo = np.zeros([n_a, n_a + n_x])
    dbf = np.zeros([n_a, 1])
    dbi = np.zeros([n_a, 1])
    dbc = np.zeros([n_a, 1])
    dbo = np.zeros([n_a, 1])

    # Process all time steps in reverse
    for t in reversed(range(T_x)):
        # Compute all gradients for this time step; the gradient flowing into a<t>
        # is the external gradient da[:,:,t] plus the one from the following step
        gradients = lstm_cell_backward(da[:, :, t] + da_prevt, dc_prevt, caches[t])
        # Store or accumulate the relevant gradients; the hidden- and cell-state
        # gradients are carried back to the previous time step
        dx[:, :, t] = gradients['dxt']
        da_prevt = gradients['da_prev']
        dc_prevt = gradients['dc_prev']
        dWf = dWf + gradients['dWf']
        dWi = dWi + gradients['dWi']
        dWc = dWc + gradients['dWc']
        dWo = dWo + gradients['dWo']
        dbf = dbf + gradients['dbf']
        dbi = dbi + gradients['dbi']
        dbc = dbc + gradients['dbc']
        dbo = dbo + gradients['dbo']
    # Set the gradient of the initial hidden state to the last da_prev from the loop
    da0 = da_prevt

    # Save all gradients in a dictionary
    gradients = {"dx": dx, "da0": da0, "dWf": dWf, "dbf": dbf, "dWi": dWi, "dbi": dbi,
                 "dWc": dWc, "dbc": dbc, "dWo": dWo, "dbo": dbo}

    return gradients
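
As with the basic RNN, lstm_backward only takes the gradient da arriving at the hidden states. A minimal sketch of the call, reusing the caches produced by the lstm_forward example above:

da = np.random.randn(5, 10, 7)         # upstream gradient w.r.t. the hidden states
gradients = lstm_backward(da, caches)
print(gradients["dx"].shape)           # (3, 10, 7)
print(gradients["dWf"].shape)          # (5, 8), i.e. (n_a, n_a + n_x)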
