Improving Deep Neural Networks/first-week/

gradient_checking_function.py

import numpy as np

import gc_utils
# 1-D linear (scalar) case
def forward_propagation(x,theta):
    """
    實現圖中呈現的線性前向傳播(計算J)(J(theta)= theta * x)
    參數:
    x  - 一個實值輸入
    theta  - 參數,也是一個實數

    返回:
    J  - 函數J的值,用公式J(theta)= theta * x計算
    """
    J = np.dot(theta,x)
    return J
def backward_propagation(x,theta):
    """
    計算J相對於θ的導數。

    參數:
        x  - 一個實值輸入
        theta  - 參數,也是一個實數

    返回:
        dtheta  - 相對於θ的成本梯度
    """
    dtheta = x
    return dtheta
def gradient_check(x,theta,epsilon=1e-7):
    """

    實現圖中的反向傳播。

    參數:
        x  - 一個實值輸入
        theta  - 參數,也是一個實數
        epsilon  - 使用公式(3)計算輸入的微小偏移以計算近似梯度

    返回:
        近似梯度和後向傳播梯度之間的差異
    """

    # Compute gradapprox using the left-hand side of formula (3).
    thetaplus = theta + epsilon                               # Step 1
    thetaminus = theta - epsilon                              # Step 2
    J_plus = forward_propagation(x, thetaplus)                # Step 3
    J_minus = forward_propagation(x, thetaminus)              # Step 4
    gradapprox = (J_plus - J_minus) / (2 * epsilon)           # Step 5


    # Check whether gradapprox is close enough to the output of backward_propagation().
    grad = backward_propagation(x, theta)

    numerator = np.linalg.norm(grad - gradapprox)                      # Step 1'
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)    # Step 2'
    difference = numerator / denominator                               # Step 3'

    if difference < 1e-7:
        print("Gradient check: the gradient is correct!")
    else:
        print("Gradient check: the gradient exceeds the threshold!")

    return difference

# N-dimensional case
def forward_propagation_n(X,Y,parameters):
    """
       實現圖中的前向傳播(並計算成本)。
       參數:
           X - 訓練集爲m個例子
           Y -  m個示例的標籤
           parameters - 包含參數“W1”,“b1”,“W2”,“b2”,“W3”,“b3”的python字典:
               W1  - 權重矩陣,維度爲(5,4)
               b1  - 偏向量,維度爲(5,1)
               W2  - 權重矩陣,維度爲(3,5)
               b2  - 偏向量,維度爲(3,1)
               W3  - 權重矩陣,維度爲(1,3)
               b3  - 偏向量,維度爲(1,1)
       返回:
           cost - 成本函數(logistic)
       """
    m = X.shape[1]
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]

    Z1 = np.dot(W1,X)+b1
    A1 = gc_utils.relu(Z1)
    Z2 = np.dot(W2,A1)+b2
    A2 = gc_utils.relu(Z2)
    Z3 = np.dot(W3, A2) + b3
    A3 = gc_utils.sigmoid(Z3)

    # Compute the cost
    logprobs = np.multiply(-np.log(A3), Y) + np.multiply(-np.log(1 - A3), 1 - Y)
    cost = (1 / m) * np.sum(logprobs)

    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)

    return cost, cache
def backward_propagation_n(X,Y,cache):
    """
       實現圖中所示的反向傳播。

       參數:
           X - 輸入數據點(輸入節點數量,1)
           Y - 標籤
           cache - 來自forward_propagation_n()的cache輸出

       返回:
           gradients - 一個字典,其中包含與每個參數、激活和激活前變量相關的成本梯度。
       """
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
    dZ3 = A3-Y
    dW3 = 1./m * np.dot(dZ3,A2.T)
    db3 = 1./m * np.sum(dZ3,axis=1,keepdims=True)

    dA2 = np.dot(W3.T,dZ3)
    dZ2 = np.multiply(dA2,np.int64(A2>0))
    dW2 = 1. / m * np.dot(dZ2, A1.T)
    db2 = 1. / m * np.sum(dZ2, axis=1, keepdims=True)

    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, np.int64(A1 > 0))
    dW1 = 1. / m * np.dot(dZ1, X.T)
    db1 = 1. / m * np.sum(dZ1, axis=1, keepdims=True)

    gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,
                 "dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
                 "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}

    return gradients
def gradient_check_n(parameters,gradients,X,Y,epsilon=1e-7):
    """
    檢查backward_propagation_n是否正確計算forward_propagation_n輸出的成本梯度

    參數:
        parameters - 包含參數“W1”,“b1”,“W2”,“b2”,“W3”,“b3”的python字典:
        grad_output_propagation_n的輸出包含與參數相關的成本梯度。
        x  - 輸入數據點,維度爲(輸入節點數量,1)
        y  - 標籤
        epsilon  - 計算輸入的微小偏移以計算近似梯度

    返回:
        difference - 近似梯度和後向傳播梯度之間的差異
    """
    # Flatten the parameters and gradients into vectors
    parameters_values,keys = gc_utils.dictionary_to_vector(parameters)
    grad = gc_utils.gradients_to_vector(gradients)
    num_parameters = parameters_values.shape[0]
    J_plus = np.zeros((num_parameters,1))
    J_minus = np.zeros((num_parameters,1))
    gradapprox = np.zeros((num_parameters,1))

    for i in range(num_parameters):
        # Compute J_plus[i]. Inputs: "parameters_values, epsilon". Output = "J_plus[i]".
        thetaplus = np.copy(parameters_values)  # Step 1
        thetaplus[i][0] = thetaplus[i][0] + epsilon  # Step 2
        J_plus[i], cache = forward_propagation_n(X, Y, gc_utils.vector_to_dictionary(thetaplus))  # Step 3 (cache is not needed here)
        # Compute J_minus[i]. Inputs: "parameters_values, epsilon". Output = "J_minus[i]".
        thetaminus = np.copy(parameters_values)  # Step 1
        thetaminus[i][0] = thetaminus[i][0] - epsilon  # Step 2
        J_minus[i], cache = forward_propagation_n(X, Y, gc_utils.vector_to_dictionary(thetaminus))  # Step 3 (cache is not needed here)

        # Compute gradapprox[i]
        gradapprox[i] = (J_plus[i] - J_minus[i]) / (2 * epsilon)

    # Compare gradapprox to the backward-propagation gradients by computing the difference.
    numerator = np.linalg.norm(grad - gradapprox)  # Step 1'
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)  # Step 2'
    difference = numerator / denominator  # Step 3'
    if difference < 1e-7:
        print("Gradient check: the gradient is correct! difference = " + str(difference))
    else:
        print("Gradient check: the gradient exceeds the threshold! difference = " + str(difference))

    return difference
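
For reference, both checkers above implement the same two formulas; here is a short LaTeX recap using the names from the code (theta for the parameters, J for the cost, epsilon for the shift):

\[
gradapprox = \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2\varepsilon},
\qquad
difference = \frac{\lVert grad - gradapprox \rVert_2}{\lVert grad \rVert_2 + \lVert gradapprox \rVert_2}
\]

The centered difference gives the numerical estimate of the gradient, and the normalized distance between that estimate and the analytic gradient is what gets compared against the 1e-7 threshold.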

gc_utils.py

import numpy as np
import matplotlib.pyplot as plt

def sigmoid(x):
    """
    Compute the sigmoid of x
 
    Arguments:
    x -- A scalar or numpy array of any size.
 
    Return:
    s -- sigmoid(x)
    """
    s = 1/(1+np.exp(-x))
    return s
def relu(x):
    """
    Compute the relu of x
 
    Arguments:
    x -- A scalar or numpy array of any size.
 
    Return:
    s -- relu(x)
    """
    s = np.maximum(0,x)
    
    return s

def dictionary_to_vector(parameters):
    """
    Roll all our parameters dictionary into a single vector satisfying our specific required shape.
    """
    keys = []
    count = 0
    for key in ["W1", "b1", "W2", "b2", "W3", "b3"]:
        # flatten parameter
        new_vector = np.reshape(parameters[key], (-1,1))
        keys = keys + [key]*new_vector.shape[0]
        if count == 0:
            theta = new_vector
        else:
            theta = np.concatenate((theta, new_vector), axis=0)
        count = count + 1
 
    return theta, keys
 
def vector_to_dictionary(theta):
    """
    Unroll all our parameters dictionary from a single vector satisfying our specific required shape.
    """
    parameters = {}
    parameters["W1"] = theta[:20].reshape((5,4))
    parameters["b1"] = theta[20:25].reshape((5,1))
    parameters["W2"] = theta[25:40].reshape((3,5))
    parameters["b2"] = theta[40:43].reshape((3,1))
    parameters["W3"] = theta[43:46].reshape((1,3))
    parameters["b3"] = theta[46:47].reshape((1,1))
 
    return parameters
 
def gradients_to_vector(gradients):
    """
    Roll all our gradients dictionary into a single vector satisfying our specific required shape.
    """
    
    count = 0
    for key in ["dW1", "db1", "dW2", "db2", "dW3", "db3"]:
        # flatten parameter
        new_vector = np.reshape(gradients[key], (-1,1))
        
        if count == 0:
            theta = new_vector
        else:
            theta = np.concatenate((theta, new_vector), axis=0)
        count = count + 1
 
    return theta
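
A quick round-trip check of the helpers above can be a useful sanity test; this is a minimal sketch, assuming gc_utils.py and testCases.py sit in the same folder as in this post:

import numpy as np
import gc_utils
import testCases

# Flatten the test-case parameters into a (47, 1) column vector, then rebuild the dictionary.
_, _, parameters = testCases.gradient_check_n_test_case()
theta, keys = gc_utils.dictionary_to_vector(parameters)
restored = gc_utils.vector_to_dictionary(theta)

print(theta.shape)                                  # (47, 1): 20 + 5 + 15 + 3 + 3 + 1 entries
print(all(np.allclose(parameters[k], restored[k])   # True: the round trip loses nothing
          for k in parameters))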

testCases.py

import numpy as np

def gradient_check_n_test_case():
    np.random.seed(1)
    x = np.random.randn(4, 3)
    y = np.array([1, 1, 0])
    W1 = np.random.randn(5, 4)
    b1 = np.random.randn(5, 1)
    W2 = np.random.randn(3, 5)
    b2 = np.random.randn(3, 1)
    W3 = np.random.randn(1, 3)
    b3 = np.random.randn(1, 1)
    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2,
                  "W3": W3,
                  "b3": b3}

    return x, y, parameters

gradient_checking.py

import testCases
from gradient_checking_function import forward_propagation, backward_propagation, gradient_check
from gradient_checking_function import forward_propagation_n, backward_propagation_n, gradient_check_n

# Test gradient_check
print("----------------- Testing gradient_check -----------------")
x, theta = 2, 4
difference = gradient_check(x, theta)
print("difference = " + str(difference))


# Test the N-dimensional case
print("----------------- Testing gradient_check_n -----------------")
X, Y, parameters = testCases.gradient_check_n_test_case()

cost, cache = forward_propagation_n(X, Y, parameters)
gradients = backward_propagation_n(X, Y, cache)
difference = gradient_check_n(parameters, gradients, X, Y)
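
One optional sanity check, not part of the original script but a small sketch of how to confirm the checker reacts to a bug: corrupt one gradient on purpose and verify that gradient_check_n now reports a difference above the 1e-7 threshold.

# Deliberately break one gradient; the reported difference should now exceed 1e-7.
bad_gradients = {key: value.copy() for key, value in gradients.items()}
bad_gradients["dW3"] = 2 * bad_gradients["dW3"]   # wrong on purpose
difference_bad = gradient_check_n(parameters, bad_gradients, X, Y)
print("difference with a broken dW3 = " + str(difference_bad))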

 
