Optimization Algorithms in Python: Steepest Descent, Newton's Method, and Quasi-Newton (BFGS)

Only numpy is used; as long as that library is installed, the script should run as-is.

import numpy as np

def f(x):   # objective function: the 2-D Rosenbrock function
    x1 = x[0]
    x2 = x[1]
    y = 100*((x2 - x1**2)**2) + (x1-1)**2
    return y
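This is the two-dimensional Rosenbrock function, a standard test problem whose global minimum is f(1, 1) = 0 at the bottom of a narrow curved valley. A quick sanity check of the definition:

print(f(np.array([1.0, 1.0])))   # 0.0 at the known minimum (1, 1)
print(f(np.array([0.7, 0.9])))   # 16.9 at the starting point used below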

def num_grad(x, h):     # numerical gradient via central differences
    df = np.zeros(x.size)
    for i in range(x.size):
        x1, x2 = x.copy(), x.copy()  # a copy is required here; plain assignment (=) in Python only binds another name to the same array, it does not copy (unlike assignment in C/C++)
        x1[i] = x[i] - h
        x2[i] = x[i] + h
        y1, y2 = f(x1), f(x2)
        df[i] = (y2-y1)/(2*h)
    return df
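One way to validate num_grad is against the analytic gradient of the Rosenbrock function, grad f = (-400*x1*(x2 - x1^2) + 2*(x1 - 1), 200*(x2 - x1^2)). A minimal check; the helper name grad_exact is mine, not from the original post:

def grad_exact(x):   # analytic gradient of the Rosenbrock function
    x1, x2 = x[0], x[1]
    return np.array([-400*x1*(x2 - x1**2) + 2*(x1 - 1),
                     200*(x2 - x1**2)])

xt = np.array([0.7, 0.9])
print(num_grad(xt, 10**-5) - grad_exact(xt))   # both entries should be tiny (~1e-6 or smaller)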

def num_hess(x, h):    # numerical Hessian matrix
    hess = np.zeros((x.size, x.size))
    for i in range(x.size):
        x1 = x.copy()
        x1[i] = x[i] - h
        df1 = num_grad(x1, h)
        x2 = x.copy()
        x2[i] = x[i] + h
        df2 = num_grad(x2, h)
        d2f = (df2 - df1) / (2 * h)   # change of the whole gradient w.r.t. x[i]
        hess[i] = d2f                 # fills row i of the Hessian
    return hess
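num_hess can be checked the same way against the analytic Hessian of the Rosenbrock function, [[1200*x1^2 - 400*x2 + 2, -400*x1], [-400*x1, 200]]. A sketch (hess_exact is my name for the helper); since num_hess nests two central differences with h = 10**-5, only expect agreement to a few decimal places:

def hess_exact(x):   # analytic Hessian of the Rosenbrock function
    x1, x2 = x[0], x[1]
    return np.array([[1200*x1**2 - 400*x2 + 2, -400*x1],
                     [-400*x1, 200.0]])

xt = np.array([0.7, 0.9])
print(num_hess(xt, 10**-5) - hess_exact(xt))   # entries should be small, though not machine precision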
    
def linesearch(x, dk):   # step-size search (simple backtracking)
    ak = 1
    for i in range(20):
        newf, oldf = f(x + ak * dk), f(x)
        if newf < oldf:
            return ak
        else:
            ak = ak / 4  # shrink the step; the shrink factor is arbitrary, any choice works as long as newf ends up smaller than oldf (e.g. ak = ak/2 is also fine)
    return ak   # fall back to the last (tiny) step if no decrease was found in 20 tries
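This search only demands some decrease in f. A common strengthening is the Armijo (sufficient-decrease) condition, which requires the decrease to be at least proportional to the step length times the directional derivative. A sketch of that variant; the name linesearch_armijo and the constant c are my choices, not part of the original code:

def linesearch_armijo(x, dk, grad, c=10**-4):   # backtracking with the Armijo condition
    ak = 1
    for i in range(20):
        # accept once f drops by at least c * ak * (grad . dk); grad . dk < 0 for a descent direction
        if f(x + ak * dk) <= f(x) + c * ak * np.dot(grad, dk):
            return ak
        ak = ak / 4
    return ak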

def steepest(x):   # steepest descent
    epsilon, h, maxiter = 10**-5, 10**-5, 10**4
    for iter1 in range(maxiter):
        grad = num_grad(x, h)
        if np.linalg.norm(grad) < epsilon:
            return x
        dk = -grad
        ak = linesearch(x, dk)
        x = x + ak * dk
    return x
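To compare how quickly the three methods converge, it can help to also return the number of iterations used; a small variant (steepest_count is my name, not from the original post):

def steepest_count(x):   # like steepest, but also returns the iteration count
    epsilon, h, maxiter = 10**-5, 10**-5, 10**4
    for iter1 in range(maxiter):
        grad = num_grad(x, h)
        if np.linalg.norm(grad) < epsilon:
            return x, iter1
        dk = -grad
        ak = linesearch(x, dk)
        x = x + ak * dk
    return x, maxiter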

def newTonFuction(x):   # Newton's method
    epsilon, h1, h2, maxiter = 10**-5, 10**-5, 10**-5, 10**4
    for iter1 in range(maxiter):
        grad = num_grad(x, h1)
        if np.linalg.norm(grad) < epsilon:
            return x
        hess = num_hess(x, h2)
        dk = -np.dot((np.linalg.inv(hess)), grad)
        x = x + dk
    return x
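Explicitly inverting the Hessian works, but solving the linear system hess * dk = -grad is cheaper and more numerically stable; the dk line above could plausibly be replaced with:

dk = np.linalg.solve(hess, -grad)   # solves hess @ dk = -grad without forming the inverse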

def BFGS(x):   # quasi-Newton method (BFGS)
    epsilon, h, maxiter = 10**-5, 10**-5, 10**4
    Bk = np.eye(x.size)
    for iter1 in range(maxiter):
        grad = num_grad(x, h)
        if np.linalg.norm(grad) < epsilon:
            return x
        dk = -np.dot((np.linalg.inv(Bk)), grad)
        ak = linesearch(x, dk)
        x = x + dk*ak
        yk = num_grad(x, h) - grad   # change in the gradient
        sk = ak*dk                   # step actually taken
        if np.dot(yk, sk) > 0:   # curvature condition: only update Bk when yk . sk > 0
            '''Version 1: step-by-step computation of the update
            t0 = np.dot(Bk, sk)
            t1 = np.dot(t0.reshape(sk.shape[0], 1), sk.reshape(1, sk.shape[0]))
            temp0 = np.dot(t1, Bk)
            temp1 = np.dot(np.dot(sk.reshape(1, sk.shape[0]), Bk), sk)
            tmp0 = np.dot(yk.reshape(yk.shape[0], 1), yk.reshape(1, yk.shape[0]))
            tmp1 = np.dot(yk.reshape(1, yk.shape[0]), sk)
            Bk = Bk - temp0 / temp1 + tmp0 / tmp1
            '''
            # Version 2: the update formula written out directly
            Bk = (Bk
                  - np.dot(np.dot(np.dot(Bk, sk).reshape(sk.shape[0], 1),
                                  sk.reshape(1, sk.shape[0])), Bk)
                    / np.dot(np.dot(sk.reshape(1, sk.shape[0]), Bk), sk)
                  + np.dot(yk.reshape(yk.shape[0], 1), yk.reshape(1, yk.shape[0]))
                    / np.dot(yk.reshape(1, yk.shape[0]), sk))
    return x
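The long update line is the standard BFGS formula Bk <- Bk - (Bk sk sk' Bk)/(sk' Bk sk) + (yk yk')/(yk' sk). Because Bk stays symmetric, the same update can be written far more compactly with np.outer; a sketch of an equivalent replacement for that line:

Bs = np.dot(Bk, sk)   # Bk is symmetric, so Bk sk sk' Bk = Bs Bs'
Bk = Bk - np.outer(Bs, Bs) / np.dot(sk, Bs) + np.outer(yk, yk) / np.dot(yk, sk)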


#x0 = np.array([0.999960983973235, 0.999921911551354])  # alternative initial point (already near the optimum)
x0 = np.array([0.7, 0.9])    # initial point
x = steepest(x0)     # run steepest descent
print("Steepest descent, final solution vector:", x)
print("Steepest descent, final objective value:", f(x))
print('')
x = newTonFuction(x0)     # run Newton's method
print("Newton's method, final solution vector:", x)
print("Newton's method, final objective value:", f(x))
print('')
x = BFGS(x0)     # run quasi-Newton (BFGS)
print("BFGS (quasi-Newton), final solution vector:", x)
print("BFGS (quasi-Newton), final objective value:", f(x))
print('')

The results are as follows:
[screenshot of the program output from the original post]
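As an optional sanity check (this goes beyond the numpy-only claim above), if SciPy happens to be installed, the same problem can be handed to scipy.optimize.minimize, which also implements BFGS:

from scipy.optimize import minimize
res = minimize(f, np.array([0.7, 0.9]), method='BFGS')
print(res.x, res.fun)   # should land near (1, 1) with f close to 0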

The quasi-Newton implementation feels more convoluted than it needs to be; I don't have a better idea for cleaning it up at the moment, so it stays as is for now.
