Gradient descent with numpy
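Gradient descent updates the variable against the derivative: x ← x − learning_rate · dy/dx. For y = x² − 1 the derivative is 2x, so the iterates shrink toward x = 0, where y attains its minimum of −1. A plain-Python version first: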
x = 1
learning_rate = 0.1
epochs = 50
y = lambda x: x ** 2 - 1      # the objective to minimize

for epoch in range(epochs):
    print(epoch, x)
    dx = 2 * x                      # analytic derivative dy/dx
    x = x - learning_rate * dx      # gradient descent step
print(y(x))
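The loop above only touches Python scalars; numpy earns its place once x is an array, since the same update then applies elementwise. A minimal sketch (the quadratic bowl f(x) = (x ** 2).sum() is my own choice of example):

import numpy as np

x = np.array([1.0, -2.0, 3.0])    # arbitrary starting point
learning_rate = 0.1
for epoch in range(50):
    grad = 2 * x                  # gradient of f(x) = (x ** 2).sum()
    x = x - learning_rate * grad  # elementwise gradient descent step
print(x)                          # every component approaches 0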
Gradient descent with pytorch
import torch
from torch.autograd import Variable

x = torch.Tensor([1])
# wrap the tensor so autograd can track it: tensor([1.], requires_grad=True)
x = Variable(x, requires_grad=True)
print('grad', x.grad, 'data', x.data)

learning_rate = 0.1
epochs = 20

for epoch in range(epochs):
    y = x ** 2 - 1
    y.backward()                                     # accumulate dy/dx into x.grad
    print('grad', x.grad.data)
    x.data = x.data - learning_rate * x.grad.data    # gradient descent step
    x.grad.data.zero_()                              # reset the gradient for the next epoch
    print(x.data)
print(y)
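Since PyTorch 0.4, Variable has been merged into Tensor, so the wrapper above is no longer required. A minimal sketch of the same loop with the modern API:

import torch

x = torch.tensor([1.0], requires_grad=True)
learning_rate = 0.1
for epoch in range(20):
    y = x ** 2 - 1
    y.backward()                     # accumulate dy/dx into x.grad
    with torch.no_grad():            # keep the update out of the autograd graph
        x -= learning_rate * x.grad
    x.grad.zero_()                   # reset the gradient for the next epoch
print(x, y)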
Linear regression with numpy
import numpy as np

x_data = np.array([1, 2, 3])
y_data = np.array([2, 4, 6])
epochs = 10
lr = 0.1
w = 0
cost = []

# inspect each piece of the gradient expression before training
yhat = x_data * w
print(x_data)
print(y_data - yhat)
print(-2 * (y_data - yhat))
print(x_data.T)
print(-2 * (y_data - yhat) @ x_data.T)

for epoch in range(epochs):
    yhat = x_data * w                                        # forward pass
    loss = np.average((y_data - yhat) ** 2)                  # mean squared error
    cost.append(loss)
    dw = -2 * (y_data - yhat) @ x_data.T / x_data.shape[0]   # dL/dw averaged over samples
    w = w - lr * dw                                          # gradient descent step
    print(w)
print(w)
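With a single weight and no bias, the least-squares optimum has a closed form, w* = (x · y) / (x · x); as a quick sanity check (my own addition, not part of the original walkthrough):

w_closed = (x_data @ y_data) / (x_data @ x_data)
print(w_closed)    # 2.0, which the iterates above approach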
Linear regression with pytorch
torch.manual_seed(2)

x_data = Variable(torch.Tensor([[1.0], [2.0], [3.0]]))
y_data = Variable(torch.Tensor([[2.0], [4.0], [6.0]]))
epochs = 10
lr = 0.1
w = Variable(torch.FloatTensor([0]), requires_grad=True)
cost = []

for epoch in range(epochs):
    yhat = x_data * w
    loss = torch.mean((yhat - y_data) ** 2)    # mean squared error
    cost.append(loss.data.numpy())
    loss.backward()                            # compute dL/dw
    w.data = w.data - lr * w.grad.data         # gradient descent step
    print(w.data)
    w.grad.data.zero_()                        # reset the gradient
print(w.data)
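With the trained weight, prediction is just w * x; for example (the test input 4.0 is arbitrary):

x_test = torch.Tensor([[4.0]])
print(w.data * x_test)    # roughly 8, since w converges to 2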
A simple neural network with pytorch
This was already implemented in a previous task: https://blog.csdn.net/zh11403070219/article/details/88092442
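For completeness, a minimal sketch of that step using nn.Linear and the built-in SGD optimizer on the same data; the epoch count and learning rate here are my own choices, not taken from the linked post:

import torch

model = torch.nn.Linear(1, 1)                            # one weight and one bias
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[2.0], [4.0], [6.0]])

for epoch in range(100):
    yhat = model(x_data)              # forward pass
    loss = criterion(yhat, y_data)    # mean squared error
    optimizer.zero_grad()             # clear stale gradients
    loss.backward()                   # backpropagate
    optimizer.step()                  # update weight and bias
print(model.weight.data, model.bias.data)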