
Building on an understanding of linear regression, this post works through a Python implementation step by step.
1. The loss function and its surface
First, understand where the loss function comes from: it is the total sample error written as a function of the model parameters, so training means finding a pair (w0, w1) that makes the loss as small as possible. Plotted over the w0-w1 plane, the loss forms a surface.
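Written out, the quantity that the script below evaluates over a grid of (w0, w1) values is just the expression inside its loop, stated as a formula:

$$\hat{y}_i = w_0 + w_1 x_i,\qquad loss(w_0, w_1) = \frac{1}{2}\sum_i \bigl(w_0 + w_1 x_i - y_i\bigr)^2$$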
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import matplotlib.pyplot as mp

# Training samples
xs = np.array([0.8, 0.7, 0.8, 1.1, 1.6])
ys = np.array([4.0, 5.2, 6.4, 6.2, 8.0])

# Evaluate the loss on an n x n grid of (w0, w1) values
n = 500
w0_grid, w1_grid = np.meshgrid(
    np.linspace(-15, 15, n), np.linspace(-15, 15, n))
loss = 0
for x, y in zip(xs, ys):
    loss += (w0_grid + w1_grid * x - y) ** 2 / 2

# Draw the loss surface
fig = mp.figure('Loss Function', facecolor='lightgray')
ax3d = fig.add_subplot(projection='3d')
ax3d.set_xlabel('w0')
ax3d.set_ylabel('w1')
ax3d.set_zlabel('loss')
ax3d.plot_surface(w0_grid, w1_grid, loss,
                  cstride=30, rstride=30, cmap='jet')
mp.show()
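As an optional check that is not part of the original script, the same grid can be used to read off an approximate minimizer of the surface; the sketch below reuses the variables defined above:

# Find the grid cell with the smallest loss and report the corresponding (w0, w1)
i, j = np.unravel_index(np.argmin(loss), loss.shape)
print('approx w0:', w0_grid[i, j])
print('approx w1:', w1_grid[i, j])
print('min loss on grid:', loss[i, j])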
2. Solving for w0 and w1 with gradient descent
Key points to master here: what a partial derivative means and how to compute one, what the learning rate does, why w0 and w1 can start from arbitrary initial values, and how the formulas for d0 and d1 are derived (the derivation is written out below).
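For reference, d0 and d1 in the code are the partial derivatives of the loss above with respect to w0 and w1, obtained by differentiating the summed squared error term by term:

$$d_0 = \frac{\partial\,loss}{\partial w_0} = \sum_i \bigl(w_0 + w_1 x_i - y_i\bigr),\qquad d_1 = \frac{\partial\,loss}{\partial w_1} = \sum_i x_i \bigl(w_0 + w_1 x_i - y_i\bigr)$$

Each iteration then moves the parameters a small step against the gradient, scaled by the learning rate $\eta$ (lrate in the code):

$$w_0 \leftarrow w_0 - \eta\, d_0,\qquad w_1 \leftarrow w_1 - \eta\, d_1$$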
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 26 20:16:19 2022
@author: LENOVO
"""
import numpy as np
import matplotlib.pyplot as mp

train_x = np.array([0.5, 0.6, 0.8, 1.1, 1.4])
train_y = np.array([5.0, 5.5, 6.0, 6.8, 7.0])
w0, w1 = 1, 1       # arbitrary initial values
times = 1000        # number of iterations
lrate = 0.01        # learning rate

# Gradient descent for w0 and w1
for i in range(1, times + 1):
    # d0, d1 come from the partial-derivative formulas
    d0 = (w0 + w1 * train_x - train_y).sum()
    d1 = (train_x * (w0 + w1 * train_x - train_y)).sum()
    # Update w0, w1
    w0 = w0 - lrate * d0
    w1 = w1 - lrate * d1
print('w0', w0)
print('w1', w1)

# Plot the samples and the fitted regression line
linex = np.linspace(train_x.min(), train_x.max(), 2)
liney = w1 * linex + w0
mp.figure('Linear Regression', facecolor='lightgray')
mp.title('Linear Regression', fontsize=18)
mp.grid(linestyle=':')
mp.scatter(train_x, train_y, s=80, marker='o',
           color='dodgerblue', label='Samples')
mp.plot(linex, liney, color='orangered', linewidth=2,
        label='Regression Line')
mp.legend()
mp.show()
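As an optional sanity check (not part of the original post), the gradient-descent result can be compared against NumPy's built-in least-squares fit; for a degree-1 polynomial, np.polyfit returns the slope and the intercept:

# Closed-form least-squares fit for comparison with the gradient-descent result
k, b = np.polyfit(train_x, train_y, 1)
print('polyfit slope (w1):    ', k)
print('polyfit intercept (w0):', b)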
3. Watching how w0, w1, and loss change
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 27 08:58:54 2022
@author: LENOVO
"""
import numpy as np

train_x = np.array([0.5, 0.6, 0.8, 1.1, 1.4])
train_y = np.array([5.0, 5.5, 6.0, 6.8, 7.0])
w0, w1 = 1, 1       # arbitrary initial values
times = 1000        # number of iterations
lrate = 0.01        # learning rate

# Gradient descent, printing w0, w1 and the loss at every iteration
for i in range(1, times + 1):
    loss = ((w0 + w1 * train_x - train_y) ** 2).sum() / 2
    print('{:4}>w0={:.8f},w1={:.8f},loss={:.8f}'.format(i, w0, w1, loss))
    # d0, d1 come from the partial-derivative formulas
    d0 = (w0 + w1 * train_x - train_y).sum()
    d1 = (train_x * (w0 + w1 * train_x - train_y)).sum()
    # Update w0, w1
    w0 = w0 - lrate * d0
    w1 = w1 - lrate * d1
Plotting the curves:
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 27 09:22:12 2022
@author: LENOVO
"""
import numpy as np
import matplotlib.pyplot as mp

train_x = np.array([0.5, 0.6, 0.8, 1.1, 1.4])
train_y = np.array([5.0, 5.5, 6.0, 6.8, 7.0])
w0, w1, losses, epoches = [1], [1], [], []   # keep the full history of every iteration
times = 1000
lrate = 0.01        # learning rate

# Gradient descent, recording w0, w1 and the loss at every iteration
for i in range(1, times + 1):
    epoches.append(i)                        # iteration index
    loss = ((w0[-1] + w1[-1] * train_x - train_y) ** 2).sum() / 2
    losses.append(loss)
    print('{:4}>w0={:.8f},w1={:.8f},loss={:.8f}'.format(i, w0[-1], w1[-1], loss))
    # d0, d1 come from the partial-derivative formulas
    d0 = (w0[-1] + w1[-1] * train_x - train_y).sum()
    d1 = (train_x * (w0[-1] + w1[-1] * train_x - train_y)).sum()
    # Update w0, w1 by appending the new values to the history
    w0.append(w0[-1] - lrate * d0)
    w1.append(w1[-1] - lrate * d1)
print('w0', w0[-1])
print('w1', w1[-1])

# Plot how w0, w1 and loss change over the iterations
mp.figure('Training Progress', facecolor='lightgray')

mp.subplot(311)
mp.title('Training Progress', fontsize=18)
mp.grid(linestyle=':')
mp.ylabel(r'$w_0$', fontsize=14)
mp.plot(epoches, w0[:-1], color='dodgerblue', label=r'$w_0$')
mp.legend()

mp.subplot(312)
mp.grid(linestyle=':')
mp.ylabel(r'$w_1$', fontsize=14)
mp.plot(epoches, w1[:-1], color='dodgerblue', label=r'$w_1$')
mp.legend()

mp.subplot(313)
mp.grid(linestyle=':')
mp.ylabel(r'$loss$', fontsize=14)
mp.plot(epoches, losses, color='orangered', label=r'$loss$')
mp.legend()

mp.tight_layout()
mp.show()
Try adjusting the value of lrate and watching how these curves change; this builds intuition for how the learning rate affects gradient descent.
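A minimal sketch of that experiment (not from the original post; the candidate lrate values are arbitrary choices for illustration) reruns the same gradient descent for several learning rates and overlays the loss curves:

import numpy as np
import matplotlib.pyplot as mp

train_x = np.array([0.5, 0.6, 0.8, 1.1, 1.4])
train_y = np.array([5.0, 5.5, 6.0, 6.8, 7.0])
times = 1000

mp.figure('Learning Rate Comparison', facecolor='lightgray')
mp.grid(linestyle=':')
mp.xlabel('epoch', fontsize=14)
mp.ylabel(r'$loss$', fontsize=14)
for lrate in (0.001, 0.01, 0.05):      # example learning rates to compare
    w0, w1, losses = 1, 1, []
    for i in range(1, times + 1):
        losses.append(((w0 + w1 * train_x - train_y) ** 2).sum() / 2)
        d0 = (w0 + w1 * train_x - train_y).sum()
        d1 = (train_x * (w0 + w1 * train_x - train_y)).sum()
        w0, w1 = w0 - lrate * d0, w1 - lrate * d1
    mp.plot(range(1, times + 1), losses, label='lrate={}'.format(lrate))
mp.legend()
mp.tight_layout()
mp.show()

A smaller lrate converges more slowly, while too large a value makes the updates overshoot; the overlaid loss curves make this trade-off visible.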