数学模型参数求解–基于pytorch


对给定数学表达式的参数求解:
代码如下:
import torch
from torch import nn, optim
import numpy as np
import sys
import random
import matplotlib.pyplot as plt
import math
from mpl_toolkits.mplot3d import Axes3D
def get_data(n):
    """Sample y = (1 + sin(2*x1 + 3*x2)) / (3.5 + sin(x1 - x2)) on an n*n grid.

    Both x1 and x2 range over [-2, 2]. Each input row is [1, x1, x2]
    (the leading 1 acts as a bias feature).

    Returns:
        (x_data, y_data): list of [1, x1, x2] rows and list of target values,
        ordered with x1 as the outer loop and x2 as the inner loop.
    """
    grid = np.linspace(-2, 2, n)
    x_data = [[1, a, b] for a in grid for b in grid]
    y_data = [(1 + np.sin(2 * a + 3 * b)) / (3.5 + np.sin(a - b))
              for a in grid for b in grid]
    return x_data, y_data
def data_generator(x_data, y_data, batch_size, index):
    """Build a reader() generator factory over (x_data, y_data).

    Samples are drawn in the order given by `index`. Full batches of
    `batch_size` are yielded first; any remaining samples form one final,
    smaller batch.

    Yields:
        (x, label): float tensors of shape (B, feature_dim) and (B, 1).
    """
    def reader():
        pending_x, pending_y = [], []
        for i in index:
            pending_x.append(x_data[i])
            pending_y.append(y_data[i])
            if len(pending_x) < batch_size:
                continue
            # A full mini-batch is ready — emit it and start a new one.
            yield (torch.tensor(np.array(pending_x), dtype=torch.float),
                   torch.tensor(np.array(pending_y).reshape(-1, 1), dtype=torch.float))
            pending_x, pending_y = [], []
        # Leftover samples (fewer than batch_size) become the last batch.
        if pending_x:
            yield (torch.tensor(np.array(pending_x), dtype=torch.float),
                   torch.tensor(np.array(pending_y).reshape(-1, 1), dtype=torch.float))
    return reader
def test_data_generator(x_data,batch_size):
def reader():
x_list = []
for i in range(len(x_data)):
#train_list.append(train[i].reshape(1,train[i].shape[0],train[i].shape[1]))
x_list.append(x_data[i])
if len(x_list) == batch_size:
# 获得一个batchsize的数据,并返回
x_array = torch.tensor(np.array(x_list),dtype=torch.float)
yield x_array
# 清空数据读取列表
x_list = []
# 如果剩余数据的数目小于BATCHSIZE
# 则剩余数据一起构成一个大小为len(imgs_list)的mini-batch
if len(x_list) > 0:
x_array = torch.tensor(np.array(x_list),dtype=torch.float)
yield x_array
return reader
class Max(nn.Module):
    """Bias-free two-layer network: Linear(input->M) -> ReLU -> Linear(M->hidden)."""

    def __init__(self, input_size, hidden_size, M):
        super(Max, self).__init__()
        # Hidden expansion to width M with ReLU nonlinearity.
        self.fc1 = nn.Sequential(nn.Linear(input_size, M, bias=False), nn.ReLU())
        # Projection down to the output width.
        self.fc2 = nn.Sequential(nn.Linear(M, hidden_size, bias=False))

    def forward(self, x):
        return self.fc2(self.fc1(x))
class Baise_model(nn.Module):
    """Single bias-free linear map — the linear 'base' part of the additive model."""

    def __init__(self, input_size, hidden_size):
        super(Baise_model, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size, bias=False)

    def forward(self, x):
        out = self.fc1(x)
        return out
def train(net, loss, train_loader, test_loader, optimizer, num_epoch):
    """Train `net` and report a relative test error after every epoch.

    The model prediction is the SUM of the outputs of `net`'s direct
    children (linear base model + ReLU network in this file).

    Parameters
    ----------
    net : nn.Module whose direct children each map a batch to (B, 1)
    loss : criterion, e.g. nn.MSELoss()
    train_loader, test_loader : zero-argument callables returning a
        generator of (x, y) batches (see data_generator above)
    optimizer : optimizer built over net.parameters()
    num_epoch : number of training epochs
    """
    # Resolve the device locally instead of relying on a module-level global,
    # so the function is self-contained.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    net = net.to(device)
    for epoch in range(num_epoch):
        net.train()
        sum_train_loss = 0.0
        batch_id = -1  # stays -1 if the loader yields nothing
        for batch_id, (x_data, y_data) in enumerate(train_loader()):
            x_data = x_data.to(device)
            y_data = y_data.to(device)
            # Additive model: sum the predictions of every sub-module.
            out = sum(blk(x_data) for blk in net.children())
            l = loss(out, y_data)
            sum_train_loss += l.cpu().item()
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
        # max(..., 1) guards against an empty loader (was a potential
        # ZeroDivisionError / NameError in the original).
        print("{}/{} train_loss={}".format(
            epoch + 1, num_epoch, sum_train_loss / max(batch_id + 1, 1)))

        # ---- evaluation: relative sum of squared errors (SSE / SST) ----
        net.eval()
        sse = 0.0
        y_chunks = []
        with torch.no_grad():  # no autograd graph needed for evaluation
            for x_data, y_data in test_loader():
                x_data = x_data.to(device)
                out = sum(blk(x_data) for blk in net.children())
                out = out.cpu().numpy()
                y_np = y_data.numpy()
                y_chunks.append(y_np)
                sse += np.square(y_np - out).sum()
        # The original printed RSSE/mse with `mse` never defined (NameError).
        # Normalize by the total sum of squares of the test labels instead.
        y_all = np.concatenate(y_chunks) if y_chunks else np.zeros(0)
        sst = np.square(y_all - y_all.mean()).sum() if y_all.size else 0.0
        print("{}/{} RSSE={}".format(
            epoch + 1, num_epoch, sse / sst if sst > 0 else sse))
        net.train()
# ---- Example 1: fit the additive model (linear base + ReLU network) ----
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
input_size = 3    # feature dimension: [1, x1, x2]
hidden_size = 1   # scalar regression target
M = 35            # hidden width of the ReLU sub-network

net = Baise_model(input_size, hidden_size)
net.add_module("relu", Max(input_size, hidden_size, M))
loss = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=0.01)
#optimizer=optim.SGD(net.parameters(), lr = 0.1, momentum=0.9)
num_epoch = 1000

# The original called train() with undefined train_loader/test_loader
# (NameError). Build them here from get_data()/data_generator() with a
# shuffled 80/20 train/test split.
grid_n = 20
x_all, y_all = get_data(grid_n)
indices = list(range(len(x_all)))
random.shuffle(indices)
split = int(0.8 * len(indices))
train_loader = data_generator(x_all, y_all, 64, indices[:split])
test_loader = data_generator(x_all, y_all, 64, indices[split:])

train(net, loss, train_loader, test_loader, optimizer, num_epoch)
例二:
对于给定数学模型 $y = ax^3 + bx^2 + cx + d$,其中 a,b,c 为待求未知量,d 为已知量,如何求解 a,b,c 使给定模型最为拟合二维数据点?借助 PyTorch 内 tensor 变量的自动求梯度机制,可以很容易实现,代码如下:
#导入基础API
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
from torch.utils.data import Dataset, DataLoader

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from tqdm.auto import tqdm
#定义数学公式相对应网络,并初始化
class obj_dataset(Dataset):
    """Map-style torch Dataset over paired arrays, yielding {'x': ..., 'y': ...} dicts."""

    def __init__(self, x, y, set_type='all'):
        # `set_type` is accepted for API compatibility but currently unused.
        self.X = x
        self.Y = y

    def __getitem__(self, index):
        return {'x': self.X[index], 'y': self.Y[index]}

    def __len__(self):
        return len(self.X)
class LambdaLR():
    """Learning-rate multiplier: 1.0 until `decay_start_epoch` (shifted by
    `offset`), then decaying linearly to 0.0 at `n_epochs`.

    Intended as the `lr_lambda` callable for torch.optim.lr_scheduler.LambdaLR.
    """

    def __init__(self, n_epochs, offset, decay_start_epoch):
        assert ((n_epochs - decay_start_epoch) > 0), "Decay must start before the training session ends!"
        self.n_epochs = n_epochs
        self.offset = offset
        self.decay_start_epoch = decay_start_epoch

    def step(self, epoch):
        # Epochs before the decay start contribute nothing (max with 0).
        elapsed = epoch + self.offset - self.decay_start_epoch
        span = self.n_epochs - self.decay_start_epoch
        return 1.0 - max(0, elapsed) / span
class obj_net(nn.Module):
    """Cubic polynomial model y = a*x^3 + b*x^2 + c*x + d.

    a, b, c are learnable parameters; d is a fixed (non-trainable)
    constant term.
    """

    def __init__(self):
        super().__init__()
        # torch.rand -> uniform on [0, 1); torch.randn -> standard normal.
        self.a = nn.Parameter(torch.randn(1), requires_grad=True)
        self.b = nn.Parameter(torch.rand(1), requires_grad=True)
        self.c = nn.Parameter(torch.rand(1), requires_grad=True)
        # Fixed constant term. Use a float tensor: the original integer
        # tensor([-5]) would raise if requires_grad were ever enabled and
        # forces avoidable dtype promotion in forward().
        self.d = nn.Parameter(torch.tensor([-5.0]), requires_grad=False)
        print('initial constant bias is', self.d.data)

    def forward(self, x):
        """Evaluate the polynomial elementwise on x."""
        out = self.a * torch.pow(x, 3) + self.b * torch.pow(x, 2) + self.c * x + self.d
        return out
# ---- Example 2: generate noisy cubic data and set up training ----
N_sample = 100
batch_size = 64

# Noisy samples of 0.001*x^3 + 0.12*x^2 + 2*x + 16 over [-15, 30].
x = np.array(np.linspace(-15, 30, N_sample))
y_model = 0.001 * np.power(x, 3) + 0.12 * np.power(x, 2) + 2 * x + 16
y = y_model + np.random.randn(len(x)) * 10

dataset = obj_dataset(x, y)
trainloader = DataLoader(dataset, batch_size=64, shuffle=True, num_workers=0, drop_last=False)

# Model, device, loss and optimizer.
model = obj_net()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('device: ', device)
model.to(device)
criterion = torch.nn.MSELoss()

learning_rate = 10
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.5, 0.999))

# Linearly decaying learning-rate schedule over the whole run.
total_epoch = 64
start_epoch = 0
start_delay_epoch = 0
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
    optimizer, lr_lambda=LambdaLR(total_epoch, start_epoch, start_delay_epoch).step)
tbar = tqdm(range(start_epoch, total_epoch))
# ---- Training loop ----
for epoch in tbar:
    train_loss = 0
    for i, batch in enumerate(trainloader):
        input_ = batch['x'].to(device)
        target_ = batch['y'].to(device)
        optimizer.zero_grad()               # clear gradients from the last step
        outputs = model(input_)             # forward pass
        loss = criterion(outputs, target_)  # compute loss
        loss.backward()                     # backpropagate
        train_loss += loss.item()
        optimizer.step()                    # update a, b, c
    lr_scheduler.step()
    tbar.set_description('Train loss: %.3f' % (train_loss / len(trainloader)))
print('finish trainning with the bias is', model.d.data)

# ---- Visualize the fit ----
plt.plot(x, y, label='true')
pre = model(torch.tensor(x).to(device)).data.cpu().numpy()
plt.plot(x, pre, label='predict')
plt.legend()
plt.show()

# ---- Print the fitted a, b, c, d values ----
for param in model.parameters():
    print(param)
参考文献:https://blog.csdn.net/qq_43530128/article/details/103939508
小墨
2021年11月3日 上午12:14
优秀,值得收藏