PINN Learning Notes (2)



PINN is about applying neural networks to solve physics equations. I studied it for a while, drawing on many open-source projects found online (some are listed at the end), and summarized my approach here.

Solving differential equations

1. ODE

$f'(x) = f(x)$
$f(0) = 1$
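Since $f'(x) = f(x)$ with $f(0) = 1$, the exact solution is

$$f(x) = e^{x},$$

which is what the training code below compares against via torch.exp.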

Network construction

A note here: nn.Module is used to build the solver below; this class is just a generic network.

import torch
import torch.nn as nn
import numpy as np

class Net(nn.Module):
    def __init__(self, NL, NN):
        # NL is the number of hidden layers
        # NN is the number of neurons in each layer
        super(Net, self).__init__()
        self.input_layer = nn.Linear(1, NN)
        self.hidden_layer = nn.ModuleList([nn.Linear(NN, NN) for i in range(NL)])
        self.output_layer = nn.Linear(NN, 1)

    def forward(self, x):
        o = self.act(self.input_layer(x))
        for i, li in enumerate(self.hidden_layer):
            o = self.act(li(o))
        out = self.output_layer(o)
        return out

    def act(self, x):
        return torch.tanh(x)
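As a quick shape check (my own example, not part of the original post), the network maps a batch of scalar inputs to scalar outputs:

# Quick shape check: Net(NL=4, NN=20) maps a (batch, 1) input to a (batch, 1) output
demo_net = Net(4, 20)
demo_x = torch.rand(8, 1)      # 8 sample points
print(demo_net(demo_x).shape)  # torch.Size([8, 1])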
Network, loss, and optimizer declaration
net = Net(4, 20)
mse_cost_function = torch.nn.MSELoss(reduction='mean')  # Mean squared error
optimizer = torch.optim.Adam(net.parameters(), lr=1e-4)
Setting up the ODE residual
def ode_01(x, net):
    y = net(x)
    y_x = torch.autograd.grad(y, x, grad_outputs=torch.ones_like(net(x)), create_graph=True)[0]
    return y - y_x

Note: torch.autograd.grad needs grad_outputs when the output is not a scalar; if you do not want to pass it, differentiate y.sum() instead of y.
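To illustrate that note (my own example, reusing the net defined above): passing grad_outputs=torch.ones_like(y) gives the same derivative as differentiating the scalar y.sum():

x_demo = torch.rand(5, 1, requires_grad=True)
y_demo = net(x_demo)

# Option 1: vector-valued output, so grad_outputs must be supplied
dy_dx_a = torch.autograd.grad(y_demo, x_demo, grad_outputs=torch.ones_like(y_demo),
                              create_graph=True)[0]
# Option 2: reduce to a scalar first; then grad_outputs is not needed
dy_dx_b = torch.autograd.grad(y_demo.sum(), x_demo, create_graph=True)[0]

print(torch.allclose(dy_dx_a, dy_dx_b))  # True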

Training
iterations = 1000
for epoch in range(iterations):
    # Loss based on the initial value
    x_in = np.random.uniform(low=0.0, high=2.0, size=(2000, 1))
    pt_x_in = Variable(torch.from_numpy(x_in).float(), requires_grad=True)
    y = torch.exp(pt_x_in)  # exact solution, used for comparison
    y_0 = net(torch.zeros(2000, 1))
    mse_i = mse_cost_function(y_0, torch.ones(2000, 1))

    optimizer.zero_grad()  # to make the gradients zero

    # Loss based on the ODE residual
    pt_all_zeros = Variable(torch.from_numpy(np.zeros((2000, 1))).float(), requires_grad=False)
    pt_y_collection = ode_01(pt_x_in, net)
    mse_f = mse_cost_function(pt_y_collection, pt_all_zeros)

    # Combining the loss functions
    loss = mse_i + mse_f

    # predictions on the training points, for plotting
    y_train0 = net(pt_x_in)

    loss.backward()   # compute gradients by backpropagation
    optimizer.step()  # equivalent to: theta_new = theta_old - alpha * dJ/dtheta

    if epoch % 1000 == 0:
        print(epoch, "Training Loss:", loss.data)
        print(f'times {epoch} - loss: {loss.item()} - y_0: {y_0}')
        plt.cla()
        plt.scatter(pt_x_in.detach().numpy(), y.detach().numpy())
        plt.scatter(pt_x_in.detach().numpy(), y_train0.detach().numpy(), c='red')
        plt.pause(0.1)
Complete code
from mylayer import Net
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
import torch.optim as optim
from torch.autograd import Variable

"""
NOTE: with points generated by linspace you can plot with plot(),
but with uniform random sampling you must use scatter(),
otherwise the curve is distorted by the random ordering of the samples.
"""

class MyNet(torch.nn.Module):
    def __init__(self):
        super(MyNet, self).__init__()  # first, call the parent class constructor
        self.mylayer1 = Net()  # MyNet is not used below; Net would need (NL, NN) arguments here

    def forward(self, a):
        x = self.mylayer1(a)
        return x

"""
Approximate the differential equation f'(x) = f(x) with a neural network,
with initial condition f(0) = 1
"""
net = Net(4, 20)
mse_cost_function = torch.nn.MSELoss(reduction='mean')  # Mean squared error
optimizer = torch.optim.Adam(net.parameters(), lr=1e-4)

def ode_01(x, net):
    y = net(x)
    y_x = torch.autograd.grad(y, x, grad_outputs=torch.ones_like(net(x)), create_graph=True)[0]
    return y - y_x

iterations = 10**4
# linspace also works:
# x_i = torch.linspace(0, 2, 2000, requires_grad=True).unsqueeze(-1)
plt.ion()
for epoch in range(iterations):
    # Loss based on the initial value
    x_bc = np.zeros((500, 1))
    x_in = np.random.uniform(low=0.0, high=2.0, size=(2000, 1))
    pt_x_in = Variable(torch.from_numpy(x_in).float(), requires_grad=True)
    y = torch.exp(pt_x_in)  # exact solution, for comparison
    y_0 = net(torch.zeros(2000, 1))
    y_train0 = net(pt_x_in)
    mse_i = mse_cost_function(y_0, torch.ones(2000, 1))

    optimizer.zero_grad()  # to make the gradients zero

    # Loss based on the ODE residual
    pt_all_zeros = Variable(torch.from_numpy(np.zeros((2000, 1))).float(), requires_grad=False)
    pt_y_collection = ode_01(pt_x_in, net)
    mse_f = mse_cost_function(pt_y_collection, pt_all_zeros)

    # Combining the loss functions
    loss = mse_i + mse_f
    loss.backward()   # compute gradients by backpropagation
    optimizer.step()  # equivalent to: theta_new = theta_old - alpha * dJ/dtheta

    if epoch % 1000 == 0:
        print(epoch, "Training Loss:", loss.data)
        print(f'times {epoch} - loss: {loss.item()} - y_0: {y_0}')
        plt.cla()
        plt.scatter(pt_x_in.detach().numpy(), y.detach().numpy())
        plt.scatter(pt_x_in.detach().numpy(), y_train0.detach().numpy(), c='red')
        plt.pause(0.1)

2. PDE

Network construction
import torch
import torch.nn as nn
import numpy as np

class Net(nn.Module):
    def __init__(self, NL, NN):
        # NL is the number of hidden layers
        # NN is the number of neurons in each layer
        super(Net, self).__init__()
        self.input_layer = nn.Linear(2, NN)
        self.hidden_layer = nn.ModuleList([nn.Linear(NN, NN) for i in range(NL)])
        self.output_layer = nn.Linear(NN, 1)

    def forward(self, x):
        o = self.act(self.input_layer(x))
        for i, li in enumerate(self.hidden_layer):
            o = self.act(li(o))
        out = self.output_layer(o)
        return out

    def act(self, x):
        return torch.tanh(x)

Just change the input dimension to 2: the network now takes the pair (t, x), concatenated column-wise.

Network, loss, and optimizer declaration
net = Net(4, 30)
mse_cost_function = torch.nn.MSELoss(reduction='mean')  # Mean squared error
optimizer = torch.optim.Adam(net.parameters(), lr=1e-4)
PDE residual
def f(x):
    # x has shape (N, 2); column 0 is t, column 1 is x (matching torch.cat([t, x], 1) below)
    u = net(x)
    u_x = torch.autograd.grad(u, x, grad_outputs=torch.ones_like(net(x)),
                              create_graph=True, allow_unused=True)
    u_t = torch.autograd.grad(u, x, grad_outputs=torch.ones_like(net(x)),
                              create_graph=True, allow_unused=True)
    d_x = u_x[0][:, 1].unsqueeze(-1)  # du/dx
    d_t = u_t[0][:, 0].unsqueeze(-1)  # du/dt
    u_xx = torch.autograd.grad(d_x, x, grad_outputs=torch.ones_like(d_x),
                               create_graph=True, allow_unused=True)[0][:, 1].unsqueeze(-1)  # d2u/dx2
    w = torch.tensor(0.01 / np.pi)
    f = d_t + u * d_x - w * u_xx
    return f
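For reference (this is my reading of the code; the post does not state the PDE explicitly), the residual above together with the data terms in the next blocks corresponds to the viscous Burgers benchmark:

$$u_t + u\,u_x = \frac{0.01}{\pi}\,u_{xx}, \qquad x \in [-1,1],\ t \in [0,1],$$
$$u(x,0) = -\sin(\pi x), \qquad u(-1,t) = u(1,t) = 0.$$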

I got stuck here for a while: at first I wrote u_x = torch.autograd.grad(u, x[:,1], grad_outputs=torch.ones_like(net(x)), create_graph=True, allow_unused=True), which fails, because the slice x[:,1] creates a new tensor that does not appear in the graph that produced u.
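A minimal reproduction of the issue (my own sketch, using the PDE net defined above), together with the pattern the f function uses instead:

x_demo = torch.rand(4, 2, requires_grad=True)
u_demo = net(x_demo)

# Fails: x_demo[:, 1] is a freshly created tensor, not the one that produced u_demo
# (without allow_unused it raises an error; with allow_unused=True it just returns None)
# torch.autograd.grad(u_demo, x_demo[:, 1], grad_outputs=torch.ones_like(u_demo))

# Correct pattern: differentiate w.r.t. the whole input, then slice the result
grads = torch.autograd.grad(u_demo, x_demo, grad_outputs=torch.ones_like(u_demo),
                            create_graph=True)[0]
u_x_demo = grads[:, 1].unsqueeze(-1)  # derivative w.r.t. the x column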

Boundary and initial values
# Initial condition: u(x, t=0) = -sin(pi * x)
t_bc = np.zeros((2000, 1))
x_bc = np.random.uniform(low=-1.0, high=1.0, size=(2000, 1))
# compute u at these points from the initial condition
u_bc = -np.sin(np.pi * x_bc)

# Boundary condition: u(-1, t) = u(1, t) = 0
x_inr = np.ones((2000, 1))
x_inl = -np.ones((2000, 1))
t_in = np.random.uniform(low=0, high=1.0, size=(2000, 1))
u_in = np.zeros((2000, 1))
Training
iterations = 10000  # training epochs (value assumed; choose as needed)
for epoch in range(iterations):
    optimizer.zero_grad()  # to make the gradients zero

    # Loss based on the initial condition u(x, 0) = -sin(pi*x)
    pt_x_bc = Variable(torch.from_numpy(x_bc).float(), requires_grad=False)
    pt_t_bc = Variable(torch.from_numpy(t_bc).float(), requires_grad=False)
    pt_u_bc = Variable(torch.from_numpy(u_bc).float(), requires_grad=False)
    net_bc_out = net(torch.cat([pt_t_bc, pt_x_bc], 1))  # output of u(x,t)
    mse_u1 = mse_cost_function(net_bc_out, pt_u_bc)

    # Loss based on the boundary condition u(-1, t) = u(1, t) = 0
    pt_x_inr = Variable(torch.from_numpy(x_inr).float(), requires_grad=False)
    pt_x_inl = Variable(torch.from_numpy(x_inl).float(), requires_grad=False)
    pt_t_in = Variable(torch.from_numpy(t_in).float(), requires_grad=False)
    pt_u_in = Variable(torch.from_numpy(u_in).float(), requires_grad=False)
    net_bc_inr = net(torch.cat([pt_t_in, pt_x_inr], 1))  # output of u(x,t)
    net_bc_inl = net(torch.cat([pt_t_in, pt_x_inl], 1))
    mse_u2r = mse_cost_function(net_bc_inr, pt_u_in)
    mse_u2l = mse_cost_function(net_bc_inl, pt_u_in)

    # Loss based on the PDE residual at collocation points
    x_collocation = np.random.uniform(low=-1.0, high=1.0, size=(2000, 1))
    t_collocation = np.random.uniform(low=0.0, high=1.0, size=(2000, 1))
    all_zeros = np.zeros((2000, 1))
    pt_x_collocation = Variable(torch.from_numpy(x_collocation).float(), requires_grad=True)
    pt_t_collocation = Variable(torch.from_numpy(t_collocation).float(), requires_grad=True)
    pt_all_zeros = Variable(torch.from_numpy(all_zeros).float(), requires_grad=False)
    f_out = f(torch.cat([pt_t_collocation, pt_x_collocation], 1))  # output of f(x,t)
    mse_f = mse_cost_function(f_out, pt_all_zeros)

    # Combining the loss functions
    loss = mse_u1 + mse_u2r + mse_u2l + mse_f
    loss.backward()   # compute gradients by backpropagation
    optimizer.step()  # equivalent to: theta_new = theta_old - alpha * dJ/dtheta

    with torch.autograd.no_grad():
        if epoch % 1000 == 0:
            print(epoch, "Training Loss:", loss.data)
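As a rough post-training check (my own sketch, not part of the original code), the residual function f can be evaluated on freshly sampled (t, x) points; its magnitude should be small if training succeeded:

# Evaluate the PDE residual on random test points in the domain
t_test = np.random.uniform(0.0, 1.0, size=(1000, 1))
x_test = np.random.uniform(-1.0, 1.0, size=(1000, 1))
pt_test = Variable(torch.from_numpy(np.hstack([t_test, x_test])).float(), requires_grad=True)
res = f(pt_test)
print('mean |residual| on test points:', res.abs().mean().item())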
Plotting
# Plotting
from matplotlib import cm
t = np.linspace(0, 1, 100)
x = np.linspace(-1, 1, 256)
ms_t, ms_x = np.meshgrid(t, x)
x = np.ravel(ms_x).reshape(-1, 1)
t = np.ravel(ms_t).reshape(-1, 1)
pt_x = Variable(torch.from_numpy(x).float(), requires_grad=True)
pt_t = Variable(torch.from_numpy(t).float(), requires_grad=True)
pt_u0 = net(torch.cat([pt_t, pt_x], 1))
u = pt_u0.data.cpu().numpy()
pt_u0 = u.reshape(256, 100)

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.set_zlim([-1, 1])
ax.plot_surface(ms_t, ms_x, pt_u0, cmap=cm.RdYlBu_r, edgecolor='blue',
                linewidth=0.0003, antialiased=True)
ax.set_xlabel('t')
ax.set_ylabel('x')
ax.set_zlabel('u')
plt.savefig('Preddata.png')
plt.close(fig)
from matplotlib.ticker import LinearLocator, FormatStrFormatter

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
x = np.arange(-1, 1, 0.02)
t = np.arange(0, 1, 0.02)
ms_x, ms_t = np.meshgrid(x, t)
# Because meshgrid is used, we need the following adjustment
x = np.ravel(ms_x).reshape(-1, 1)
t = np.ravel(ms_t).reshape(-1, 1)
pt_x = Variable(torch.from_numpy(x).float(), requires_grad=True)
pt_t = Variable(torch.from_numpy(t).float(), requires_grad=True)
pt_u = net(torch.cat([pt_t, pt_x], 1))
u = pt_u.data.cpu().numpy()
ms_u = u.reshape(ms_t.shape)
surf = ax.plot_surface(ms_x, ms_t, ms_u, cmap=cm.coolwarm, linewidth=0, antialiased=False)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
Results
(Result figure 1: image not preserved in this copy)
(Result figure 2: image not preserved in this copy)
