Reading pde_accuracy [1]
from kan import KAN, LBFGS
import torch
import matplotlib.pyplot as plt
from torch import autograd
from tqdm import tqdm
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
dim = 2
np_i = 51 # number of interior points (along each dimension)
np_b = 51 # number of boundary points (along each dimension)
ranges = [-1, 1]
def batch_jacobian(func, x, create_graph=False):
    # x in shape (Batch, Length)
    def _func_sum(x):
        return func(x).sum(dim=0)
    return autograd.functional.jacobian(_func_sum, x, create_graph=create_graph).permute(1,0,2)
# define solution
sol_fun = lambda x: torch.sin(torch.pi*x[:,[0]])*torch.sin(torch.pi*x[:,[1]])
source_fun = lambda x: -2*torch.pi**2 * torch.sin(torch.pi*x[:,[0]])*torch.sin(torch.pi*x[:,[1]])
# interior
sampling_mode = 'mesh' # 'random' or 'mesh'
x_mesh = torch.linspace(ranges[0],ranges[1],steps=np_i)
y_mesh = torch.linspace(ranges[0],ranges[1],steps=np_i)
X, Y = torch.meshgrid(x_mesh, y_mesh, indexing="ij")
if sampling_mode == 'mesh':
    # mesh
    x_i = torch.stack([X.reshape(-1,), Y.reshape(-1,)]).permute(1,0)
else:
    # random
    x_i = torch.rand((np_i**2,2))*2-1
x_i = x_i.to(device)
# boundary, 4 sides
helper = lambda X, Y: torch.stack([X.reshape(-1,), Y.reshape(-1,)]).permute(1,0)
xb1 = helper(X[0], Y[0])
xb2 = helper(X[-1], Y[0])
xb3 = helper(X[:,0], Y[:,0])
xb4 = helper(X[:,0], Y[:,-1])
x_b = torch.cat([xb1, xb2, xb3, xb4], dim=0)
x_b = x_b.to(device)
alpha = 0.01
log = 1
grids = [5,10,20]
steps = 50
pde_losses = []
bc_losses = []
l2_losses = []
for grid in grids:
    if grid == grids[0]:
        model = KAN(width=[2,2,1], grid=grid, k=3, seed=1, device=device)
        model = model.speed()
    else:
        model.save_act = True
        model.get_act(x_i)
        model = model.refine(grid)
        model = model.speed()

    def train():
        optimizer = LBFGS(model.parameters(), lr=1, history_size=10, line_search_fn="strong_wolfe", tolerance_grad=1e-32, tolerance_change=1e-32, tolerance_ys=1e-32)
        pbar = tqdm(range(steps), desc='description', ncols=100)
        for _ in pbar:
            def closure():
                global pde_loss, bc_loss
                optimizer.zero_grad()
                # interior loss
                sol = sol_fun(x_i)
                sol_D1_fun = lambda x: batch_jacobian(model, x, create_graph=True)[:,0,:]
                sol_D1 = sol_D1_fun(x_i)
                sol_D2 = batch_jacobian(sol_D1_fun, x_i, create_graph=True)[:,:,:]
                lap = torch.sum(torch.diagonal(sol_D2, dim1=1, dim2=2), dim=1, keepdim=True)
                source = source_fun(x_i)
                pde_loss = torch.mean((lap - source)**2)
                # boundary loss
                bc_true = sol_fun(x_b)
                bc_pred = model(x_b)
                bc_loss = torch.mean((bc_pred-bc_true)**2)
                loss = alpha * pde_loss + bc_loss
                loss.backward()
                return loss

            if _ % 5 == 0 and _ < 20:
                model.update_grid_from_samples(x_i)

            optimizer.step(closure)
            sol = sol_fun(x_i)
            loss = alpha * pde_loss + bc_loss
            l2 = torch.mean((model(x_i) - sol)**2)

            if _ % log == 0:
                pbar.set_description("pde loss: %.2e | bc loss: %.2e | l2: %.2e " % (pde_loss.cpu().detach().numpy(), bc_loss.cpu().detach().numpy(), l2.cpu().detach().numpy()))

            pde_losses.append(pde_loss.cpu().detach().numpy())
            bc_losses.append(bc_loss.cpu().detach().numpy())
            l2_losses.append(l2.cpu().detach().numpy())

    train()
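Note that matplotlib.pyplot is imported but never used in the listing; presumably the original notebook plots the recorded losses after the grid loop finishes. A minimal sketch of such a plot, assuming the three lists have been filled by train():

# plot the losses recorded across all three grids on a log scale
plt.figure()
plt.plot(pde_losses, label='pde loss')
plt.plot(bc_losses, label='bc loss')
plt.plot(l2_losses, label='l2 error')
plt.yscale('log')
plt.xlabel('step')
plt.legend()
plt.show()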
This code is an example of using PyTorch and KAN (a Kolmogorov-Arnold Network, from the pykan library that provides the kan package imported above) to train a model to solve a partial differential equation (PDE). The rest of this post walks through it chunk by chunk.
from kan import KAN, LBFGS
import torch
import matplotlib.pyplot as plt
from torch import autograd
from tqdm import tqdm
# use the GPU if one is available, otherwise fall back to the CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
Import the required libraries and select the device used for computation.
dim = 2
np_i = 51 # number of interior points (along each dimension)
np_b = 51 # number of boundary points (along each dimension)
ranges = [-1, 1]
Set the problem dimension, the number of interior and boundary points along each dimension, and the coordinate range.
def batch_jacobian(func, x, create_graph=False):
    # x in shape (Batch, Length)
    def _func_sum(x):
        return func(x).sum(dim=0)
    return autograd.functional.jacobian(_func_sum, x, create_graph=create_graph).permute(1,0,2)
Define a helper that computes the Jacobian of a function for a whole batch of inputs at once.
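Summing the outputs over the batch before calling autograd.functional.jacobian is a standard trick: each sample's output depends only on its own input, so the cross-sample derivatives vanish and a single Jacobian call covers the whole batch. A toy check of the returned shape (Batch, output_dim, input_dim), using the batch_jacobian defined above:

# for f(x, y) = x*y the Jacobian of each sample is [y, x]
f = lambda x: x[:, [0]] * x[:, [1]]
xt = torch.tensor([[2.0, 3.0], [1.0, 5.0]])
J = batch_jacobian(f, xt)
print(J.shape)  # torch.Size([2, 1, 2])
print(J)        # tensor([[[3., 2.]], [[5., 1.]]])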
# define the analytic solution and the source term
sol_fun = lambda x: torch.sin(torch.pi*x[:,[0]])*torch.sin(torch.pi*x[:,[1]])
source_fun = lambda x: -2*torch.pi**2 * torch.sin(torch.pi*x[:,[0]])*torch.sin(torch.pi*x[:,[1]])
Define the analytic solution and the source term of the problem.
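Concretely, the script solves the 2D Poisson equation on $[-1,1]^2$:

$$\nabla^2 u = f, \qquad u(x,y) = \sin(\pi x)\sin(\pi y), \qquad f(x,y) = -2\pi^2 \sin(\pi x)\sin(\pi y).$$

Differentiating $u$ twice recovers exactly this $f$, and $u$ vanishes on the boundary (since $\sin(\pm\pi)=0$), so the boundary condition is homogeneous Dirichlet.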
# interior-point sampling mode
sampling_mode = 'mesh' # 'random' or 'mesh'
x_mesh = torch.linspace(ranges[0],ranges[1],steps=np_i)
y_mesh = torch.linspace(ranges[0],ranges[1],steps=np_i)
X, Y = torch.meshgrid(x_mesh, y_mesh, indexing="ij")
if sampling_mode == 'mesh':
    # mesh sampling
    x_i = torch.stack([X.reshape(-1,), Y.reshape(-1,)]).permute(1,0)
else:
    # random sampling
    x_i = torch.rand((np_i**2,2))*2-1
x_i = x_i.to(device)
Choose the sampling mode for the interior points, generate their coordinates, and move them to the device.
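Either way there are np_i**2 = 2601 interior points. A quick shape check, reusing X, Y, and np_i from above:

# both sampling modes yield the same number of interior points in [-1, 1]^2
x_mesh_pts = torch.stack([X.reshape(-1,), Y.reshape(-1,)]).permute(1, 0)
x_rand_pts = torch.rand((np_i**2, 2)) * 2 - 1
print(x_mesh_pts.shape, x_rand_pts.shape)  # torch.Size([2601, 2]) torch.Size([2601, 2])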
# boundary points, 4 sides
helper = lambda X, Y: torch.stack([X.reshape(-1,), Y.reshape(-1,)]).permute(1,0)
xb1 = helper(X[0], Y[0])
xb2 = helper(X[-1], Y[0])
xb3 = helper(X[:,0], Y[:,0])
xb4 = helper(X[:,0], Y[:,-1])
x_b = torch.cat([xb1, xb2, xb3, xb4], dim=0)
x_b = x_b.to(device)
Generate the boundary-point coordinates, one edge of the square per helper call, and move them to the device.
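A quick sanity check one can run: every boundary point should have at least one coordinate exactly equal to ±1, and there are 4 * np_b = 204 of them (the four corners appear twice, which is harmless for an MSE loss):

# each boundary point lies on an edge of [-1, 1]^2
print(x_b.shape)  # torch.Size([204, 2])
print(bool((x_b.abs().max(dim=1).values == 1.0).all()))  # True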
alpha = 0.01
log = 1
Set alpha, the weight of the PDE loss relative to the boundary-condition loss, and the logging frequency.
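With these settings, the objective that train() minimizes below is

$$\mathcal{L} \;=\; \alpha \cdot \underbrace{\frac{1}{N_i}\sum_{j=1}^{N_i}\bigl(\nabla^2 u_\theta(x_j)-f(x_j)\bigr)^2}_{\texttt{pde\_loss}} \;+\; \underbrace{\frac{1}{N_b}\sum_{k=1}^{N_b}\bigl(u_\theta(x_k)-u(x_k)\bigr)^2}_{\texttt{bc\_loss}}, \qquad \alpha = 0.01,$$

where $u_\theta$ is the KAN. The l2 value shown in the progress bar is a third quantity, the mean squared error of $u_\theta$ against the analytic solution on the interior points; it is monitored but not optimized.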
grids = [5,10,20]
steps = 50
pde_losses = []
bc_losses = []
l2_losses = []
Set the sequence of grid sizes, the number of training steps per grid, and the lists that will accumulate the losses across all grids.
for grid in grids:
    if grid == grids[0]:
        model = KAN(width=[2,2,1], grid=grid, k=3, seed=1, device=device)
        model = model.speed()
    else:
        model.save_act = True
        model.get_act(x_i)
        model = model.refine(grid)
        model = model.speed()
For the first grid size the model is created from scratch; for later grid sizes the trained model is refined onto a finer spline grid, with save_act and get_act first caching the activations that refine appears to use when fitting the finer grid.
def train():
    optimizer = LBFGS(model.parameters(), lr=1, history_size=10, line_search_fn="strong_wolfe", tolerance_grad=1e-32, tolerance_change=1e-32, tolerance_ys=1e-32)
    pbar = tqdm(range(steps), desc='description', ncols=100)
    for _ in pbar:
        def closure():
            global pde_loss, bc_loss
            optimizer.zero_grad()
            # interior loss
            sol = sol_fun(x_i)
            sol_D1_fun = lambda x: batch_jacobian(model, x, create_graph=True)[:,0,:]
            sol_D1 = sol_D1_fun(x_i)
            sol_D2 = batch_jacobian(sol_D1_fun, x_i, create_graph=True)[:,:,:]
            lap = torch.sum(torch.diagonal(sol_D2, dim1=1, dim2=2), dim=1, keepdim=True)
            source = source_fun(x_i)
            pde_loss = torch.mean((lap - source)**2)
            # boundary loss
            bc_true = sol_fun(x_b)
            bc_pred = model(x_b)
            bc_loss = torch.mean((bc_pred-bc_true)**2)
            loss = alpha * pde_loss + bc_loss
            loss.backward()
            return loss
Inside the LBFGS closure, batch_jacobian is applied twice to obtain the Hessian of the model output; summing its diagonal gives the Laplacian, which enters the PDE residual loss, while the boundary loss compares the model's predictions with the exact solution on the boundary, and the weighted sum of the two is backpropagated. The remainder of train() (the periodic update_grid_from_samples call, the optimizer step, and the logging) is as in the full listing at the top.
This is the progress-bar output produced by running the code above. Why are there three bars?
There are three progress bars because grids = [5, 10, 20]: the outer loop for grid in grids runs three times, and each iteration calls train(), which constructs a fresh tqdm bar over the 50 optimization steps. Each bar therefore corresponds to one grid-refinement stage (grid 5, then 10, then 20), not to parallel tasks or multiple processes. Each bar shows the completion percentage, the iteration count, the elapsed time, and the losses written via pbar.set_description.
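A stripped-down sketch of the same structure, just to show where the three bars come from (loop bounds copied from the script, body purely illustrative):

from tqdm import tqdm
for grid in [5, 10, 20]:
    # each call to train() constructs a fresh tqdm bar, so one bar appears per grid
    for _ in tqdm(range(50), desc=f"grid={grid}", ncols=100):
        pass  # one LBFGS step per iteration in the real code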
Original article: https://blog.csdn.net/m0_68339197/article/details/144001670