def ode_block():
    dim = 1 + args.codim
    rhs_steps = args.steps if args.alpha['TV'] >= 0 else 1
    rhs = rhs_mlp(dim, args.width, args.depth, T=args.T, num_steps=rhs_steps,
                  activation=args.sigma, learn_scales=args.learn_scales, learn_shift=args.learn_shift)
    solver = theta_solver(rhs, args.T, args.steps, args.theta, tol=args.tol)
    return regularized_ode_solver(solver, args.alpha, mciters=args.mciters, p=2, collect_rhs_stat=True)
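# A minimal sketch of one step of the standard theta scheme that `theta_solver` is
# assumed to implement:
#   x_{k+1} = x_k + h * ((1 - theta) * f(t_k, x_k) + theta * f(t_{k+1}, x_{k+1})),
# where theta = 0 gives forward Euler, theta = 1 backward Euler, and theta = 0.5 the
# trapezoidal rule. For theta > 0 the update is implicit; the fixed-point iteration
# below is only an illustration, and the repository's solver may resolve the implicit
# equation differently.
import torch  # already imported by the surrounding scripts; repeated for self-containedness

def theta_step(f, t, x, h, theta, tol=1e-6, max_iters=100):
    explicit = x + (1 - theta) * h * f(t, x)   # known (explicit) part of the update
    if theta == 0.0:
        return explicit                        # forward Euler: no iteration needed
    y = explicit                               # initial guess for x_{k+1}
    for _ in range(max_iters):
        y_next = explicit + theta * h * f(t + h, y)
        if torch.norm(y_next - y) < tol:       # stop once the fixed point is reached
            return y_next
        y = y_next
    return y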
def ode_block(im_shape, theta=0.00, stability_limits=None, alpha={}, kernel_size=3):
    rhs = rhs_conv2d(im_shape, kernel_size=kernel_size, depth=2, T=1, num_steps=1,
                     power_iters=args.piters, spectral_limits=args.eiglim,
                     learn_scales=args.learn_scales, learn_shift=args.learn_shift,
                     bias=False, activation=args.sigma)
    solver = theta_solver(rhs, args.T, args.steps, theta, tol=args.tol)
    return regularized_ode_solver(solver, alpha, stability_limits, args.mciters, p=1)
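# Illustrative use of the convolutional block above, assuming the returned solver
# behaves as a standard torch.nn.Module; the image shape, theta value, and penalty
# weight are hypothetical placeholders, not values from the repository.
block = ode_block(im_shape=(3, 32, 32),   # (channels, height, width)
                  theta=0.5,              # implicit trapezoidal scheme
                  alpha={'TV': 1.0e-3})   # TV penalty weight, as in the MLP block above
x = torch.randn(8, 3, 32, 32)             # dummy batch of 8 images
out = block(x)                            # forward pass through the regularized solver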
# NN model

rhs = rhs_mlp(1, args.width, args.depth, T=2 * args.T, num_steps=2 * args.steps,
              activation=args.sigma, power_iters=args.piters, spectral_limits=args.eiglims,
              learn_scales=args.learn_scales, learn_shift=args.learn_shift)
model = regularized_ode_solver(
    theta_solver(rhs, args.T, args.steps, args.theta,
                 ind_out=torch.arange(0, args.steps + 1, args.steps // args.datasteps),
                 tol=args.tol),
    alpha=args.alpha, mciters=1, p=0)

#########################################################################################
#########################################################################################
# init/train/test model

# uncommenting this line enables anomaly detection for debugging; it increases cost and can leak memory
# torch.autograd.set_detect_anomaly(True)

model = ex_setup.load_model(model, args, _device)
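# The `ind_out` argument above controls which time steps the solver returns, so the
# computed trajectory can be compared against data snapshots. With hypothetical values
# args.steps = 8 and args.datasteps = 4:
#   torch.arange(0, 8 + 1, 8 // 4)  ->  tensor([0, 2, 4, 6, 8])
# i.e. every second step of the trajectory is emitted, 5 outputs in total.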
#########################################################################################
#########################################################################################
# NN model

rhs = rhs_mlp(2, args.width, args.depth, T=1, num_steps=1, activation=args.sigma,
              learn_scales=args.learn_scales, learn_shift=args.learn_shift)
model = theta_solver(rhs, args.T, args.steps, args.theta,
                     ind_out=torch.arange(0, args.steps + 1), tol=args.tol)

#########################################################################################
#########################################################################################
# init/train/plot model

# uncommenting this line enables anomaly detection for debugging; it increases cost and can leak memory
# torch.autograd.set_detect_anomaly(True)

model = ex_setup.load_model(model, args, _device)

if args.mode == "train":
    optimizer1 = torch.optim.Adam(model.parameters(), lr=args.lr,
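# A minimal sketch of the training loop that could follow the optimizer setup above;
# `dataloader`, `loss_fn`, and `args.epochs` are hypothetical names, not part of the
# repository.
    for epoch in range(args.epochs):
        for xb, yb in dataloader:
            optimizer1.zero_grad()
            loss = loss_fn(model(xb.to(_device)), yb.to(_device))
            loss.backward()        # backprop through the unrolled ODE solver
            optimizer1.step()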
        return torch.nn.functional.linear(x, self.weight, self.bias).squeeze(1)

########################################################

if args.name == "plain-10":
    T = 10
    rhs = rhs_mlp(2 + args.codim, args.width, args.depth, T=T, num_steps=T,
                  activation=args.sigma, power_iters=0, spectral_limits=[-15, 15])
    model = torch.nn.Sequential(augment(), theta_solver(rhs, T, T, 0.0), linear_classifier())
elif args.name == "1Lip-10":
    T = 10
    rhs = rhs_mlp(2 + args.codim, args.width, args.depth, T=T, num_steps=T,
                  activation=args.sigma, power_iters=1, spectral_limits=[-1, 1])
    model = torch.nn.Sequential(augment(), theta_solver(rhs, T, T, 0.0), linear_classifier())
elif args.name == "2Lip-10":
    T = 10
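# A minimal sketch of the power iteration that the `power_iters` argument above is
# assumed to control: it estimates the largest singular value of a weight matrix W so
# the layer can be rescaled to respect `spectral_limits` (e.g. [-1, 1] for the
# 1-Lipschitz "1Lip-10" model). The actual rhs_mlp implementation may differ.
def spectral_norm_estimate(W, num_iters=1):
    # requires num_iters >= 1; one iteration is often enough when W changes slowly
    v = torch.randn(W.shape[1])        # random start for the right singular vector
    for _ in range(num_iters):
        u = W @ v
        u = u / (u.norm() + 1e-12)     # left singular vector estimate
        v = W.t() @ u
        v = v / (v.norm() + 1e-12)     # right singular vector estimate
    return torch.dot(u, W @ v)         # Rayleigh-quotient estimate of sigma_max(W)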