f"Save models every {n_save} iterations, for a total of {args.n_models_saved}" ) # initialize network to the identity for iteration, data_dict in enumerate(dataloader_init): X = data_dict["X"].squeeze(dim=0) TX = neural_map(X) torch_losses = {"identity_loss": identity_loss_fn(X, TX)} torch_losses_take_step(loss_dict=torch_losses, optimizer=opt_tm, loss_names=["identity_loss"]) roll_average(loss_dict=torch_losses, mets_dict=mets, metrics=["identity_loss"], iteration=iteration) if (iteration + 1) % n_stats_to_tensorboard == 0: crayon_ship_metrics(ccexp, mets, ["identity_loss"], iteration) # iterations to evaluate on eval_iters: List[int] = [] # training loop for iteration, data_dict in enumerate(dataloader_train): X = data_dict["X"].squeeze(dim=0) Y = data_dict["Y"].squeeze(dim=0) TX = neural_map(X)
f"Save models every {n_save} iterations, for a total of {args.n_models_saved}" ) # initialize network to the identity for iteration, data_dict in enumerate(dataloader_init): X = data_dict["X"].squeeze(dim=0) TX = neural_map(X) torch_losses = {"identity_loss": identity_loss_fn(X, TX)} torch_losses_take_step(loss_dict=torch_losses, optimizer=opt_tm, loss_names=["identity_loss"]) roll_average(loss_dict=torch_losses, mets_dict=mets, metrics=["identity_loss"], iteration=iteration) if (iteration + 1) % n_stats_to_tensorboard == 0: crayon_ship_metrics(ccexp, mets, ["identity_loss"], iteration) ##################################### # The exponential loss function for the flow # initialize centers for discrepancy ##################################### Z_1 = np.arange(-2, 2, .05) Z_2 = np.arange(-2, 2, .05) Z_11, Z_22 = np.meshgrid(Z_1, Z_2) Z = np.stack((Z_11.reshape(-1, ), Z_22.reshape(-1, )), axis=1) Ztr = torch.tensor(Z, dtype=torch.float32)
        dim=2)
    prob_raw = neural_plan(Z).view((batch_dimX, batch_dimY))  # this works even if X & Y batches have different sizes
    prob_delta = prob_raw - 1.0 / batch_dimY
    torch_losses = {
        "initialization_loss_plan": (prob_delta * prob_delta).mean()
    }
    torch_losses_take_step(loss_dict=torch_losses,
                           optimizer=opt_plan,
                           loss_names=["initialization_loss_plan"])
    roll_average(loss_dict=torch_losses,
                 mets_dict=mets,
                 metrics=["initialization_loss_plan"],
                 iteration=iteration)
    if (iteration + 1) % n_stats_to_tensorboard == 0:
        crayon_ship_metrics(ccexp, mets, ["initialization_loss_plan"], iteration)

# iterations to evaluate on
eval_iters: List[int] = []

# supervised training loop
# keeps track of the current step
step_counter = 0
for iteration, data_dict in enumerate(dataloader_train):
    X = data_dict["X"].squeeze(dim=0)
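# Sanity check of the initialization target above (a sketch, not part of the
# training code): once the loss reaches zero, the reshaped
# (batch_dimX, batch_dimY) matrix of plan weights is uniform, so each row,
# i.e. each conditional distribution over the Y batch, sums to one.
_bx, _by = 3, 5
_uniform = torch.full((_bx * _by,), 1.0 / _by).view(_bx, _by)
assert torch.allclose(_uniform.sum(dim=1), torch.ones(_bx))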
for iteration, data_dict in enumerate(dataloader_init):
    X = data_dict["X"].squeeze(dim=0)
    Y = data_dict["Y"].squeeze(dim=0)
    torch_losses = {
        "potential_u_initialization_loss": identity_loss_fn(0.0, potential_u(X)),
        "potential_v_initialization_loss": identity_loss_fn(0.0, potential_v(Y))
    }
    torch_losses_take_step(loss_dict=torch_losses,
                           optimizer=opt_potential,
                           loss_names=["potential_u_initialization_loss",
                                       "potential_v_initialization_loss"])
    roll_average(loss_dict=torch_losses,
                 mets_dict=mets,
                 metrics=["potential_u_initialization_loss",
                          "potential_v_initialization_loss"],
                 iteration=iteration)
    if (iteration + 1) % n_stats_to_tensorboard == 0:
        crayon_ship_metrics(ccexp, mets,
                            ["potential_u_initialization_loss",
                             "potential_v_initialization_loss"],
                            iteration)

# iterations to evaluate on
eval_iters: List[int] = []

# layer to use with l2
reluLayer = torch.nn.ReLU()

# training loop for the dual
for iteration, data_dict in enumerate(dataloader_train):
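# The calls above pass a scalar 0.0 as the target (while the map
# initialization passes a tensor), so identity_loss_fn is assumed to be a
# broadcasting squared error; torch.nn.MSELoss would require matching
# tensor shapes. A minimal sketch consistent with both call sites:
def identity_loss_fn_sketch(target, prediction):
    # broadcasts a scalar or tensor target against the prediction
    return ((prediction - target) ** 2).mean()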
# initialize network to the identity
for iteration, data_dict in enumerate(dataloader_init):
    X = data_dict["X"].squeeze(dim=0)
    TX = neural_map(X)
    torch_losses = {"identity_loss": identity_loss_fn(X, TX)}
    torch_losses_take_step(loss_dict=torch_losses,
                           optimizer=opt_tm,
                           loss_names=["identity_loss"])
    roll_average(loss_dict=torch_losses,
                 mets_dict=mets,
                 metrics=["identity_loss"],
                 iteration=iteration)
    if (iteration + 1) % n_stats_to_tensorboard == 0:
        crayon_ship_metrics(ccexp, mets, ["identity_loss"], iteration)

# iterations to evaluate on
eval_iters: List[int] = []

# training loop
for iteration, data_dict in enumerate(dataloader_train):
    X = data_dict["X"].squeeze(dim=0)
    Y = data_dict["Y"].squeeze(dim=0)
    TX = neural_map(X)
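# ccexp is presumably a pycrayon experiment (the Crayon TensorBoard client,
# as the crayon_ prefix suggests). A minimal sketch of the shipping helper
# under that assumption; the real helper may also reset the rolling means:
def crayon_ship_metrics_sketch(ccexp, mets_dict, metrics, iteration):
    for name in metrics:
        # pycrayon: CrayonExperiment.add_scalar_value(name, value, step=...)
        ccexp.add_scalar_value(name, mets_dict[name], step=iteration)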
"identity_loss": identity_loss_fn(X, TX), "zero_loss": neural_function(X).abs().mean() + \ neural_function(Y).abs().mean() } torch_losses_take_step(loss_dict=torch_losses, optimizer=opt_tm, loss_names=["identity_loss"]) torch_losses_take_step(loss_dict=torch_losses, optimizer=opt_critic, loss_names=["zero_loss"], retain_graph=True) roll_average(loss_dict=torch_losses, mets_dict=mets, metrics=["identity_loss", "zero_loss"], iteration=iteration) if (iteration + 1) % n_stats_to_tensorboard == 0: crayon_ship_metrics(ccexp, mets, ["identity_loss"], iteration) # decide whether to apply gradient clipping if grad_clip is not None: for p in neural_function.parameters(): p.register_hook(lambda grad: torch.clamp(grad, -grad_clip, grad_clip)) # iterations to evaluate on eval_iters: List[int] = [] # training loop for iteration, data_dict in enumerate(dataloader_train):