Example #1
# training loop
for iteration, data_dict in enumerate(dataloader_train):

    X = data_dict["X"].squeeze(dim=0)
    TX = neural_map(X)

    torch_losses = {"identity_loss": identity_loss_fn(X, TX)}

    torch_losses_take_step(loss_dict=torch_losses,
                           optimizer=opt_tm,
                           loss_names=["identity_loss"])

    roll_average(loss_dict=torch_losses,
                 mets_dict=mets,
                 metrics=["identity_loss"],
                 iteration=iteration)

    if (iteration + 1) % n_stats_to_tensorboard == 0:
        crayon_ship_metrics(ccexp, mets, ["identity_loss"], iteration)
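# torch_losses_take_step is a project helper not shown in these excerpts.
# A minimal sketch of what it presumably does, assuming the standard
# zero_grad / backward / step cycle over the named losses (hypothetical,
# not the project's actual implementation):
def torch_losses_take_step(loss_dict, optimizer, loss_names,
                           retain_graph=False):
    optimizer.zero_grad()
    total = sum(loss_dict[name] for name in loss_names)
    total.backward(retain_graph=retain_graph)
    optimizer.step()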

# iterations to evaluate on
eval_iters: List[int] = []

# training loop
for iteration, data_dict in enumerate(dataloader_train):

    X = data_dict["X"].squeeze(dim=0)
    Y = data_dict["Y"].squeeze(dim=0)
    TX = neural_map(X)

    torch_losses = {
        "means_loss": means_loss_fn(X, TX),
        "covariance_loss": covariance_loss_fn(TX, Y),
        "l2_loss": l2_loss_fn(TX, X)
Example #2
# training loop
for iteration, data_dict in enumerate(dataloader_train):

    X = data_dict["X"].squeeze(dim=0)
    TX = neural_map(X)

    torch_losses = {"identity_loss": identity_loss_fn(X, TX)}

    torch_losses_take_step(loss_dict=torch_losses,
                           optimizer=opt_tm,
                           loss_names=["identity_loss"])

    roll_average(loss_dict=torch_losses,
                 mets_dict=mets,
                 metrics=["identity_loss"],
                 iteration=iteration)

    if (iteration + 1) % n_stats_to_tensorboard == 0:
        crayon_ship_metrics(ccexp, mets, ["identity_loss"], iteration)
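# roll_average and crayon_ship_metrics are likewise project helpers. A
# plausible sketch of roll_average, assuming mets_dict keeps a running mean
# per metric name (hypothetical implementation):
def roll_average(loss_dict, mets_dict, metrics, iteration):
    for name in metrics:
        value = float(loss_dict[name].detach())
        prev = mets_dict.get(name, 0.0)
        # incremental running mean over the iterations seen so far
        mets_dict[name] = prev + (value - prev) / (iteration + 1)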

#####################################
# The exponential loss function for the flow:
# initialize the centers for the discrepancy
#####################################
Z_1 = np.arange(-2, 2, .05)
Z_2 = np.arange(-2, 2, .05)
Z_11, Z_22 = np.meshgrid(Z_1, Z_2)
Z = np.stack((Z_11.reshape(-1), Z_22.reshape(-1)), axis=1)  # grid points as an [n, 2] array
Ztr = torch.tensor(Z, dtype=torch.float32)

sig = torch.tensor([0.25] * Ztr.shape[0])  # one bandwidth per grid center
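# The grid Ztr and the bandwidths sig read as Gaussian centers for the
# discrepancy term. A hedged sketch of how such centers could featurize a
# batch (the kernel form is an assumption, not taken from this code):
def center_features(X, centers=Ztr, bandwidths=sig):
    # one Gaussian response per center, per sample: shape [batch, n_centers]
    sq_dists = torch.cdist(X, centers) ** 2
    return torch.exp(-sq_dists / (2.0 * bandwidths ** 2))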

# iterations to evaluate on
eval_iters: List[int] = []

# plan initialization loop (prob_raw, the plan's raw probabilities, is
# computed earlier in the loop body and omitted from this excerpt)
for iteration, data_dict in enumerate(dataloader_train):

    Y = data_dict["Y"].squeeze(dim=0)
    batch_dimY, space_dimY = Y.shape

    # this works even if X & Y batches have different sizes
    prob_delta = prob_raw - 1.0 / batch_dimY
    torch_losses = {
        "initialization_loss_plan": (prob_delta * prob_delta).mean()
    }

    torch_losses_take_step(loss_dict=torch_losses,
                           optimizer=opt_plan,
                           loss_names=["initialization_loss_plan"])

    roll_average(loss_dict=torch_losses,
                 mets_dict=mets,
                 metrics=["initialization_loss_plan"],
                 iteration=iteration)
    if (iteration + 1) % n_stats_to_tensorboard == 0:
        crayon_ship_metrics(ccexp, mets, ["initialization_loss_plan"],
                            iteration)
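# The initialization loss is the mean squared deviation of each plan
# probability from the uniform value 1 / batch_dimY, so its unique minimum
# is the uniform plan. A toy check (toy_prob fabricated for illustration):
toy_dimY = 4
toy_prob = torch.full((3, toy_dimY), 1.0 / toy_dimY)   # exactly uniform rows
toy_delta = toy_prob - 1.0 / toy_dimY
assert float((toy_delta * toy_delta).mean()) == 0.0    # loss vanishes at uniform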

# iterations to evaluate on
eval_iters: List[int] = []

# supervised training loop
# keeps track of the current step
step_counter = 0

for iteration, data_dict in enumerate(dataloader_train):

    X = data_dict["X"].squeeze(dim=0)
    Y = data_dict["Y"].squeeze(dim=0)
    batch_dimX, space_dimX = X.shape
    batch_dimY, space_dimY = Y.shape
Example #4
        "potential_u_initialization_loss": identity_loss_fn(0.0, potential_u(X)),
        "potential_v_initialization_loss": identity_loss_fn(0.0, potential_v(Y))
    }

    torch_losses_take_step(loss_dict=torch_losses,
                           optimizer=opt_potential,
                           loss_names=["potential_u_initialization_loss",
                                       "potential_v_initialization_loss"])
    
    roll_average(loss_dict=torch_losses, mets_dict=mets,
                 metrics=["potential_u_initialization_loss",
                          "potential_v_initialization_loss"],
                 iteration=iteration)

    if (iteration + 1) % n_stats_to_tensorboard == 0:
        crayon_ship_metrics(ccexp, mets,
                            ["potential_u_initialization_loss",
                             "potential_v_initialization_loss"],
                            iteration)
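# identity_loss_fn is called both as identity_loss_fn(X, TX) (pull the map
# toward the identity) and with a 0.0 target (pull the potentials toward the
# zero function), which suggests a pointwise regression loss. A minimal
# sketch, assuming it is an MSE (the project's definition is not shown):
def identity_loss_fn(target, prediction):
    return ((prediction - target) ** 2).mean()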


# iterations to evaluate on
eval_iters: List[int] = []

# ReLU layer used with the l2 penalty on constraint violations
reluLayer = torch.nn.ReLU()
# training loop for the dual
for iteration, data_dict in enumerate(dataloader_train):

    X = data_dict["X"].squeeze(dim=0)
    Y = data_dict["Y"].squeeze(dim=0)

    # pairwise squared-distance cost matrix, used to measure the dual
    # constraint violation
    C = square_distances_fn(X, Y)
    TX = neural_map(X)

    torch_losses = {
        "identity_loss": identity_loss_fn(X, TX)
    }

    torch_losses_take_step(loss_dict=torch_losses,
                           optimizer=opt_tm,
                           loss_names=["identity_loss"])
    
    roll_average(loss_dict=torch_losses, mets_dict=mets,
                 metrics=["identity_loss"],
                 iteration=iteration)

    if (iteration + 1) % n_stats_to_tensorboard == 0:
        crayon_ship_metrics(ccexp, mets, ["identity_loss"],
                            iteration)
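# C and reluLayer hint at the usual dual-OT constraint penalty,
# relu(u(x) + v(y) - c(x, y)); that term does not appear in this excerpt,
# so the combination below is an assumption:
u_vals = potential_u(X).reshape(-1, 1)      # [batch_dimX, 1]
v_vals = potential_v(Y).reshape(1, -1)      # [1, batch_dimY]
constraint_violation = reluLayer(u_vals + v_vals - C)
dual_penalty = (constraint_violation ** 2).mean()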

# iterations to evaluate on
eval_iters: List[int] = []

# training loop
for iteration, data_dict in enumerate(dataloader_train):

    X = data_dict["X"].squeeze(dim=0)
    Y = data_dict["Y"].squeeze(dim=0)
    TX = neural_map(X)

    torch_losses = {
        "mean_discrepancy": lambda_par * mean_discrepancy_fn(TX, Y, cutoff=cutoff_par),
        "l2_loss": l2_loss_fn(TX, X)
    }
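# The objective trades off matching the target distribution against staying
# close to the input: l2_loss(TX, X) + lambda_par * mean_discrepancy(TX, Y).
# A hedged sketch of a cutoff-style mean discrepancy over the fixed centers
# Ztr / sig defined above (the real mean_discrepancy_fn is not shown; this
# kernel form is an assumption):
def mean_discrepancy_sketch(A, B, cutoff, centers=Ztr, bandwidths=sig):
    kA = torch.exp(-torch.cdist(A, centers) ** 2 / (2 * bandwidths ** 2)).mean(dim=0)
    kB = torch.exp(-torch.cdist(B, centers) ** 2 / (2 * bandwidths ** 2)).mean(dim=0)
    diff = kA - kB
    mask = (kA + kB > cutoff).float()   # drop centers with negligible mass
    return (diff * diff * mask).sum()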
Example #6
# training loop (the construction of torch_losses, which holds the
# "identity_loss" and "zero_loss" entries stepped below, is omitted from
# this excerpt)
for iteration, data_dict in enumerate(dataloader_train):

    torch_losses_take_step(loss_dict=torch_losses,
                           optimizer=opt_tm,
                           loss_names=["identity_loss"])

    torch_losses_take_step(loss_dict=torch_losses,
                           optimizer=opt_critic,
                           loss_names=["zero_loss"],
                           retain_graph=True)

    roll_average(loss_dict=torch_losses,
                 mets_dict=mets,
                 metrics=["identity_loss", "zero_loss"],
                 iteration=iteration)

    if (iteration + 1) % n_stats_to_tensorboard == 0:
        crayon_ship_metrics(ccexp, mets, ["identity_loss", "zero_loss"],
                            iteration)

# decide whether to apply gradient clipping
if grad_clip is not None:
    for p in neural_function.parameters():
        p.register_hook(lambda grad: torch.clamp(grad, -grad_clip, grad_clip))
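# The hook above clamps each gradient element as it is produced. An
# equivalent, more common pattern is PyTorch's built-in utility, called once
# per step between loss.backward() and optimizer.step():
#
#     torch.nn.utils.clip_grad_value_(neural_function.parameters(), grad_clip)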

# iterations to evaluate on
eval_iters: List[int] = []
# training loop
for iteration, data_dict in enumerate(dataloader_train):

    X = data_dict["X"].squeeze(dim=0)
    Y = data_dict["Y"].squeeze(dim=0)
    TX = transport_map(X)