Example #1
import comet_ml


def has_right_tags(exp: comet_ml.Experiment, keep: set, remove: set) -> bool:
    """
    All the "keep" tags should be in the experiment's tags
    None of the "remove" tags should be in the experiment's tags.

    Args:
        exp (comet_ml.Experiment): experiment to select (or not)
        keep (set): tags the exp should have
        remove (set): tags the exp cannot have

    Returns:
        bool: should this exp be selected
    """
    tags = set(exp.get_tags())
    has_all_keep = keep.intersection(tags) == keep
    has_any_remove = remove.intersection(tags)
    return has_all_keep and not has_any_remove
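
A hypothetical usage sketch (the experiment list and the tag sets below are illustrative, not from the original file):

# Hypothetical usage: keep experiments tagged 'prod' that are not tagged 'debug'.
experiments = []  # e.g. comet_ml.Experiment objects fetched elsewhere
selected = [exp for exp in experiments
            if has_right_tags(exp, keep={'prod'}, remove={'debug'})]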
Example #2
    # [Snippet truncated above: the dangling `max=50.)` was the tail of a
    # clamp applied to the pairwise errors er_00, er_01, er_10, er_11.]
    errors = torch.cat([er_00 + er_01, er_10 + er_11], 1)
    return torch.mean(torch.min(errors, 1)[0])
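
For context, here is a minimal self-contained sketch of the two-source permutation-invariant pattern the fragment implements; the tensor shapes and the L1 error are assumptions, and only the final two lines mirror the original:

import torch

def pit_l1_loss(est: torch.Tensor, ref: torch.Tensor) -> torch.Tensor:
    """est, ref: (batch, 2, time). Try both source orderings, keep the cheaper."""
    er_00 = (est[:, 0] - ref[:, 0]).abs().mean(-1, keepdim=True)  # est 0 vs ref 0
    er_01 = (est[:, 1] - ref[:, 1]).abs().mean(-1, keepdim=True)  # est 1 vs ref 1
    er_10 = (est[:, 0] - ref[:, 1]).abs().mean(-1, keepdim=True)  # est 0 vs ref 1
    er_11 = (est[:, 1] - ref[:, 0]).abs().mean(-1, keepdim=True)  # est 1 vs ref 0
    errors = torch.cat([er_00 + er_01, er_10 + er_11], 1)  # (batch, 2) assignments
    return torch.mean(torch.min(errors, 1)[0])  # best assignment per example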


num_available_nodes = len(federated_generators_list)
tr_step = 0
val_step = 0
prev_epoch_val_loss = 0.
for i in range(hparams['n_global_epochs']):
    res_dic = {}
    for loss_name in all_losses:
        res_dic[loss_name] = {'mean': 0., 'std': 0., 'median': 0., 'acc': []}
    print("Individual Federated Sudo-RM-RF: {} - {} || Epoch: {}/{}".format(
        experiment.get_key(), experiment.get_tags(), i + 1,
        hparams['n_global_epochs']))

    training_nodes = federated_generators_list
    sum_global_loss = 0.

    for train_node_id, node_dic in enumerate(training_nodes):
        local_model = node_dic['local_model']
        local_model = local_model.cuda()
        local_model.train()
        local_opt = torch.optim.Adam(local_model.parameters(),
                                     lr=hparams['learning_rate'])
        if hparams['patience'] > 0:
            if tr_step % hparams['patience'] == 0:
                # [Truncated in the snippet; a plausible completion of the
                # step-decay schedule, applied to the local optimizer.]
                new_lr = (hparams['learning_rate'] /
                          (hparams['divide_lr_by'] ** (tr_step // hparams['patience'])))
                for param_group in local_opt.param_groups:
                    param_group['lr'] = new_lr
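
The schedule above divides the base rate one more time after every `patience` steps; a hypothetical standalone helper (names assumed, not from the original file):

def stepped_lr(base_lr: float, divide_by: float, step: int, patience: int) -> float:
    # base_lr, base_lr/divide_by, base_lr/divide_by**2, ... once per `patience` steps
    return base_lr / (divide_by ** (step // patience))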
Example #3
model = torch.nn.DataParallel(model).cuda()

opt = torch.optim.Adam(model.parameters(), lr=hparams['learning_rate'])

all_losses = [back_loss_tr_loss_name] + \
             [k for k in sorted(val_losses.keys())] + \
             [k for k in sorted(tr_val_losses.keys())]

tr_step = 0
val_step = 0
for i in range(hparams['n_epochs']):
    res_dic = {}
    for loss_name in all_losses:
        res_dic[loss_name] = {'mean': 0., 'std': 0., 'acc': []}
    print("Experiment: {} - {} || Epoch: {}/{}".format(experiment.get_key(),
                                                       experiment.get_tags(),
                                                       i + 1,
                                                       hparams['n_epochs']))
    model.train()

    for data in tqdm(train_gen, desc='Training'):
        opt.zero_grad()
        m1wavs = data[0].unsqueeze(1).cuda()
        clean_wavs = data[-1].cuda()

        rec_sources_wavs = model(m1wavs)

        loss = back_loss_tr_loss(rec_sources_wavs,
                                 clean_wavs,
                                 initial_mixtures=m1wavs)
        loss.backward()
        opt.step()  # assumed: the snippet cuts off right after backward()
    # [Snippet truncated: the stray `return new_sources` here belonged to an
    # elided helper function in the source file.]


tr_step = 0
val_step = 0
for i in range(hparams['n_epochs']):
    res_dic = {}
    histograms_dic = {}
    for loss_name in all_losses:
        res_dic[loss_name] = {'mean': 0., 'std': 0., 'acc': []}
        res_dic[loss_name+'i'] = {'mean': 0., 'std': 0., 'acc': []}
    for hist_name in histogram_names:
        histograms_dic[hist_name] = []
        histograms_dic[hist_name+'i'] = []
    print("Higher Order Sudo-RM-RF: {} - {} || Epoch: {}/{}".format(
        experiment.get_key(), experiment.get_tags(), i+1, hparams['n_epochs']))
    model.train()

    for data in tqdm(generators['train'], desc='Training'):
        opt.zero_grad()
        #m1wavs = data[0].cuda()
        clean_wavs = data[-1].cuda()

        if hparams['max_abs_snr'] > 0.:
            clean_wavs = mix_with_random_snr(clean_wavs, hparams['max_abs_snr'])

        # Record the input SNR in dB between the two clean sources:
        # 10 * log10(energy of source 0 / energy of source 1).
        histograms_dic['tr_input_snr'] += (10. * torch.log10(
            (clean_wavs[:, 0] ** 2).sum(-1) / (1e-8 + (
                    clean_wavs[:, 1] ** 2).sum(-1)))).tolist()

        # Online mixing over samples of the batch. [Rest of comment truncated
        # in the snippet.]
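
Finally, a hypothetical sketch of what a `mix_with_random_snr`-style helper might do (the real implementation is not shown in the snippet): rescale the second source so the pair has a uniformly random SNR in [-max_abs_snr, max_abs_snr] dB, using the same 10 * log10(energy ratio) definition logged in the histogram above.

import torch

def mix_with_random_snr_sketch(wavs: torch.Tensor, max_abs_snr: float) -> torch.Tensor:
    """wavs: (batch, 2, time). Rescale source 1 to a random SNR vs. source 0."""
    target_snr = (torch.rand(wavs.shape[0], 1, device=wavs.device) * 2. - 1.) * max_abs_snr
    e0 = (wavs[:, 0] ** 2).sum(-1, keepdim=True)
    e1 = (wavs[:, 1] ** 2).sum(-1, keepdim=True) + 1e-8
    # Solve 10 * log10(e0 / (g**2 * e1)) == target_snr for the gain g.
    g = torch.sqrt(e0 / (e1 * 10. ** (target_snr / 10.)))
    out = wavs.clone()
    out[:, 1] = out[:, 1] * g
    return out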