Code Example #1
import torch
import soundfile as sf

# load_model, file2mel, normalize, denormalize, mel2wav and the *_attack
# helpers are assumed to come from the surrounding project's modules.


def main(
    model_dir: str,
    vc_src: str,
    vc_tgt: str,
    adv_tgt: str,
    output: str,
    eps: float,
    n_iters: int,
    attack_type: str,
):
    # A source utterance is required for every attack type except "emb".
    assert attack_type == "emb" or vc_src is not None
    model, config, attr, device = load_model(model_dir)

    # Convert the target waveforms to mel spectrograms, normalize them, and
    # move them to the model's device with a batch dimension.
    vc_tgt = file2mel(vc_tgt, **config["preprocess"])
    adv_tgt = file2mel(adv_tgt, **config["preprocess"])

    vc_tgt = normalize(vc_tgt, attr)
    adv_tgt = normalize(adv_tgt, attr)

    vc_tgt = torch.from_numpy(vc_tgt).T.unsqueeze(0).to(device)
    adv_tgt = torch.from_numpy(adv_tgt).T.unsqueeze(0).to(device)

    # Only the embedding attack works without a source utterance.
    if attack_type != "emb":
        vc_src = file2mel(vc_src, **config["preprocess"])
        vc_src = normalize(vc_src, attr)
        vc_src = torch.from_numpy(vc_src).T.unsqueeze(0).to(device)

    if attack_type == "e2e":
        adv_inp = e2e_attack(model, vc_src, vc_tgt, adv_tgt, eps, n_iters)
    elif attack_type == "emb":
        adv_inp = emb_attack(model, vc_tgt, adv_tgt, eps, n_iters)
    elif attack_type == "fb":
        adv_inp = fb_attack(model, vc_src, vc_tgt, adv_tgt, eps, n_iters)
    else:
        raise NotImplementedError()

    # Undo preprocessing: drop the batch dimension, denormalize, vocode the
    # mel spectrogram back to audio, and write the adversarial waveform.
    adv_inp = adv_inp.squeeze(0).T
    adv_inp = denormalize(adv_inp.data.cpu().numpy(), attr)
    adv_inp = mel2wav(adv_inp, **config["preprocess"])

    sf.write(output, adv_inp, config["preprocess"]["sample_rate"])
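As a usage sketch, a call to main might look like the following; every path and hyperparameter is an illustrative placeholder, not a value from the project.

# Hypothetical invocation; all paths and hyperparameters are illustrative.
main(
    model_dir="checkpoints/vc_model",
    vc_src="data/source.wav",
    vc_tgt="data/target.wav",
    adv_tgt="data/adv_target.wav",
    output="adversarial.wav",
    eps=0.1,
    n_iters=1000,
    attack_type="e2e",
)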
Code Example #2

import numpy as np
import matplotlib.pyplot as plt

# load_model, get_data and iterate_minibatches are assumed to come from the
# surrounding project.


def infer(data_filepath='data/flowers.hdf5',
          z_dim=128,
          out_dir='gan',
          n_steps=10):

    G = load_model(out_dir)
    # Pull a single minibatch and keep its caption embedding and raw text.
    val_data = get_data(data_filepath, 'train')
    val_data = next(iterate_minibatches(val_data, 1))
    emb_fixed, txt_fixed = val_data[1], val_data[2]

    z_start = np.random.uniform(-1, 1, size=(1, z_dim))
    z_end = np.random.uniform(-1, 1, size=(1, z_dim))

    G.trainable = False
    for i in range(n_steps + 1):
        # Linearly interpolate between the two latent vectors.
        p = i / float(n_steps)
        z = z_start * (1 - p) + z_end * p
        fake_image = G.predict([z, emb_fixed])[0]
        # Map the generator output from [-1, 1] to [0, 1] before saving.
        img = (fake_image + 1) * 0.5
        plt.imsave("{}/fake_z_interpolation_i{}.png".format(out_dir, i), img)
        with open("{}/fake_z_interpolation.txt".format(out_dir), "a") as f:
            print(i, str(txt_fixed[0]).strip(), file=f)
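As a usage sketch, the defaults above already cover a working call (the HDF5 path is whatever get_data expects):

# Writes n_steps + 1 interpolated frames plus a caption log under out_dir.
infer(data_filepath='data/flowers.hdf5', z_dim=128, out_dir='gan', n_steps=10)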
Code Example #3
    df, df_train, df_validation = load_data(
        training_cutoff=validation_cutoff, validation_cutoff=None)

    #%% Train and Save Models

    models = []
    regions_val = list(set(df_validation[region_col]))
    dates_val = sorted(list(set(df_validation[date_col])))
    validation_predictions = {}

    if train_bilstm:
        bilstm = BILSTMModel(**bilstm_params_dict)
        bilstm.fit(df_train, 'first')
        models.append('bilstm')
        save_model(bilstm, bilstm_file.replace(".pickle", "_train.pickle"))
    if load_bilstm:
        bilstm = load_model(bilstm_file.replace(".pickle", "_train.pickle"))
        validation_predictions['bilstm'] = bilstm.predict(
            regions_val, dates_val, 'first')
        models.append('bilstm')

    if train_sir:
        sir = SIRModel(**sir_params_dict)
        sir.fit(df_train)
        save_model(sir, sir_file.replace(".pickle", "_train.pickle"))

    if load_sir:
        sir = load_model(sir_file.replace(".pickle", "_train.pickle"))
        try:
            validation_predictions['sir'] = sir.predict(regions_val, dates_val)
        except Exception:
            # Skip SIR predictions when the model cannot cover the
            # requested regions/dates.
            pass
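save_model and load_model themselves are not shown in this snippet; a minimal pickle-based sketch consistent with how they are called (the project's actual implementation may differ):

import pickle

def save_model(model, filename):
    # Serialize a fitted model to disk.
    with open(filename, 'wb') as f:
        pickle.dump(model, f)

def load_model(filename):
    # Restore a previously pickled model.
    with open(filename, 'rb') as f:
        return pickle.load(f)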
Code Example #4
warnings.filterwarnings('ignore')

# %%

df, df_train, df_test = load_data(training_cutoff=training_cutoff,
                                  validation_cutoff=validation_cutoff)

regions = list(set(df_test[region_col]))
dates = sorted(set(df_test[date_col]))

# %% Load Models and Make Predictions

output = {}
models = []
if load_sir:
    sir = load_model(sir_file)
    output['sir'] = sir.predict(regions, dates)
    models.append('sir')

if load_knn:
    knn = load_model(knn_file)
    output['knn'] = knn.predict(regions, dates)
    models.append('knn')

if load_mdp:
    mdp = load_model(mdp_file)
    output['mdp'] = mdp.predict(regions, dates)
    models.append('mdp')

if load_bilstm:
    bilstm = load_model(bilstm_file)
    output['bilstm'] = bilstm.predict(regions, dates)
    models.append('bilstm')
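The four load/predict blocks repeat one pattern; as a sketch, the same logic collapsed into a loop (using only names that already appear in the snippet):

for enabled, name, path in [(load_sir, 'sir', sir_file),
                            (load_knn, 'knn', knn_file),
                            (load_mdp, 'mdp', mdp_file),
                            (load_bilstm, 'bilstm', bilstm_file)]:
    if enabled:
        m = load_model(path)
        output[name] = m.predict(regions, dates)
        models.append(name)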
Code Example #5
def train_flair(model_name=''):
    # Init data
    train_dataset, val_dataset = prepare_datasets_FLAIR()
    train_loader = DataLoader(train_dataset, batch_size=10, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=10, shuffle=True)
    loaders = dict(train=train_loader, val=val_loader)

    # Init Model
    if model_name == '':
        model = UNetFLAIR().cuda()
    else:
        model = data_utils.load_model(model_name)

    # The optimizer, scheduler and loss are needed whether the model is
    # fresh or resumed from a checkpoint.
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, amsgrad=True)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer,
                                                       gamma=0.984)
    loss_fn = nn.BCELoss()

    epochs = 500
    epoch_losses = dict(train=[], val=[])
    for epoch in range(epochs):
        for phase in 'train val'.split():
            if phase == 'train':
                model = model.train()
                torch.set_grad_enabled(True)

            else:
                model = model.eval()
                torch.set_grad_enabled(False)

            loader = loaders[phase]
            running_loss = []

            for batch in loader:
                imgs, masks = batch
                imgs = imgs.cuda()
                masks = masks.cuda()

                outputs = model(imgs)
                loss = loss_fn(outputs, masks)

                running_loss.append(loss.item())

                if phase == 'train':
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()

            # End of Epoch
            print(f'{epoch}) {phase} loss: {np.mean(running_loss)}')
            visualize_results(loader, model, epoch, phase)

            if epoch % 10 == 0:
                results_dir = 'weight_flair/'
                if not os.path.isdir(results_dir):
                    os.makedirs(results_dir)

                data_utils.save_model(model, results_dir + f'model_{epoch}.pt')

            epoch_losses[phase].append(np.mean(running_loss))
            if phase == 'val':
                df = pd.DataFrame(data=epoch_losses)
                df.to_csv('loss.csv')
            tensorboard(epoch_losses[phase], phase)

            if phase == 'train':
                scheduler.step()
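Typical entry points, the checkpoint path being illustrative:

train_flair()                             # train a fresh UNetFLAIR
train_flair('weight_flair/model_100.pt')  # resume from a saved checkpoint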
Code Example #6
def load_model(filename):
    # 'd' is assumed to be the project's serialization module; models are
    # expected as pickles under the models/ directory.
    assert os.path.exists('models/{}.pkl'.format(filename))
    return d.load_model(filename)
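Given the models/ naming convention, a call such as the following expects models/sir.pkl on disk (the model name is illustrative):

sir = load_model('sir')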
Code Example #7
File: main_train.py  Project: oskali/mit_cassandra
    df, df_train, df_validation = load_data(
        training_cutoff=training_agg_cutoff, validation_cutoff=training_cutoff)

    # %% Train and Save Models

    models = []
    regions_val = list(set(df_validation[region_col]))
    dates_val = sorted(set(df_validation[date_col]))
    validation_predictions = {}

    if train_sir:
        sir = SIRModel(**sir_params_dict)
        sir.fit(df_train)
        save_model(sir, sir_file)

    if load_sir:
        sir = load_model(sir_file)
        try:
            validation_predictions['sir'] = sir.predict(regions_val, dates_val)
        except Exception:
            pass
        models.append('sir')

    if train_knn:
        knn = KNNModel(**knn_params_dict)
        knn.fit(df_train)
        models.append('knn')
        save_model(knn, knn_file)
    if load_knn:
        knn = load_model(knn_file)
        try:
            validation_predictions['knn'] = knn.predict(regions_val, dates_val)
        except Exception:
            pass
        models.append('knn')
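The repeated try/except-around-predict pattern could be factored into a small helper; a sketch reusing the snippet's own call shape:

def safe_predict(model, regions, dates):
    # Return predictions, or None when the model cannot cover the
    # requested regions/dates.
    try:
        return model.predict(regions, dates)
    except Exception:
        return None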
Code Example #8
    def sample(self, t_0: str, n_samples: int, dates: list,
               input_samples: dict, validation_cutoff: str) -> dict:

        dir_path = os.path.dirname(os.path.abspath(__file__))
        repo_path = os.path.join(dir_path, '..', '..', '..')

        np.random.seed(self.model_parameters['random_seed'])
        random_state = self.model_parameters['random_seed']

        # Resolve model artifact paths relative to the repository root;
        # load_model expects file paths, not DataFrames.
        sir_file = os.path.join(repo_path, self.model_parameters['sir_file'])
        knn_file = os.path.join(repo_path, self.model_parameters['knn_file'])
        mdp_file = os.path.join(repo_path, self.model_parameters['mdp_file'])
        bilstm_file = os.path.join(
            repo_path, self.model_parameters['bilstm_file'])
        agg_file = os.path.join(repo_path, self.model_parameters['agg_file'])
        ci_file = os.path.join(repo_path, self.model_parameters['ci_file'])
        preval_file = os.path.join(
            repo_path, self.model_parameters['preval_file'])

        regions = ['Massachusetts', 'New York']
        output = {}

        # All requested dates must fall strictly after the validation
        # cutoff; anything earlier overlaps the training data.
        cutoff = datetime.datetime.strptime(validation_cutoff, '%Y-%m-%d')
        if any(date <= cutoff for date in dates):
            raise Exception(
                'Prediction dates appear in the training data. Please make '
                'predictions for a date after ' + validation_cutoff)

        sir = load_model(sir_file)
        output['sir'] = sir.predict(regions, dates)

        knn = load_model(knn_file)
        output['knn'] = knn.predict(regions, dates)

        mdp = load_model(mdp_file)
        output['mdp'] = mdp.predict(regions, dates)

        bilstm = load_model(bilstm_file)
        output['bilstm'] = bilstm.predict(regions, dates)

        agg = load_model(agg_file)
        output['agg'] = agg.predict(regions, dates, output)

        ci = load_model(ci_file)

        preval = load_model(preval_file)

        sampled_output = ci.sample(output, n_samples, random_state)
        model_type = 'agg'
        prediction_distribution = dict.fromkeys(regions)
        for state in regions:
            predictions = sampled_output[model_type][state]
            prediction_distribution[state] = predictions
        all_samples = []
        date_list = sampled_output[model_type][regions[0]][0].index.strftime(
            '%Y-%m-%d').tolist()
        samples = dict(dates=date_list, samples=None)
        for t_i in range(n_samples):
            sample_dict = dict.fromkeys(regions)
            for state in regions:
                sample_dict[state] = list(prediction_distribution[state][t_i])
            all_samples.append(sample_dict)
        # Re-shape each sample from {state: [value per date]} into a list of
        # per-date {state: value} dicts.
        samples['samples'] = [
            [{state: all_samples[i][state][d] for state in all_samples[0]}
             for d in range(len(samples['dates']))]
            for i in range(n_samples)
        ]
        samples = preval.convert(samples)
        return samples
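A hypothetical call for orientation; 'forecaster' stands for whatever object defines sample(), and all argument values are placeholders (dates must be datetime objects later than validation_cutoff):

import datetime

samples = forecaster.sample(
    t_0='2020-07-01',
    n_samples=100,
    dates=[datetime.datetime(2020, 7, 15), datetime.datetime(2020, 7, 22)],
    input_samples={},
    validation_cutoff='2020-06-30',
)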