def train(features,
          fea_len,
          split_frac,
          out_file,
          save=False,
          save_folder=None):
    '''
    Hyperparameters:
        features
        amount of training data (split_frac)
        feature length (fea_len)
    '''
    if isinstance(out_file, str):
        out_file = open(out_file, 'w')
    d = Dataset(features, split_frac, 1, gpu)
    print('defining architecture')
    enc = ChainEncoder(d.get_v_fea_len(), d.get_e_fea_len(), fea_len, 'last')
    predictor = Predictor(fea_len)
    loss = nn.NLLLoss()
    if gpu:
        enc.cuda()
        predictor.cuda()
        loss.cuda()

    optimizer = optim.Adam(
        list(enc.parameters()) + list(predictor.parameters()))

    print('training')
    test_v_features, test_e_features, test_A_pls, test_B_pls, test_y = d.get_test_pairs()
    test_y = test_y.data.cpu().numpy()
    for train_iter in range(12000):
        v_features, e_features, A_pls, B_pls, y = d.get_train_pairs(100)
        enc.zero_grad()
        predictor.zero_grad()
        A_code, B_code = encode(enc, fea_len, v_features, e_features, A_pls,
                                B_pls)
        softmax_output = predictor(A_code, B_code)
        loss_val = loss(softmax_output, y)
        loss_val.backward()
        optimizer.step()

        enc.zero_grad()
        predictor.zero_grad()
        test_A_code, test_B_code = encode(enc, fea_len, test_v_features,
                                          test_e_features, test_A_pls,
                                          test_B_pls)
        softmax_output = predictor(test_A_code, test_B_code).data.cpu().numpy()
        test_y_pred = softmax_output.argmax(axis=1)
        cur_acc = float((test_y_pred == test_y).sum()) / len(test_y)
        out_file.write('%f\n' % cur_acc)
        out_file.flush()
        if save and train_iter % 50 == 0:
            if save_folder[-1] == '/':
                save_folder = save_folder[:-1]
            torch.save(enc.state_dict(),
                       '%s/%i_enc.model' % (save_folder, train_iter))
            torch.save(predictor.state_dict(),
                       '%s/%i_pred.model' % (save_folder, train_iter))
    out_file.close()
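A minimal invocation sketch for the train() above, assuming `features` has already been loaded and the module-level `gpu` flag is defined elsewhere; the feature length, split fraction, and paths are placeholders, not values from the original project.

# Hypothetical call; all argument values are illustrative only.
train(features,
      fea_len=32,
      split_frac=0.8,
      out_file='train_acc.log',
      save=True,
      save_folder='checkpoints')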
Example #2
def main(exp, frame_sizes, dataset, **params):
    params = dict(
        default_params,
        exp=exp, frame_sizes=frame_sizes, dataset=dataset,
        **params
    )

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    model = SampleRNN(
        frame_sizes=params['frame_sizes'],
        n_rnn=params['n_rnn'],
        dim=params['dim'],
        learn_h0=params['learn_h0'],
        q_levels=params['q_levels'],
        weight_norm=params['weight_norm']
    )
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']
    data_loader = make_data_loader(model.lookback, params)

    train_data_loader = data_loader(0, val_split, eval=False)
    val_data_loader = data_loader(val_split, test_split, eval=True)
    test_data_loader = data_loader(test_split, 1, eval=True)
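The gradient_clipping helper wrapped around Adam above is defined elsewhere in the repository; a minimal sketch of such a wrapper, assuming it clips the global gradient norm of each parameter group before every optimizer step (the max_norm value is an assumption):

from torch.nn.utils import clip_grad_norm_

class ClippingOptimizer:
    """Delegates to an inner optimizer, clipping gradient norms before each step."""

    def __init__(self, optimizer, max_norm=1.0):
        self.optimizer = optimizer
        self.max_norm = max_norm

    def zero_grad(self):
        self.optimizer.zero_grad()

    def step(self, closure=None):
        loss = None
        if closure is not None:
            loss = closure()  # forward/backward pass populates the gradients
        for group in self.optimizer.param_groups:
            clip_grad_norm_(group['params'], self.max_norm)
        self.optimizer.step()
        return loss

def gradient_clipping(optimizer, max_norm=1.0):
    return ClippingOptimizer(optimizer, max_norm)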
Example #3
def main():
    FLAGS = build_parser()
    input_image = FLAGS.input_image
    model_dir = FLAGS.model_directory

    predictor = Predictor(model_dir)
    preds = predictor.predict(input_image)
    label_preds = [p[1] for p in preds]
    print(label_preds)
Example #4
def main(exp, frame_sizes, dataset, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  dataset=dataset,
                  **params)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model)
    # if params['cuda']:
    #     model = model.cuda()
    #     predictor = predictor.cuda()
    #
    # # Adam lr = 0.001, betas=(0.9, 0.999) weight_decay=0

    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']
    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path, load_best=False)

    data_loader = make_data_loader(model.lookback, params)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        predictor.load_state_dict(state_dict)
        generator = Generator(predictor.model, cuda=True)
    else:
        raise FileNotFoundError('There is no valid checkpoint.')
    save_path = '/mnt/IDMT-WORKSPACE/DATA-STORE/xiaoyg/samplernn/generated_audio'
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    for i in range(0, 10):
        y = generator(
            1000,
            160000,
            i,
            data_seed=data_loader(val_split, test_split,
                                  eval=True).dataset).float().numpy()
        for j in range(len(y)):
            path = os.path.join(save_path,
                                '{}_{}.wav'.format(class_label[i], j))
            wav = y[j, :].astype(np.int16)
            wav.resize(wav.size, 1)
            write_wav(path, wav, 16000, 'PCM_16')
Example #5
def train(features, fea_len, split_frac, out_file):
    if isinstance(out_file, str):
        out_file = open(out_file, 'w')
    d = Dataset(features, split_frac, gpu)
    print('defining architecture')
    enc = ChainEncoder(d.get_v_fea_len(), d.get_e_fea_len(), fea_len, 'last')
    predictor = Predictor(fea_len)
    loss = nn.NLLLoss()
    if gpu:
        enc.cuda()
        predictor.cuda()
        loss.cuda()

    optimizer = optim.Adam(
        list(enc.parameters()) + list(predictor.parameters()))

    print('training')
    test_chain_A, test_chain_B, test_y = d.get_test_pairs()
    test_y = test_y.data.cpu().numpy()
    for train_iter in range(4000):
        chains_A, chains_B, y = d.get_train_pairs(1000)
        enc.zero_grad()
        predictor.zero_grad()
        output_A = enc(chains_A)
        output_B = enc(chains_B)
        softmax_output = predictor(output_A, output_B)
        loss_val = loss(softmax_output, y)
        loss_val.backward()
        optimizer.step()

        enc.zero_grad()
        predictor.zero_grad()
        output_test_A = enc(test_chain_A)
        output_test_B = enc(test_chain_B)
        softmax_output = predictor(output_test_A,
                                   output_test_B).data.cpu().numpy()
        test_y_pred = softmax_output.argmax(axis=1)
        cur_acc = float((test_y_pred == test_y).sum()) / len(test_y)
        print('test acc:', cur_acc)
        out_file.write('%f\n' % cur_acc)
        if train_iter % 50 == 0:
            torch.save(enc.state_dict(), 'ckpt/%i_encoder.model' % train_iter)
            torch.save(predictor.state_dict(),
                       'ckpt/%i_predictor.model' % train_iter)
    out_file.close()
Example #6
def main():
    model = SampleRNN(
        frame_sizes=[16, 4], n_rnn=1, dim=1024, learn_h0=True, q_levels=256
    )
    predictor = Predictor(model).cuda()
    predictor.load_state_dict(torch.load('model.tar'))

    generator = Generator(predictor.model, cuda=True)

    t = time()
    samples = generator(5, 16000)
    print('generated in {}s'.format(time() - t))

    write_wav(
        'sample.wav',
        samples.cpu().float().numpy()[0, :],
        sr=16000,
        norm=True
    )
Example #7
def define_model(args, data_shape, placeholders):

    input_dim = 16110
    label_dim = data_shape[0] - input_dim

    if args.model_type in ('Classifier', 'Predictor'):
        model = Predictor(placeholders, input_dim, label_dim, args)

    else:
        model = None
        print("beep beep invalid model type")

    return model
Example #8
def main(exp, frame_sizes, dataset, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  dataset=dataset,
                  **params)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    data_loader = make_data_loader(model.lookback, params)

    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    samples_path = os.path.join(results_path, 'samples')
    trainer = Trainer(predictor,
                      sequence_nll_loss_bits,
                      optimizer,
                      data_loader(0, val_split, eval=False),
                      checkpoints_path,
                      samples_path,
                      params['n_samples'],
                      params['sample_length'],
                      params['sample_rate'],
                      data_loader(val_split, test_split, eval=True),
                      data_loader(test_split, 1, eval=True),
                      cuda=params['cuda'])

    checkpoint_data = load_last_checkpoint(checkpoints_path)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)
    trainer.run(params['epoch_limit'])
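sequence_nll_loss_bits, passed to the Trainer above as the criterion, is imported from the repository; a sketch of such a criterion, assuming the predictor emits per-sample log-probabilities over q_levels and the targets are integer sample indices, is just mean NLL converted from nats to bits:

import math
import torch.nn.functional as F

def sequence_nll_loss_bits(log_probs, targets):
    # log_probs: (batch, seq_len, q_levels) log-probabilities; targets: (batch, seq_len) ints
    q_levels = log_probs.size(-1)
    nll_nats = F.nll_loss(log_probs.reshape(-1, q_levels), targets.reshape(-1))
    return nll_nats / math.log(2)  # convert nats to bits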
Example #9
    inp = dir_path + "/" + str(0) + ".png"
    inp = prepare_dataset(inp)
    input_box.append(inp)
    img = dir_path + "/" + str(index) + ".png"
    img = prepare_dataset(img)
    frame_box.append(img)

xtest = chainer.as_variable(xp.array(input_box).astype(xp.float32))
ctest = chainer.as_variable(xp.array(frame_box).astype(xp.float32))

test = prepare_dataset("./test.png")
test = chainer.as_variable(xp.array(test).astype(xp.float32)).reshape(
    1, 3, size, size)
test = F.tile(test, (framesize, 1, 1, 1))

predictor = Predictor()
predictor.to_gpu()
pre_opt = set_optimizer(predictor)

discriminator_content = Discriminator_image()
discriminator_content.to_gpu()
dis_c_opt = set_optimizer(discriminator_content)

discriminator_sequence = Discriminator_stream()
discriminator_sequence.to_gpu()
dis_s_opt = set_optimizer(discriminator_sequence)

feature_extractor = EncDec()
feature_extractor.to_gpu()
fextract_opt = set_optimizer(feature_extractor)
serializers.load_npz("./encdec.model", feature_extractor)
Example #10
def main(exp, frame_sizes, dataset, **params):
    params = dict(
        default_params,
        exp=exp, frame_sizes=frame_sizes, dataset=dataset,
        **params
    )

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    # Save samplernn parameters in .json for future audio generation
    import json
    with open(os.path.join(results_path, 'sample_rnn_params.json'), 'w') as fp:
        json.dump(params, fp, sort_keys=True, indent=4)

    # Model
    model = SampleRNN(
        frame_sizes=params['frame_sizes'],
        n_rnn=params['n_rnn'],
        dim=params['dim'],
        learn_h0=params['learn_h0'],
        q_levels=params['q_levels'],
        weight_norm=params['weight_norm'],
        batch_size=params['batch_size']
    )
    print("CUDA num: {}".format(torch.cuda.device_count()))
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    model_cnnseq2sample = CNNSeq2SampleRNN(params).cuda()

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    data_loader = make_data_loader(model.lookback, params)
    data_loader_test = make_data_loader(model.lookback, params, npz_filename=params['npz_filename_test'])
    # test_split = 1 - params['test_frac']
    # val_split = test_split - params['val_frac']

    trainer = Trainer(
        predictor, model_cnnseq2sample, sequence_nll_loss_bits, optimizer,
        # data_loader(0, val_split, eval=False),
        data_loader(0, 1, eval=False),
        cuda=params['cuda']
    )

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path)
    checkpoint_data_cnnseq2sample = load_last_checkpoint(checkpoints_path, model_type='cnnseq2sample')
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        (state_dict_cnnseq2sample, epoch, iteration) = checkpoint_data_cnnseq2sample
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)
        model_cnnseq2sample.load_state_dict(state_dict_cnnseq2sample)

    trainer.register_plugin(TrainingLossMonitor(
        smoothing=params['loss_smoothing']
    ))
    trainer.register_plugin(ValidationPlugin(
        # data_loader(val_split, test_split, eval=True),
        # data_loader_test(0, 1, eval=True)
        data_loader_test(0, params['val_frac'], eval=True),
        data_loader_test(params['val_frac'], 1, eval=True)
        # data_loader(test_split, 1, eval=True)
    ))
    trainer.register_plugin(AbsoluteTimeMonitor())
    trainer.register_plugin(SaverPlugin(
        checkpoints_path, params['keep_old_checkpoints']
    ))
    trainer.register_plugin(GeneratorPlugin(
        os.path.join(results_path, 'samples'), params['n_samples'],
        params['sample_length'], params['sample_rate']
    ))
    trainer.register_plugin(
        Logger([
            'training_loss',
            'validation_loss',
            'test_loss',
            'time'
        ])
    )
    trainer.register_plugin(StatsPlugin(
        results_path,
        iteration_fields=[
            'training_loss',
            ('training_loss', 'running_avg'),
            'time'
        ],
        epoch_fields=[
            'validation_loss',
            'test_loss',
            'time'
        ],
        plots={
            'loss': {
                'x': 'iteration',
                'ys': [
                    'training_loss',
                    ('training_loss', 'running_avg'),
                    'validation_loss',
                    'test_loss',
                ],
                'log_y': True
            }
        }
    ))

    trainer.run(params['epoch_limit'])
Example #11
torch.manual_seed(option.seed)

# Device setup
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)

# Prepare the data loaders
train_set = ReberGrammarSet('reber_train_2.4M.txt', option.length)
train_data_loader = DataLoader(dataset=train_set,
                               batch_size=option.batchSize,
                               shuffle=True)
test_set = ReberGrammarSet('reber_test_1M.txt', option.length)
test_data_loader = DataLoader(dataset=test_set, batch_size=1)

# Prepare the classifier and the loss function
predictor = Predictor(Util.num_characters, option.hidden_size).to(device)
criterion = nn.CrossEntropyLoss()

# Prepare the Adam optimizer
optimizer = optim.Adam(predictor.parameters(), lr=option.lr)

# Display the network structure
print(predictor)


def train():
    mb = master_bar(range(option.nEpochs))

    for epoch in mb:
        start_time = time.time()
Example #12
def main(exp, dataset, **params):
    params = dict(default_params, exp=exp, dataset=dataset, **params)
    print(params)
    storage_client = None
    bucket = None

    path = os.path.join(params['datasets_path'], params['dataset'])

    if params['bucket']:
        storage_client = storage.Client()
        bucket = Bucket(storage_client, params['bucket'])
        preload_dataset(path, storage_client, bucket)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    (quantize, dequantize) = quantizer(params['q_method'])
    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model, dequantize)
    if params['cuda'] is not False:
        print(params['cuda'])
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(
        torch.optim.Adam(predictor.parameters(), lr=params['learning_rate']))

    data_loader = make_data_loader(path, model.lookback, quantize, params)
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    trainer = Trainer(predictor,
                      sequence_nll_loss_bits,
                      optimizer,
                      data_loader(0, val_split, eval=False),
                      cuda=params['cuda'])

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path, storage_client,
                                           bucket)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)

    trainer.register_plugin(
        TrainingLossMonitor(smoothing=params['loss_smoothing']))
    trainer.register_plugin(
        ValidationPlugin(data_loader(val_split, test_split, eval=True),
                         data_loader(test_split, 1, eval=True)))
    trainer.register_plugin(SchedulerPlugin(params['lr_scheduler_step']))

    def upload(file_path):
        if bucket is None:
            return

        name = file_path.replace(os.path.abspath(os.curdir) + '/', '')
        blob = Blob(name, bucket)
        try:
            blob.upload_from_filename(file_path, timeout=300)
        except Exception as e:
            print(str(e))

    trainer.register_plugin(AbsoluteTimeMonitor())

    samples_path = os.path.join(results_path, 'samples')
    trainer.register_plugin(
        SaverPlugin(checkpoints_path, params['keep_old_checkpoints'], upload))
    trainer.register_plugin(
        GeneratorPlugin(samples_path,
                        params['n_samples'],
                        params['sample_length'],
                        params['sample_rate'],
                        params['q_levels'],
                        dequantize,
                        params['sampling_temperature'],
                        upload=upload))
    trainer.register_plugin(
        Logger(['training_loss', 'validation_loss', 'test_loss', 'time']))
    trainer.register_plugin(
        StatsPlugin(
            results_path,
            iteration_fields=[
                'training_loss',
                #('training_loss', 'running_avg'),
                'time'
            ],
            epoch_fields=[
                'training_loss', ('training_loss', 'running_avg'),
                'validation_loss', 'test_loss', 'time'
            ],
            plots={
                'loss': {
                    'x':
                    'iteration',
                    'ys': [
                        'training_loss',
                        # ('training_loss', 'running_avg'),
                        'validation_loss',
                        'test_loss'
                    ],
                    'log_y':
                    True
                }
            }))

    init_comet(params, trainer, samples_path, params['n_samples'],
               params['sample_rate'])

    trainer.run(params['epoch_limit'])
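quantizer(params['q_method']) above returns a (quantize, dequantize) pair of functions; a minimal linear variant is sketched below under the assumption that audio samples are floats in [-1, 1] and are mapped onto q_levels discrete levels (the repository's version may also support other methods such as mu-law).

import torch

def quantizer(q_method, q_levels=256):
    # Only a linear mapping is sketched here; other q_method values are assumptions.
    def quantize(samples):
        samples = samples.clamp(-1.0, 1.0)
        return ((samples + 1.0) / 2.0 * (q_levels - 1)).long()

    def dequantize(levels):
        return levels.float() / (q_levels - 1) * 2.0 - 1.0

    return quantize, dequantize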
Example #13
File: main.py Project: Tyqnn0323/DAT
def main():
    # Data Loader (Input Pipeline)
    print('loading dataset...')
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               num_workers=4,
                                               drop_last=True,
                                               shuffle=True)

    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=batch_size,
                                              num_workers=4,
                                              drop_last=True,
                                              shuffle=False)
    # Define models
    print('building model...')
    g = Generator(input_channel=input_channel)
    h = Predictor(n_outputs=num_classes)
    h2 = Predictor(n_outputs=num_classes)
    g.cuda()
    h.cuda()
    h2.cuda()
    print(g.parameters, h.parameters, h2.parameters)

    optimizer_g = torch.optim.Adam(g.parameters(), lr=learning_rate)
    optimizer_h = torch.optim.Adam(h.parameters(), lr=learning_rate)
    optimizer_h2 = torch.optim.Adam(h2.parameters(), lr=learning_rate)

    with open(txtfile, "a") as myfile:
        myfile.write('epoch: train_acc test_acc\n')

    epoch = 0
    train_acc = 0

    # evaluate models with random weights
    test_acc = evaluate(test_loader, g, h)
    print('Epoch [%d/%d], Test Accuracy: %.4f' %
          (epoch + 1, args.n_epoch, test_acc))

    # save results
    with open(txtfile, "a") as myfile:
        myfile.write(
            str(int(epoch)) + ': ' + str(train_acc) + ' ' + str(test_acc) +
            "\n")

    best_ce_acc = 0
    # training
    for epoch in range(1, args.n_epoch):
        # train models
        g.train()
        h.train()
        h2.train()
        adjust_learning_rate(optimizer_g, epoch)
        adjust_learning_rate(optimizer_h, epoch)
        adjust_learning_rate(optimizer_h2, epoch)
        train_acc = train(train_loader, epoch, g, optimizer_g, h, optimizer_h,
                          h2, optimizer_h2)

        # evaluate models
        test_acc = evaluate(test_loader, g, h)
        print(
            'Epoch [%d/%d], Training Accuracy: %.4F %%, Test Accuracy: %.4F %%'
            % (epoch + 1, args.n_epoch, train_acc * 100, test_acc * 100))
        with open(txtfile, "a") as myfile:
            myfile.write(
                str(int(epoch)) + ': ' + str(train_acc) + ' ' + str(test_acc) +
                "\n")
Example #14
def main(frame_sizes, **params):

    use_cuda = torch.cuda.is_available()

    params = dict(default_params, frame_sizes=frame_sizes, **params)

    # Redefine parameters listed in the experiment directory and separated with '~'
    for i in params['model'].split('/')[1].split('~'):
        param = i.split(':')
        if param[0] in params:
            params[param[0]] = as_type(param[1], type(params[param[0]]))
    # Define npy file names with maximum and minimum values of de-normalized conditioners
    npy_name_min_max_cond = 'npy_datasets/min_max' + params['norm_ind'] * '_ind' + (not params['norm_ind']) * '_joint' \
                            + params['static_spk'] * '_static' + '.npy'

    # Define npy file name with array of unique speakers in dataset
    npy_name_spk_id = 'npy_datasets/spk_id.npy'

    # Get file names from partition's list
    file_names = open(
        str(params['datasets_path']) + 'generate_cond_gina.list',
        'r').read().splitlines()

    spk_names = open(
        str(params['datasets_path']) + 'generate_spk_gina.list',
        'r').read().splitlines()

    datasets_path = os.path.join(params['datasets_path'], params['cond_set'])

    spk = np.load(npy_name_spk_id)

    if len(spk_names) != len(file_names):
        print(
            'Length of speaker file does not match length of conditioner file.')
        quit()

    print('Generating', len(file_names), 'audio files')

    for i in range(len(file_names)):
        print('Generating Audio', i)
        print('Generating...', file_names[i])

        # Load CC conditioner
        c = np.loadtxt(datasets_path + file_names[i] + '.cc')

        # Load LF0 conditioner
        f0file = np.loadtxt(datasets_path + file_names[i] + '.lf0')
        f0, _ = interpolation(f0file, -10000000000)
        f0 = f0.reshape(f0.shape[0], 1)

        # Load FV conditioner
        fvfile = np.loadtxt(datasets_path + file_names[i] + '.gv')
        fv, uv = interpolation(fvfile, 1e3)
        num_fv = fv.shape[0]
        uv = uv.reshape(num_fv, 1)
        fv = fv.reshape(num_fv, 1)

        # Load speaker conditioner
        speaker = np.where(spk == spk_names[i])[0][0]

        cond = np.concatenate((c, f0), axis=1)
        cond = np.concatenate((cond, fv), axis=1)
        cond = np.concatenate((cond, uv), axis=1)

        # Load maximum and minimum of de-normalized conditioners
        min_cond = np.load(npy_name_min_max_cond)[0]
        max_cond = np.load(npy_name_min_max_cond)[1]

        # Normalize conditioners with absolute maximum and minimum for each speaker of training partition
        if params['norm_ind']:
            print(
                'Normalizing conditioners for each speaker of training dataset'
            )
            cond = (cond - min_cond[speaker]) / (max_cond[speaker] -
                                                 min_cond[speaker])
        else:
            print('Normalizing conditioners jointly')
            cond = (cond - min_cond) / (max_cond - min_cond)

        print('Shape cond', cond.shape)
        if params['look_ahead']:
            delayed = np.copy(cond)
            delayed[:-1, :] = delayed[1:, :]
            cond = np.concatenate((cond, delayed), axis=1)
            print('Shape cond after look ahead', cond.shape)

        print(cond.shape)
        seed = params.get('seed')
        init_random_seed(seed, use_cuda)

        spk_dim = len([
            i for i in os.listdir(
                os.path.join(params['datasets_path'], params['cond_set']))
            if os.path.islink(
                os.path.join(params['datasets_path'], params['cond_set']) +
                '/' + i)
        ])

        print('Start Generate SampleRNN')
        model = SampleRNN(frame_sizes=params['frame_sizes'],
                          n_rnn=params['n_rnn'],
                          dim=params['dim'],
                          learn_h0=params['learn_h0'],
                          q_levels=params['q_levels'],
                          ulaw=params['ulaw'],
                          weight_norm=params['weight_norm'],
                          cond_dim=params['cond_dim'] *
                          (1 + params['look_ahead']),
                          spk_dim=spk_dim,
                          qrnn=params['qrnn'])
        print(model)

        if use_cuda:
            model = model.cuda()
            predictor = Predictor(model).cuda()
        else:
            predictor = Predictor(model)

        f_name = params['model']
        model_data = load_model(f_name)

        if model_data is None:
            sys.exit('ERROR: Model not found in ' + str(f_name))
        (state_dict, epoch_index, iteration) = model_data
        print('OK: Read model', f_name, '(epoch:', epoch_index, ')')
        print(state_dict)
        predictor.load_state_dict(state_dict)

        original_name = file_names[i].split('/')[1]
        if original_name == "..":
            original_name = file_names[i].split('/')[3]

        generator = RunGenerator(model=model,
                                 sample_rate=params['sample_rate'],
                                 cuda=use_cuda,
                                 epoch=epoch_index,
                                 cond=cond,
                                 spk_list=spk,
                                 speaker=speaker,
                                 checkpoints_path=f_name,
                                 original_name=original_name)

        generator(params['n_samples'], params['sample_length'], cond, speaker)
Example #15
File: main.py Project: ElliotTuck/SBFST
        for e1, e2 in zip(indx, b_label.numpy()):
            if e1[0] == e2:
                correct += 1
        acc = 1.0 * correct / data.shape[0]
        return acc


voc_size = len(Sigma)
lstm_dim = 10
batch_size = 128
num_of_layers = 1
num_of_directions = 1
num_epochs = 300
clip = 1.0

predictor = Predictor(voc_size, lstm_dim)
optimizer = optim.Adam(predictor.parameters())
criterion = nn.NLLLoss()

best_dev_acc = 0.0
best_model_wts = copy.deepcopy(predictor.state_dict())
best_test1_acc = 0.0
best_test2_acc = 0.0
best_test3_acc = 0.0
best_epoch_num = 0

total_epoch_num = 0
all_losses = []
all_acc_1 = []
all_acc_2 = []
all_acc_3 = []
def main(exp, frame_sizes, dataset, **params):
    scheduler = True
    use_cuda = torch.cuda.is_available()
    print('Start Sample-RNN')
    params = dict(
        default_params,
        exp=exp, frame_sizes=frame_sizes, dataset=dataset,
        **params
    )
    seed = params.get('seed')
    init_random_seed(seed, use_cuda)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    spk_dim = len([i for i in os.listdir(os.path.join(params['datasets_path'], params['dataset']))
                   if os.path.islink(os.path.join(params['datasets_path'], params['dataset']) + '/' + i)])

    print('Create model')
    model = SampleRNN(
        frame_sizes=params['frame_sizes'],
        n_rnn=params['n_rnn'],
        dim=params['dim'],
        learn_h0=params['learn_h0'],
        q_levels=params['q_levels'],
        ulaw=params['ulaw'],
        weight_norm=params['weight_norm'],
        cond_dim=params['cond_dim']*(1+params['look_ahead']),
        spk_dim=spk_dim,
        qrnn=params['qrnn']
    )
    if use_cuda:
        model = model.cuda()
        predictor = Predictor(model).cuda()
    else:
        predictor = Predictor(model)

    print('Done!')
    f_name = params['model']
    if f_name is not None:
        print('pre train with', f_name)
        model_data = load_model(f_name)
        if model_data is None:
            sys.exit('ERROR: Model not found in ' + str(f_name))
        (state_dict, epoch_index, iteration) = model_data
        print('OK: Read model', f_name, '(epoch:', epoch_index, ')')
        print(state_dict)
        predictor.load_state_dict(state_dict)
    print('predictor', predictor)
    for name, param in predictor.named_parameters():
        print(name, param.size())

    optimizer = torch.optim.Adam(predictor.parameters(), lr=params['learning_rate'])
    if params['scheduler']:
        scheduler = MultiStepLR(optimizer, milestones=[15, 35], gamma=0.1)
    optimizer = gradient_clipping(optimizer)
    print('Saving results in path', results_path)
    print('Read data')
    data_loader = make_data_loader(model.lookback, params)
    print('Done!')
    data_model = data_loader('train')

    show_dataset = False
    if show_dataset:
        for i, full in enumerate(data_model):
            print('Data Loader---------------------------------------')
            print('batch', i)
            (data, reset, target, cond) = full           
            print('Data', data.size())
            print('Target', target.size())

    if not params['scheduler']:    
        scheduler = None
    if use_cuda:
        cuda = True
    else:
        cuda = False
    writer = SummaryWriter(log_dir='sample_board')
    trainer = Trainer(
        predictor, sequence_nll_loss_bits, optimizer, data_model, cuda, writer, scheduler
    )

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)

    trainer.register_plugin(TrainingLossMonitor(
        smoothing=params['loss_smoothing']
    ))
    trainer.register_plugin(ValidationPlugin(
        data_loader('validation'),
        data_loader('test'),
        writer
    ))
    trainer.register_plugin(AbsoluteTimeMonitor())
    trainer.register_plugin(SaverPlugin(
        checkpoints_path, params['keep_old_checkpoints']
    ))

    trainer.register_plugin(
        Logger([
            'training_loss',
            'validation_loss',
            'test_loss',
            'time'
        ])
    )

    trainer.register_plugin(StatsPlugin(
        results_path,
        iteration_fields=[
            'training_loss',
            ('training_loss', 'running_avg'),
            'time'
        ],
        epoch_fields=[
            'validation_loss',
            'test_loss',
            'time'
        ],
        plots={
            'loss': {
                'x': 'iteration',
                'ys': [
                    'training_loss',
                    ('training_loss', 'running_avg'),
                    'validation_loss',
                    'test_loss',
                ],
                'log_y': True
            }
        }
    ))
    
    trainer.run(params['epoch_limit'])
Example #17
# For loop to vary the seed and train "training_qnt" times
for i in range(1, training_qnt + 1):
    # splitting Dataset samples in train and test
    x_train, x_test, y_train, y_test = train_test_split(x,
                                                        y,
                                                        test_size=1 / 3,
                                                        random_state=None)

    # Get Regressors
    regressor_RF5 = get_rf_regressor(5, 'mse')
    regressor_RF20 = get_rf_regressor(20, 'mse')
    regressor_MLP55 = get_mlp_regressor((5, 5), 'tanh', 'lbfgs', 1000)
    regressor_MLP2020 = get_mlp_regressor((20, 20), 'tanh', 'lbfgs', 1000)

    y_pred_RF5 = Predictor(regressor_RF5, x_train, y_train, x_test).predict()
    y_pred_RF20 = Predictor(regressor_RF20, x_train, y_train, x_test).predict()
    y_pred_MLP55 = Predictor(regressor_MLP55, x_train, y_train,
                             x_test).predict()
    y_pred_MLP2020 = Predictor(regressor_MLP2020, x_train, y_train,
                               x_test).predict()

    results_RF5 = Results(y_test, y_pred_RF5, i, 'RF', 'RF5')
    results_RF20 = Results(y_test, y_pred_RF20, i, 'RF', 'RF20')
    results_MLP55 = Results(y_test, y_pred_MLP55, i, 'MLP', 'MLP55')
    results_MLP2020 = Results(y_test, y_pred_MLP2020, i, 'MLP', 'MLP2020')

    results_RF5.save_to_txt()
    results_RF20.save_to_txt()
    results_MLP55.save_to_txt()
    results_MLP2020.save_to_txt()
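get_rf_regressor and get_mlp_regressor above are small factory helpers defined elsewhere in the project; plausible implementations, assuming they wrap the corresponding scikit-learn estimators directly, would be:

from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor

def get_rf_regressor(n_estimators, criterion):
    # e.g. get_rf_regressor(5, 'mse') as used above
    return RandomForestRegressor(n_estimators=n_estimators, criterion=criterion)

def get_mlp_regressor(hidden_layer_sizes, activation, solver, max_iter):
    # e.g. get_mlp_regressor((5, 5), 'tanh', 'lbfgs', 1000) as used above
    return MLPRegressor(hidden_layer_sizes=hidden_layer_sizes,
                        activation=activation,
                        solver=solver,
                        max_iter=max_iter)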
Example #18
res_x = x_train[0].shape[1]
res_y = x_train[0].shape[2]
num_c = x_train[0].shape[3]
batch_size = 64
n_epochs = 100


x_train = np.expand_dims(x_train, -1)
x_train = np.squeeze(np.swapaxes(x_train, 1, -1))
x_train = np.reshape(x_train, [len(x_train), res_x, res_y, -1])


model = Predictor(H=res_x,
                  W=res_y,
                  C=54,
                  l_rate=0.1,
                  n_filters=16,
                  n_layers=3,
                  n_outs=1,
                  p_type='regressor')

sess = tf.Session()
sess.run(tf.initializers.global_variables())


for n in range(n_epochs):
    for b in (range(0, n_instances, batch_size)):
        x = np.asarray(x_train[b:b+batch_size])
        t = np.asarray(t_train[b:b+batch_size])

        feed = {model.x: x,
                model.t: t,
    # Get dimensions of the data
    input_dim = next(iter(dataloader_train))[0].shape[1]
    protected_dim = next(iter(dataloader_train))[2].shape[1]
    output_dim = next(iter(dataloader_train))[1].shape[1]

    # Fairness method
    equality_of_odds = True

    # Initialize the predictor based on the dataset
    if args.dataset_name == 'images':
        predictor = ImagePredictor(input_dim, output_dim).to(device)
        equality_of_odds = False
        pytorch_total_params = sum(p.numel() for p in predictor.parameters() if p.requires_grad)
        logger.info(f'Number of trainable parameters: {pytorch_total_params}')
    else:
        predictor = Predictor(input_dim).to(device)

    # Initialize the adversary
    adversary = Adversary(input_dim=output_dim, protected_dim=protected_dim, equality_of_odds=equality_of_odds).to(device) if args.debias else None

    # Initialize optimizers
    optimizer_P = torch.optim.Adam(predictor.parameters(), lr=args.predictor_lr)
    optimizer_A = torch.optim.Adam(adversary.parameters(), lr=args.adversary_lr) if args.debias else None

    # Setup the learning rate scheduler
    utils.decayer.step_count = 1
    if args.lr_scheduler == 'exp':
        scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer_P, gamma=0.96) if args.debias else None
    elif args.lr_scheduler == 'lambda':
        scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer_P, utils.decayer) if args.debias else None
Example #20
    n_dataset = train_images.shape[0]

    train_data_size = 200 if debug else int(n_dataset * 0.9)
    valid_data_size = 100 if debug else int(n_dataset - train_data_size)
    perm = np.random.RandomState(random_seed).permutation(n_dataset)
    train_dataset = BengaliAIDataset(train_images,
                                     train_labels,
                                     transform=Transform(train_aug_config),
                                     indices=perm[:train_data_size])
    valid_dataset = BengaliAIDataset(
        train_images,
        train_labels,
        transform=Transform(valid_aug_config),
        indices=perm[train_data_size:train_data_size + valid_data_size])

    predictor = Predictor(weights_path=trained_weights_path)
    classifier = Classifier(predictor).to(device)

    train_loader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              shuffle=True)
    valid_loader = DataLoader(valid_dataset,
                              batch_size=batch_size,
                              shuffle=False)

    optimizer = torch.optim.Adam(classifier.parameters(), lr=initial_lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           mode='min',
                                                           factor=0.5,
                                                           patience=2,
                                                           min_lr=1e-10)

def wrap(input):
    if torch.is_tensor(input):
        input = Variable(input)
        if params['cuda']:
            input = input.cuda()
    return input


for data in dataset:
    batch_inputs = data[:-1]
    batch_target = data[-1]
    batch_inputs = list(map(wrap, batch_inputs))

    batch_target = Variable(batch_target)
    if params['cuda']:
        batch_target = batch_target.cuda()

    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    prediction = predictor(*batch_inputs)  # , reset=False)
    prediction_data = prediction.data
    print(prediction)

    # Predict audios from 1 samples
    break
Example #22
def main(exp, frame_sizes, dataset, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  dataset=dataset,
                  **params)

    os.environ['CUDA_VISIBLE_DEVICES'] = params['gpu']

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'],
                      dropout=params['dropout'])
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(
        torch.optim.Adam(predictor.parameters(), lr=params['lr']))

    data_loader = make_data_loader(model.lookback, params)
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    criterion = sequence_nll_loss_bits

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path, params)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        start_epoch = int(epoch)
        global_step = iteration
        predictor.load_state_dict(state_dict)
    else:
        start_epoch = 0
        global_step = 0

    #writer = SummaryWriter("runs/{}-{}".format(params['dataset'], str(datetime.datetime.now()).split('.')[0].replace(' ', '-')))
    writer = SummaryWriter(
        os.path.join(
            results_path, "{}-{}".format(
                params['dataset'],
                str(datetime.datetime.now()).split('.')[0].replace(' ', '-'))))
    dataset_train = data_loader(0, val_split, eval=False)
    dataset_val = data_loader(val_split, test_split, eval=True)
    dataset_test = data_loader(test_split, 1, eval=True)

    generator = Generator(predictor.model, params['cuda'])
    best_val_loss = 10000000000000

    for e in range(start_epoch, int(params['epoch_limit'])):
        for i, data in enumerate(dataset_train):

            batch_inputs = data[:-1]
            batch_target = data[-1]

            def wrap(input):
                if torch.is_tensor(input):
                    input = torch.autograd.Variable(input)
                    if params['cuda']:
                        input = input.cuda()
                return input

            batch_inputs = list(map(wrap, batch_inputs))

            batch_target = torch.autograd.Variable(batch_target)
            if params['cuda']:
                batch_target = batch_target.cuda()

            plugin_data = [None, None]

            def closure():
                batch_output = predictor(*batch_inputs)

                loss = criterion(batch_output, batch_target)
                loss.backward()

                if plugin_data[0] is None:
                    plugin_data[0] = batch_output.data
                    plugin_data[1] = loss.data

                return loss

            optimizer.zero_grad()
            optimizer.step(closure)
            train_loss = plugin_data[1]

            # stats: iteration
            writer.add_scalar('train/train loss', train_loss, global_step)
            print("E:{:03d}-S{:05d}: Loss={}".format(e, i, train_loss))
            global_step += 1

        # validation: per epoch
        predictor.eval()
        with torch.no_grad():
            loss_sum = 0
            n_examples = 0
            for data in dataset_val:
                batch_inputs = data[:-1]
                batch_target = data[-1]
                batch_size = batch_target.size()[0]

                def wrap(input):
                    if torch.is_tensor(input):
                        input = torch.autograd.Variable(input)
                        if params['cuda']:
                            input = input.cuda()
                    return input

                batch_inputs = list(map(wrap, batch_inputs))

                batch_target = torch.autograd.Variable(batch_target)
                if params['cuda']:
                    batch_target = batch_target.cuda()

                batch_output = predictor(*batch_inputs)

                loss_sum += criterion(batch_output,
                                      batch_target).item() * batch_size

                n_examples += batch_size

            val_loss = loss_sum / n_examples
            writer.add_scalar('validation/validation loss', val_loss,
                              global_step)
            print("== Validation Step E:{:03d}: Loss={} ==".format(
                e, val_loss))

        predictor.train()

        # saver: epoch
        last_pattern = 'ep{}-it{}'
        best_pattern = 'best-ep{}-it{}'
        if not params['keep_old_checkpoints']:
            pattern = os.path.join(checkpoints_path,
                                   last_pattern.format('*', '*'))
            for file_name in glob(pattern):
                os.remove(file_name)
        torch.save(
            predictor.state_dict(),
            os.path.join(checkpoints_path, last_pattern.format(e,
                                                               global_step)))

        cur_val_loss = val_loss
        if cur_val_loss < best_val_loss:
            pattern = os.path.join(checkpoints_path,
                                   last_pattern.format('*', '*'))
            for file_name in glob(pattern):
                os.remove(file_name)
            torch.save(
                predictor.state_dict(),
                os.path.join(checkpoints_path,
                             best_pattern.format(e, global_step)))
            best_val_loss = cur_val_loss

        generate_sample(generator, params, writer, global_step, results_path,
                        e)

    # generate final results
    generate_sample(generator, params, None, global_step, results_path, 0)
Example #23
def _get_predictor():
    model, vocab = load_model()
    predictor = Predictor(model, vocab)
    return predictor
Example #24
    a['Close'].replace(0, np.nan, inplace=True)
    a['Close'].fillna(method='ffill', inplace=True)
    values = a['Close'].values.reshape(-1, 1)
    values = values.astype('float32')
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled = scaler.fit_transform(values)
    train_size = int(len(scaled) * 0.8)
    valid_size = int(len(scaled) * 0.9)
    test_size = len(scaled) - train_size
    train, valid = scaled[0:train_size, :], scaled[train_size:valid_size, :]
    test = scaled[valid_size:, :]
    train_dict = create_data_dict(train, 4)
    test_dict = create_data_dict(valid, 4)
    model = LSTMRegressor(input_size=1,
                          hidden_size=32,
                          dropout_rate=0.5
                          )

    predictor = Predictor(model=model,
                          train_data=train_dict,
                          test_data=test_dict,
                          batch_size=64,
                          use_cuda=True)

    predictor.fit(300, 30)
    results = predictor.predict(predictor.test_loader).reshape(1, -1)[0]
    test_y = test_dict["y"].numpy().reshape(1, -1)[0]
    pyplot.plot(results, label='predict')
    pyplot.plot(test_y, label='true')
    pyplot.legend()
    pyplot.show()
Example #25
                    default=30,
                    help='number of output predicted frames')

parser.add_argument('--batch_size',
                    type=int,
                    default=8,
                    help='mini-batch size')
args = parser.parse_args()

if __name__ == '__main__':
    if not os.path.isdir(args.test_result_dir):
        os.makedirs(args.test_result_dir)

    # define the model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    pred_model = Predictor(args).to(device)
    pred_model = nn.DataParallel(pred_model)

    # load checkpoint
    pred_model.load_state_dict(torch.load(args.checkpoint_load_file))
    print('Checkpoint is loaded from ' + args.checkpoint_load_file)

    # prepare dataloader for selected dataset
    if args.dataset == 'movingmnist':
        test_dataset = MovingMNIST(args.test_data_dir,
                                   seq_len=args.short_len + args.out_len,
                                   train=False)
        testloader = DataLoader(test_dataset,
                                batch_size=args.batch_size,
                                shuffle=False,
                                num_workers=args.workers,
Example #26
def main(exp, frame_sizes, dataset, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  dataset=dataset,
                  **params)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    data_loader = make_data_loader(model.lookback, params)
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    trainer = Trainer(predictor,
                      sequence_nll_loss_bits,
                      optimizer,
                      data_loader(0, val_split, eval=False),
                      cuda=params['cuda'])

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)

    trainer.register_plugin(
        TrainingLossMonitor(smoothing=params['loss_smoothing']))
    trainer.register_plugin(
        ValidationPlugin(data_loader(val_split, test_split, eval=True),
                         data_loader(test_split, 1, eval=True)))
    trainer.register_plugin(AbsoluteTimeMonitor())
    trainer.register_plugin(
        SaverPlugin(checkpoints_path, params['keep_old_checkpoints']))
    trainer.register_plugin(
        GeneratorPlugin(os.path.join(results_path,
                                     'samples'), params['n_samples'],
                        params['sample_length'], params['sample_rate']))
    trainer.register_plugin(
        Logger(['training_loss', 'validation_loss', 'test_loss', 'time']))
    trainer.register_plugin(
        StatsPlugin(results_path,
                    iteration_fields=[
                        'training_loss', ('training_loss', 'running_avg'),
                        'time'
                    ],
                    epoch_fields=['validation_loss', 'test_loss', 'time'],
                    plots={
                        'loss': {
                            'x':
                            'iteration',
                            'ys': [
                                'training_loss',
                                ('training_loss', 'running_avg'),
                                'validation_loss',
                                'test_loss',
                            ],
                            'log_y':
                            True
                        }
                    }))

    init_comet(params, trainer)

    trainer.run(params['epoch_limit'])
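make_data_loader above returns a callable that is later invoked with fractional split bounds, e.g. data_loader(0, val_split, eval=False); a rough sketch of that pattern is shown below, with the dataset construction and the use of overlap_len (the model lookback) left abstract because they are repository-specific.

from torch.utils.data import DataLoader, Subset

def make_data_loader(overlap_len, params, dataset=None):
    # `dataset` stands in for the repo's audio dataset; passing it in is an assumption.
    def loader(split_from, split_to, eval=False):
        n = len(dataset)
        indices = list(range(int(n * split_from), int(n * split_to)))
        return DataLoader(Subset(dataset, indices),
                          batch_size=params['batch_size'],
                          shuffle=not eval,
                          drop_last=True)
    return loader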
Example #27
                    type=int,
                    default=5000,
                    help='number of iterations for warming up model')
parser.add_argument('--print_freq',
                    type=int,
                    default=1000,
                    help='frequency of printing logs')
args = parser.parse_args()

if __name__ == '__main__':
    if not os.path.isdir(args.checkpoint_save_dir):
        os.makedirs(args.checkpoint_save_dir)

    # define the model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    pred_model = Predictor(args).to(device)
    pred_model = nn.DataParallel(pred_model)

    # optionally load checkpoint
    if args.checkpoint_load:
        pred_model.load_state_dict(torch.load(args.checkpoint_load_file))
        print('Checkpoint is loaded from ' + args.checkpoint_load_file)

    # prepare dataloader for selected dataset
    if args.dataset == 'movingmnist':
        train_dataset = MovingMNIST(args.train_data_dir,
                                    seq_len=args.short_len + args.out_len,
                                    train=True)
        trainloader = DataLoader(train_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=True,
Example #28
def main(exp, frame_sizes, dataset, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  dataset=dataset,
                  **params)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    # Save samplernn parameters in .json for future audio generation
    import json
    with open(os.path.join(results_path, 'sample_rnn_params.json'), 'w') as fp:
        json.dump(params, fp, sort_keys=True, indent=4)

    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    data_loader = make_data_loader(model.lookback, params)
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    trainer = Trainer(predictor,
                      sequence_nll_loss_bits,
                      optimizer,
                      data_loader(0, val_split, eval=False),
                      cuda=params['cuda'])

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path)

    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)
    else:
        trainer.epochs = 0
        trainer.iterations = 0
        torch.save(predictor,
                   os.path.join(checkpoints_path, "pytorch_model.bin"))
    # else:
    #     print("***** Saving fine-tuned model *****")
    #     output_model_file = os.path.join(params['results_path'], "pytorch_model.bin")
    #     if params['cuda']:
    #         torch.save(predictor, output_model_file)
    #     else:
    #         torch.save(predictor, output_model_file)

    trainer.register_plugin(
        TrainingLossMonitor(smoothing=params['loss_smoothing']))
    trainer.register_plugin(
        ValidationPlugin(data_loader(val_split, test_split, eval=True),
                         data_loader(test_split, 1, eval=True)))
    trainer.register_plugin(AbsoluteTimeMonitor())
    trainer.register_plugin(
        SaverPlugin(checkpoints_path, params['keep_old_checkpoints']))
    trainer.register_plugin(
        GeneratorPlugin(os.path.join(results_path, 'samples'),
                        params['n_samples'], params['sample_length'],
                        params['sample_rate'], params['sampling_temperature']))
    trainer.register_plugin(
        Logger(['training_loss', 'validation_loss', 'test_loss', 'time']))
    trainer.register_plugin(
        StatsPlugin(results_path,
                    iteration_fields=[
                        'training_loss', ('training_loss', 'running_avg'),
                        'time'
                    ],
                    epoch_fields=['validation_loss', 'test_loss', 'time'],
                    plots={
                        'loss': {
                            'x':
                            'iteration',
                            'ys': [
                                'training_loss',
                                ('training_loss', 'running_avg'),
                                'validation_loss',
                                'test_loss',
                            ],
                            'log_y':
                            True
                        }
                    }))

    init_comet(params, trainer)

    trainer.run(params['epoch_limit'])
Example #29
os.makedirs(samples_path, exist_ok=True)

# sys.stderr.write("available models are: {}".format(listdir(modeldir)))
modelpath = os.path.join(modeldir, modelname)

srnn_model1 = SampleRNN(frame_sizes=[4, 16],
                        n_rnn=2,
                        dim=1024,
                        learn_h0=True,
                        q_levels=256,
                        weight_norm=True)

if torch.cuda.is_available():
    srnn_model1 = srnn_model1.cuda()

predictor1 = Predictor(srnn_model1)

if torch.cuda.is_available():
    predictor1 = predictor1.cuda()

if torch.cuda.is_available():
    predictor1.load_state_dict(torch.load(modelpath)['model'])
else:
    predictor1.load_state_dict(
        torch.load(modelpath, map_location='cpu')['model'])

print("model loaded successfully!")

generate = Generator(srnn_model1, True)

import time