def train(features,
          fea_len,
          split_frac,
          out_file,
          save=False,
          save_folder=None):
    '''
    Hyperparameters:
        features -- the input features
        split_frac -- fraction of data used for training
        fea_len -- feature length
    '''
    if isinstance(out_file, str):
        out_file = open(out_file, 'w')
    d = Dataset(features, split_frac, 1, gpu)
    print('defining architecture')
    enc = ChainEncoder(d.get_v_fea_len(), d.get_e_fea_len(), fea_len, 'last')
    predictor = Predictor(fea_len)
    loss = nn.NLLLoss()
    if gpu:
        enc.cuda()
        predictor.cuda()
        loss.cuda()

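    # one Adam optimizer updates the encoder and predictor parameters jointly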
    optimizer = optim.Adam(
        list(enc.parameters()) + list(predictor.parameters()))

    print('training')
    test_v_features, test_e_features, test_A_pls, test_B_pls, test_y = \
        d.get_test_pairs()
    test_y = test_y.data.cpu().numpy()
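    # training loop: draw a fresh batch of 100 pairs each iteration, then
    # measure accuracy on the fixed test pairs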
    for train_iter in range(12000):
        v_features, e_features, A_pls, B_pls, y = d.get_train_pairs(100)
        enc.zero_grad()
        predictor.zero_grad()
        A_code, B_code = encode(enc, fea_len, v_features, e_features, A_pls,
                                B_pls)
        softmax_output = predictor(A_code, B_code)
        loss_val = loss(softmax_output, y)
        loss_val.backward()
        optimizer.step()

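        # per-iteration evaluation on the held-out test pairs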
        enc.zero_grad()
        predictor.zero_grad()
        test_A_code, test_B_code = encode(enc, fea_len, test_v_features,
                                          test_e_features, test_A_pls,
                                          test_B_pls)
        softmax_output = predictor(test_A_code, test_B_code).data.cpu().numpy()
        test_y_pred = softmax_output.argmax(axis=1)
        cur_acc = (test_y_pred == test_y).mean()
        out_file.write('%f\n' % cur_acc)
        out_file.flush()
        if save and train_iter % 50 == 0:
            save_folder = save_folder.rstrip('/')
            torch.save(enc.state_dict(),
                       '%s/%i_enc.model' % (save_folder, train_iter))
            torch.save(predictor.state_dict(),
                       '%s/%i_pred.model' % (save_folder, train_iter))
    out_file.close()
Example #2
def main(exp, frame_sizes, dataset, **params):
    params = dict(
        default_params,
        exp=exp, frame_sizes=frame_sizes, dataset=dataset,
        **params
    )

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    model = SampleRNN(
        frame_sizes=params['frame_sizes'],
        n_rnn=params['n_rnn'],
        dim=params['dim'],
        learn_h0=params['learn_h0'],
        q_levels=params['q_levels'],
        weight_norm=params['weight_norm']
    )
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']
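    # splits: [0, val_split) train, [val_split, test_split) validation,
    # [test_split, 1) test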
    data_loader = make_data_loader(model.lookback, params)

    train_data_loader = data_loader(0, val_split, eval=False)
    val_data_loader = data_loader(val_split, test_split, eval=True)
    test_data_loader = data_loader(test_split, 1, eval=True)
Example #3
def train(features, fea_len, split_frac, out_file):
    if isinstance(out_file, str):
        out_file = open(out_file, 'w')
    d = Dataset(features, split_frac, gpu)
    print('defining architecture')
    enc = ChainEncoder(d.get_v_fea_len(), d.get_e_fea_len(), fea_len, 'last')
    predictor = Predictor(fea_len)
    loss = nn.NLLLoss()
    if gpu:
        enc.cuda()
        predictor.cuda()
        loss.cuda()

    optimizer = optim.Adam(
        list(enc.parameters()) + list(predictor.parameters()))

    print('training')
    test_chain_A, test_chain_B, test_y = d.get_test_pairs()
    test_y = test_y.data.cpu().numpy()
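    # train on batches of 1000 chain pairs and log test accuracy every iteration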
    for train_iter in range(4000):
        chains_A, chains_B, y = d.get_train_pairs(1000)
        enc.zero_grad()
        predictor.zero_grad()
        output_A = enc(chains_A)
        output_B = enc(chains_B)
        softmax_output = predictor(output_A, output_B)
        loss_val = loss(softmax_output, y)
        loss_val.backward()
        optimizer.step()

        enc.zero_grad()
        predictor.zero_grad()
        output_test_A = enc(test_chain_A)
        output_test_B = enc(test_chain_B)
        softmax_output = predictor(output_test_A,
                                   output_test_B).data.cpu().numpy()
        test_y_pred = softmax_output.argmax(axis=1)
        cur_acc = (test_y_pred == test_y).mean()
        print('test acc:', cur_acc)
        out_file.write('%f\n' % cur_acc)
        if train_iter % 50 == 0:
            torch.save(enc.state_dict(), 'ckpt/%i_encoder.model' % train_iter)
            torch.save(predictor.state_dict(),
                       'ckpt/%i_predictor.model' % train_iter)
    out_file.close()
Example #4
def main(exp, frame_sizes, dataset, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  dataset=dataset,
                  **params)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

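    # gradient_clipping wraps the optimizer so gradients are clipped before
    # each update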
    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    data_loader = make_data_loader(model.lookback, params)

    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    samples_path = os.path.join(results_path, 'samples')
    trainer = Trainer(predictor,
                      sequence_nll_loss_bits,
                      optimizer,
                      data_loader(0, val_split, eval=False),
                      checkpoints_path,
                      samples_path,
                      params['n_samples'],
                      params['sample_length'],
                      params['sample_rate'],
                      data_loader(val_split, test_split, eval=True),
                      data_loader(test_split, 1, eval=True),
                      cuda=params['cuda'])

    checkpoint_data = load_last_checkpoint(checkpoints_path)
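    # resume epoch/iteration counters and weights if a checkpoint was found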
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)
    trainer.run(params['epoch_limit'])
Example #5
def main(exp, frame_sizes, dataset, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  dataset=dataset,
                  **params)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    # Save samplernn parameters in .json for future audio generation
    import json
    with open(os.path.join(results_path, 'sample_rnn_params.json'), 'w') as fp:
        json.dump(params, fp, sort_keys=True, indent=4)

    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    data_loader = make_data_loader(model.lookback, params)
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    trainer = Trainer(predictor,
                      sequence_nll_loss_bits,
                      optimizer,
                      data_loader(0, val_split, eval=False),
                      cuda=params['cuda'])

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path)

    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)
    else:
        trainer.epochs = 0
        trainer.iterations = 0
        torch.save(predictor,
                   os.path.join(checkpoints_path, "pytorch_model.bin"))

    trainer.register_plugin(
        TrainingLossMonitor(smoothing=params['loss_smoothing']))
    trainer.register_plugin(
        ValidationPlugin(data_loader(val_split, test_split, eval=True),
                         data_loader(test_split, 1, eval=True)))
    trainer.register_plugin(AbsoluteTimeMonitor())
    trainer.register_plugin(
        SaverPlugin(checkpoints_path, params['keep_old_checkpoints']))
    trainer.register_plugin(
        GeneratorPlugin(os.path.join(results_path, 'samples'),
                        params['n_samples'], params['sample_length'],
                        params['sample_rate'], params['sampling_temperature']))
    trainer.register_plugin(
        Logger(['training_loss', 'validation_loss', 'test_loss', 'time']))
    trainer.register_plugin(
        StatsPlugin(results_path,
                    iteration_fields=[
                        'training_loss', ('training_loss', 'running_avg'),
                        'time'
                    ],
                    epoch_fields=['validation_loss', 'test_loss', 'time'],
                    plots={
                        'loss': {
                            'x':
                            'iteration',
                            'ys': [
                                'training_loss',
                                ('training_loss', 'running_avg'),
                                'validation_loss',
                                'test_loss',
                            ],
                            'log_y':
                            True
                        }
                    }))

    init_comet(params, trainer)

    trainer.run(params['epoch_limit'])
Example #6
def main(exp, frame_sizes, dataset, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  dataset=dataset,
                  **params)

    os.environ['CUDA_VISIBLE_DEVICES'] = params['gpu']

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'],
                      dropout=params['dropout'])
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(
        torch.optim.Adam(predictor.parameters(), lr=params['lr']))

    data_loader = make_data_loader(model.lookback, params)
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    criterion = sequence_nll_loss_bits

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path, params)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        start_epoch = int(epoch)
        global_step = iteration
        predictor.load_state_dict(state_dict)
    else:
        start_epoch = 0
        global_step = 0

    writer = SummaryWriter(
        os.path.join(
            results_path, "{}-{}".format(
                params['dataset'],
                str(datetime.datetime.now()).split('.')[0].replace(' ', '-'))))
    dataset_train = data_loader(0, val_split, eval=False)
    dataset_val = data_loader(val_split, test_split, eval=True)
    dataset_test = data_loader(test_split, 1, eval=True)

    generator = Generator(predictor.model, params['cuda'])
    best_val_loss = float('inf')

    for e in range(start_epoch, int(params['epoch_limit'])):
        for i, data in enumerate(dataset_train):

            batch_inputs = data[:-1]
            batch_target = data[-1]

            def wrap(input):
                if torch.is_tensor(input):
                    input = torch.autograd.Variable(input)
                    if params['cuda']:
                        input = input.cuda()
                return input

            batch_inputs = list(map(wrap, batch_inputs))

            batch_target = torch.autograd.Variable(batch_target)
            if params['cuda']:
                batch_target = batch_target.cuda()

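            # plugin_data captures the batch output and loss computed inside
            # the closure so they can be logged after the optimizer step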
            plugin_data = [None, None]

            def closure():
                batch_output = predictor(*batch_inputs)

                loss = criterion(batch_output, batch_target)
                loss.backward()

                if plugin_data[0] is None:
                    plugin_data[0] = batch_output.data
                    plugin_data[1] = loss.data

                return loss

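            # step(closure) runs the forward/backward pass defined above and
            # then applies the Adam update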
            optimizer.zero_grad()
            optimizer.step(closure)
            train_loss = plugin_data[1]

            # stats: iteration
            writer.add_scalar('train/train loss', train_loss, global_step)
            print("E:{:03d}-S{:05d}: Loss={}".format(e, i, train_loss))
            global_step += 1

        # validation: per epoch
        predictor.eval()
        with torch.no_grad():
            loss_sum = 0
            n_examples = 0
            for data in dataset_val:
                batch_inputs = data[:-1]
                batch_target = data[-1]
                batch_size = batch_target.size()[0]

                def wrap(input):
                    if torch.is_tensor(input):
                        input = torch.autograd.Variable(input)
                        if params['cuda']:
                            input = input.cuda()
                    return input

                batch_inputs = list(map(wrap, batch_inputs))

                batch_target = torch.autograd.Variable(batch_target)
                if params['cuda']:
                    batch_target = batch_target.cuda()

                batch_output = predictor(*batch_inputs)

                loss_sum += criterion(batch_output,
                                      batch_target).item() * batch_size

                n_examples += batch_size

            val_loss = loss_sum / n_examples
            writer.add_scalar('validation/validation loss', val_loss,
                              global_step)
            print("== Validation Step E:{:03d}: Loss={} ==".format(
                e, val_loss))

        predictor.train()

        # saver: epoch
        last_pattern = 'ep{}-it{}'
        best_pattern = 'best-ep{}-it{}'
        if not params['keep_old_checkpoints']:
            pattern = os.path.join(checkpoints_path,
                                   last_pattern.format('*', '*'))
            for file_name in glob(pattern):
                os.remove(file_name)
        torch.save(
            predictor.state_dict(),
            os.path.join(checkpoints_path, last_pattern.format(e,
                                                               global_step)))

        if val_loss < best_val_loss:
            # remove stale 'best' checkpoints before saving the new best
            pattern = os.path.join(checkpoints_path,
                                   best_pattern.format('*', '*'))
            for file_name in glob(pattern):
                os.remove(file_name)
            torch.save(
                predictor.state_dict(),
                os.path.join(checkpoints_path,
                             best_pattern.format(e, global_step)))
            best_val_loss = val_loss

        generate_sample(generator, params, writer, global_step, results_path,
                        e)

    # generate final results
    generate_sample(generator, params, None, global_step, results_path, 0)
Example #7
def main(exp, frame_sizes, dataset, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  dataset=dataset,
                  **params)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    data_loader = make_data_loader(model.lookback, params)
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    trainer = Trainer(predictor,
                      sequence_nll_loss_bits,
                      optimizer,
                      data_loader(0, val_split, eval=False),
                      cuda=params['cuda'])

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)

    trainer.register_plugin(
        TrainingLossMonitor(smoothing=params['loss_smoothing']))
    trainer.register_plugin(
        ValidationPlugin(data_loader(val_split, test_split, eval=True),
                         data_loader(test_split, 1, eval=True)))
    trainer.register_plugin(AbsoluteTimeMonitor())
    trainer.register_plugin(
        SaverPlugin(checkpoints_path, params['keep_old_checkpoints']))
    trainer.register_plugin(
        GeneratorPlugin(os.path.join(results_path,
                                     'samples'), params['n_samples'],
                        params['sample_length'], params['sample_rate']))
    trainer.register_plugin(
        Logger(['training_loss', 'validation_loss', 'test_loss', 'time']))
    trainer.register_plugin(
        StatsPlugin(results_path,
                    iteration_fields=[
                        'training_loss', ('training_loss', 'running_avg'),
                        'time'
                    ],
                    epoch_fields=['validation_loss', 'test_loss', 'time'],
                    plots={
                        'loss': {
                            'x':
                            'iteration',
                            'ys': [
                                'training_loss',
                                ('training_loss', 'running_avg'),
                                'validation_loss',
                                'test_loss',
                            ],
                            'log_y':
                            True
                        }
                    }))

    init_comet(params, trainer)

    trainer.run(params['epoch_limit'])
Example #8
def main(exp, dataset, **params):
    params = dict(default_params, exp=exp, dataset=dataset, **params)
    print(params)
    storage_client = None
    bucket = None

    path = os.path.join(params['datasets_path'], params['dataset'])

    if params['bucket']:
        storage_client = storage.Client()
        bucket = Bucket(storage_client, params['bucket'])
        preload_dataset(path, storage_client, bucket)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

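    # quantizer(q_method) returns a matched (quantize, dequantize) pair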
    (quantize, dequantize) = quantizer(params['q_method'])
    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model, dequantize)
    if params['cuda'] is not False:
        print(params['cuda'])
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(
        torch.optim.Adam(predictor.parameters(), lr=params['learning_rate']))

    data_loader = make_data_loader(path, model.lookback, quantize, params)
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    trainer = Trainer(predictor,
                      sequence_nll_loss_bits,
                      optimizer,
                      data_loader(0, val_split, eval=False),
                      cuda=params['cuda'])

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path, storage_client,
                                           bucket)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)

    trainer.register_plugin(
        TrainingLossMonitor(smoothing=params['loss_smoothing']))
    trainer.register_plugin(
        ValidationPlugin(data_loader(val_split, test_split, eval=True),
                         data_loader(test_split, 1, eval=True)))
    trainer.register_plugin(SchedulerPlugin(params['lr_scheduler_step']))

    def upload(file_path):
        if bucket is None:
            return

        name = file_path.replace(os.path.abspath(os.curdir) + '/', '')
        blob = Blob(name, bucket)
        try:
            blob.upload_from_filename(file_path, timeout=300)
        except Exception as e:
            print(str(e))

    trainer.register_plugin(AbsoluteTimeMonitor())

    samples_path = os.path.join(results_path, 'samples')
    trainer.register_plugin(
        SaverPlugin(checkpoints_path, params['keep_old_checkpoints'], upload))
    trainer.register_plugin(
        GeneratorPlugin(samples_path,
                        params['n_samples'],
                        params['sample_length'],
                        params['sample_rate'],
                        params['q_levels'],
                        dequantize,
                        params['sampling_temperature'],
                        upload=upload))
    trainer.register_plugin(
        Logger(['training_loss', 'validation_loss', 'test_loss', 'time']))
    trainer.register_plugin(
        StatsPlugin(
            results_path,
            iteration_fields=[
                'training_loss',
                #('training_loss', 'running_avg'),
                'time'
            ],
            epoch_fields=[
                'training_loss', ('training_loss', 'running_avg'),
                'validation_loss', 'test_loss', 'time'
            ],
            plots={
                'loss': {
                    'x':
                    'iteration',
                    'ys': [
                        'training_loss',
                        # ('training_loss', 'running_avg'),
                        'validation_loss',
                        'test_loss'
                    ],
                    'log_y':
                    True
                }
            }))

    init_comet(params, trainer, samples_path, params['n_samples'],
               params['sample_rate'])

    trainer.run(params['epoch_limit'])
Example #9
def main():
    # Data Loader (Input Pipeline)
    print('loading dataset...')
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               num_workers=4,
                                               drop_last=True,
                                               shuffle=True)

    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=batch_size,
                                              num_workers=4,
                                              drop_last=True,
                                              shuffle=False)
    # Define models
    print('building model...')
    g = Generator(input_channel=input_channel)
    h = Predictor(n_outputs=num_classes)
    h2 = Predictor(n_outputs=num_classes)
    g.cuda()
    h.cuda()
    h2.cuda()
    print(g.parameters, h.parameters, h2.parameters)

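    # the generator g and the two predictor heads each get their own optimizer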
    optimizer_g = torch.optim.Adam(g.parameters(), lr=learning_rate)
    optimizer_h = torch.optim.Adam(h.parameters(), lr=learning_rate)
    optimizer_h2 = torch.optim.Adam(h2.parameters(), lr=learning_rate)

    with open(txtfile, "a") as myfile:
        myfile.write('epoch: train_acc test_acc\n')

    epoch = 0
    train_acc = 0

    # evaluate models with random weights
    test_acc = evaluate(test_loader, g, h)
    print('Epoch [%d/%d], Test Accuracy: %.4f' %
          (epoch + 1, args.n_epoch, test_acc))

    # save results
    with open(txtfile, "a") as myfile:
        myfile.write(
            str(int(epoch)) + ': ' + str(train_acc) + ' ' + str(test_acc) +
            "\n")

    best_ce_acc = 0
    # training
    for epoch in range(1, args.n_epoch):
        # train models
        g.train()
        h.train()
        h2.train()
        adjust_learning_rate(optimizer_g, epoch)
        adjust_learning_rate(optimizer_h, epoch)
        adjust_learning_rate(optimizer_h2, epoch)
        train_acc = train(train_loader, epoch, g, optimizer_g, h, optimizer_h,
                          h2, optimizer_h2)

        # evaluate models
        test_acc = evaluate(test_loader, g, h)
        print(
            'Epoch [%d/%d], Training Accuracy: %.4f %%, Test Accuracy: %.4f %%'
            % (epoch + 1, args.n_epoch, train_acc * 100, test_acc * 100))
        with open(txtfile, "a") as myfile:
            myfile.write(
                str(int(epoch)) + ': ' + str(train_acc) + ' ' + str(test_acc) +
                "\n")
Example #10
def main(exp, frame_sizes, dataset, **params):
    params = dict(
        default_params,
        exp=exp, frame_sizes=frame_sizes, dataset=dataset,
        **params
    )

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    # Save samplernn parameters in .json for future audio generation
    import json
    with open(os.path.join(results_path, 'sample_rnn_params.json'), 'w') as fp:
        json.dump(params, fp, sort_keys=True, indent=4)

    # Model
    model = SampleRNN(
        frame_sizes=params['frame_sizes'],
        n_rnn=params['n_rnn'],
        dim=params['dim'],
        learn_h0=params['learn_h0'],
        q_levels=params['q_levels'],
        weight_norm=params['weight_norm'],
        batch_size=params['batch_size']
    )
    print("CUDA num: {}".format(torch.cuda.device_count()))
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    model_cnnseq2sample = CNNSeq2SampleRNN(params).cuda()

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    data_loader = make_data_loader(model.lookback, params)
    data_loader_test = make_data_loader(model.lookback, params, npz_filename=params['npz_filename_test'])
    # test_split = 1 - params['test_frac']
    # val_split = test_split - params['val_frac']

    trainer = Trainer(
        predictor, model_cnnseq2sample, sequence_nll_loss_bits, optimizer,
        # data_loader(0, val_split, eval=False),
        data_loader(0, 1, eval=False),
        cuda=params['cuda']
    )

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path)
    checkpoint_data_cnnseq2sample = load_last_checkpoint(checkpoints_path, model_type='cnnseq2sample')
    if checkpoint_data is not None and checkpoint_data_cnnseq2sample is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        (state_dict_cnnseq2sample, epoch, iteration) = checkpoint_data_cnnseq2sample
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)
        model_cnnseq2sample.load_state_dict(state_dict_cnnseq2sample)

    trainer.register_plugin(TrainingLossMonitor(
        smoothing=params['loss_smoothing']
    ))
    trainer.register_plugin(ValidationPlugin(
        # data_loader(val_split, test_split, eval=True),
        # data_loader_test(0, 1, eval=True)
        data_loader_test(0, params['val_frac'], eval=True),
        data_loader_test(params['val_frac'], 1, eval=True)
        # data_loader(test_split, 1, eval=True)
    ))
    trainer.register_plugin(AbsoluteTimeMonitor())
    trainer.register_plugin(SaverPlugin(
        checkpoints_path, params['keep_old_checkpoints']
    ))
    trainer.register_plugin(GeneratorPlugin(
        os.path.join(results_path, 'samples'), params['n_samples'],
        params['sample_length'], params['sample_rate']
    ))
    trainer.register_plugin(
        Logger([
            'training_loss',
            'validation_loss',
            'test_loss',
            'time'
        ])
    )
    trainer.register_plugin(StatsPlugin(
        results_path,
        iteration_fields=[
            'training_loss',
            ('training_loss', 'running_avg'),
            'time'
        ],
        epoch_fields=[
            'validation_loss',
            'test_loss',
            'time'
        ],
        plots={
            'loss': {
                'x': 'iteration',
                'ys': [
                    'training_loss',
                    ('training_loss', 'running_avg'),
                    'validation_loss',
                    'test_loss',
                ],
                'log_y': True
            }
        }
    ))

    trainer.run(params['epoch_limit'])
Example #11
modelpath = os.path.join(modeldir, modelname)

srnn_model1 = SampleRNN(frame_sizes=[4, 16],
                        n_rnn=2,
                        dim=1024,
                        learn_h0=True,
                        q_levels=256,
                        weight_norm=True)

if torch.cuda.is_available():
    srnn_model1 = srnn_model1.cuda()

predictor1 = Predictor(srnn_model1)

if torch.cuda.is_available():
    predictor1 = predictor1.cuda()

if torch.cuda.is_available():
    predictor1.load_state_dict(torch.load(modelpath)['model'])
else:
    predictor1.load_state_dict(
        torch.load(modelpath, map_location='cpu')['model'])

print("model loaded successfully!")

generate = Generator(srnn_model1, True)

import time
s_time = time.time()

sys.stderr.write("Generating {} sequences, each of length {}."\

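
# A hedged sketch, not from the original source: the Generator in this repo is
# typically called with the number of sequences and their length; n_seqs and
# seq_len are illustrative assumptions carried over from the line above.
samples = generate(n_seqs, seq_len)
print("generation took {:.1f}s".format(time.time() - s_time))
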
def wrap(input):
    if torch.is_tensor(input):
        input = Variable(input)
        if params['cuda']:
            input = input.cuda()
    return input


for data in dataset:
    batch_inputs = data[:-1]
    batch_target = data[-1]
    batch_inputs = list(map(wrap, batch_inputs))

    batch_target = Variable(batch_target)
    if params['cuda']:
        batch_target = batch_target.cuda()

    # reuse the predictor and model loaded above (predictor1 / srnn_model1)
    prediction = predictor1(*batch_inputs)  # , reset=False)
    prediction_data = prediction.data
    print(prediction)

    # Predict audios from 1 samples
    break