Code example #1
    def test_ESR(self):
        # requires: import torch; 'training' is the project's training module
        loss_fcn = training.ESRLoss()
        # dummy signals shaped [sequence_length, batch_size, channels]
        output = torch.zeros([100, 10, 5], requires_grad=True)
        target = torch.ones([100, 10, 5], requires_grad=True)

        # the loss between distinct signals must support backpropagation
        loss = loss_fcn(output, target)
        loss.backward()

        # a signal compared with itself should give exactly zero loss
        loss = loss_fcn(output, output)
        assert loss.item() == 0
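
For context, the error-to-signal ratio (ESR) loss exercised here normalises the squared error by the energy of the target signal, which is the standard ESR definition. A minimal sketch of such a loss, assuming the project's ESRLoss follows that definition (the class name ESRLossSketch and the epsilon guard are assumptions):

import torch

class ESRLossSketch(torch.nn.Module):
    """Error-to-signal ratio: squared error normalised by target energy."""
    def __init__(self, epsilon=1e-5):
        super().__init__()
        self.epsilon = epsilon  # keeps the division defined for an all-zero target (assumption)

    def forward(self, output, target):
        error = torch.mean(torch.pow(target - output, 2))
        energy = torch.mean(torch.pow(target, 2)) + self.epsilon
        return error / energy

Under this definition the second assertion above follows directly: comparing a signal with itself zeroes the numerator, so the loss is exactly 0.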
Code example #2
    def test_pre_emph(self):
        loss_fcn = training.ESRLoss()
        # first-order FIR pre-emphasis filter with coefficients [0.95, 1]
        pre_emph = training.PreEmph([0.95, 1])
        output = torch.zeros([100, 10, 1], requires_grad=True)
        target = torch.ones([100, 10, 1], requires_grad=True)

        output_pre, target_pre = pre_emph(output, target)

        # filtering must preserve the signal shapes
        assert output.shape == output_pre.shape
        assert target.shape == target_pre.shape

        loss = loss_fcn(output_pre, target_pre)
        loss.backward()
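
The PreEmph module applies a short FIR pre-emphasis filter to both signals before the loss is taken, weighting high-frequency error more heavily. A minimal sketch assuming it wraps conv1d over [sequence, batch, channel] tensors (the class name, the padding scheme and the coefficient ordering are assumptions; conv1d computes cross-correlation, so a real implementation may reverse the coefficients):

import torch

class PreEmphSketch(torch.nn.Module):
    def __init__(self, coeffs):
        super().__init__()
        # FIR kernel shaped [out_channels, in_channels, kernel_width]
        kernel = torch.tensor(coeffs, dtype=torch.float32).view(1, 1, -1)
        self.register_buffer('kernel', kernel)
        self.pad = len(coeffs) - 1

    def filter(self, x):
        # [seq, batch, 1] -> [batch, 1, seq] for conv1d; left-pad to keep the length
        x = x.permute(1, 2, 0)
        x = torch.nn.functional.pad(x, (self.pad, 0))
        x = torch.nn.functional.conv1d(x, self.kernel)
        return x.permute(2, 0, 1)

    def forward(self, output, target):
        return self.filter(output), self.filter(target)

Left-padding by one less than the kernel length is what makes the shape assertions in the test hold: the filtered signal keeps exactly the input's sequence length.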
Code example #3
import math, os
import numpy as np
import torch
# dataset, networks and training are the project's own modules

def test_system():
    network = networks.RecNet()
    # one GRU block: 2 input channels, 16 hidden units, 1 output channel
    network.add_layer({'block_type': 'GRU', 'input_size': 2, 'output_size': 1, 'hidden_size': 16})

    data = dataset.DataSet(os.path.join('result_test', 'network1'))
    # split the training audio into segments of 8820 samples each
    data.create_subset('train', 8820)
    data.load_file('KDonnerFlangerra12c12rg9Singles1', set_names='train')

    optimiser = torch.optim.Adam(network.parameters(), lr=0.0001)
    loss_fcn = training.ESRLoss()
    batch_size = 10
    init_len = 120  # samples used to initialise the hidden state
    up_fr = 100     # samples between successive parameter updates
    for i in range(3):
        print('Starting epoch ' + str(i+1))
        train_losses = []
        # shuffle the segments at the start of the epoch
        train_input = data.subsets['train'].data['input'][0]
        train_target = data.subsets['train'].data['target'][0]
        shuffle = torch.randperm(train_input.shape[1])

        for n in range(math.ceil(shuffle.shape[0]/batch_size)):
            # Load batch of randomly selected segments
            batch_losses = []
            batch = train_input[:, shuffle[n*batch_size:(n+1)*batch_size], :]
            target = train_target[:, shuffle[n*batch_size:(n+1)*batch_size], :]

            # Run initialisation samples through the network
            network(batch[0:init_len, :, :])
            # Zero the gradient buffers
            network.zero_grad()

            # Iterate over the remaining samples in the sequence batch
            for k in range(math.ceil((batch.shape[0]-init_len)/up_fr)):
                output = network(batch[init_len+(k*up_fr):init_len+((k+1)*up_fr), :, :])
                # Calculate loss
                loss = loss_fcn(output, target[init_len+(k*up_fr):init_len+((k+1)*up_fr), :, :])
                train_losses.append(loss.item())
                batch_losses.append(loss.item())

                loss.backward()
                optimiser.step()

                # Set the network hidden state, to detach it from the computation graph
                network.detach_hidden()
                # Zero the gradient buffers, otherwise gradients accumulate
                # across successive parameter updates
                network.zero_grad()

            print('batch ' + str(n+1) + ' loss = ' + str(np.mean(batch_losses)))
        print('epoch ' + str(i+1) + ' loss = ' + str(np.mean(train_losses)))
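
The loop above is truncated backpropagation through time: init_len samples warm up the hidden state, a parameter update is made every up_fr samples, and network.detach_hidden() cuts the autograd graph at each update boundary so memory use does not grow with sequence length. A minimal sketch of a recurrent block exposing such a method (the attribute name hidden and the GRU-plus-linear layout are assumptions, not the project's RecNet):

import torch

class RecurrentBlockSketch(torch.nn.Module):
    def __init__(self, input_size=2, hidden_size=16, output_size=1):
        super().__init__()
        self.rec = torch.nn.GRU(input_size, hidden_size)
        self.lin = torch.nn.Linear(hidden_size, output_size)
        self.hidden = None  # carried across forward calls

    def forward(self, x):
        out, self.hidden = self.rec(x, self.hidden)
        return self.lin(out)

    def detach_hidden(self):
        # keep the state values but drop their autograd history, so the
        # next backward pass stops at this update boundary
        if self.hidden is not None:
            self.hidden = self.hidden.detach()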
Code example #4
    def test_network_loading(self):
        # load the saved config and weights of a previously trained model
        network_params = miscfuncs.json_load('config.json',
                                             ['result_test', 'network1'])
        network_params['state_dict'] = torch.load(
            os.path.join('result_test', 'network1', 'modelBest.pt'),
            map_location=torch.device('cpu'))
        # convert the legacy config format, then rebuild the network from it
        model_data = networks.legacy_load(network_params)

        network = networks.load_model(model_data)

        data = dataset.DataSet(os.path.join('result_test', 'network1'))
        data.create_subset('test', 0)
        data.load_file('KDonnerFlangerra12c12rg9Singles1', set_names='test')

        # the stored test loss; x[1:-1] strips the enclosing brackets
        with open(os.path.join('result_test', 'network1', 'tloss.txt')) as fp:
            x = fp.read()
        with torch.no_grad():
            output = network(data.subsets['test'].data['input'][0])
            loss_fcn = training.ESRLoss()

            loss = loss_fcn(output, data.subsets['test'].data['target'][0])
            # the reloaded network should reproduce the stored test loss
            assert abs(loss.item() - float(x[1:-1])) < 1e-5
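
For context, miscfuncs.json_load appears to read a JSON file from a directory that may be given as a list of path components; code example #5 below calls it without the '.json' extension, so the helper presumably appends it when missing. A minimal sketch under those assumptions (the extension handling and the list joining are inferences, not confirmed behaviour):

import json
import os

def json_load_sketch(file_name, dir_name=''):
    # assumption: a list of path components is joined into one directory
    if isinstance(dir_name, list):
        dir_name = os.path.join(*dir_name)
    # assumption: the '.json' extension is appended when missing
    if not file_name.endswith('.json'):
        file_name += '.json'
    with open(os.path.join(dir_name, file_name)) as fp:
        return json.load(fp)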
Code example #5
File: main.py  Project: Alec-Wright/NeuralTimeVaryFX
                              epoch)

        train_track.train_epoch_update(epoch_loss.item(), ep_st_time,
                                       time.time(), init_time, epoch)
        # write loss to the tensorboard (just for recording purposes)
        writer.add_scalar('Loss/train', train_track['training_losses'][-1],
                          epoch)

        network.save_model('model', save_path)
        miscfuncs.json_save(train_track, 'training_stats', save_path)

        if args.validation_p and patience_counter > args.validation_p:
            print('validation patience limit reached at epoch ' + str(epoch))
            break

    # final evaluation on the held-out test set, processed in chunks
    lossESR = training.ESRLoss()
    test_output, test_loss = network.process_data(
        dataset.subsets['test'].data['input'][0],
        dataset.subsets['test'].data['target'][0], loss_functions,
        args.test_chunk)
    test_loss_ESR = lossESR(test_output,
                            dataset.subsets['test'].data['target'][0])
    # save the final test output as audio; 'write' matches the signature of
    # scipy.io.wavfile.write(filename, rate, data)
    write(os.path.join(save_path, "test_out_final.wav"),
          dataset.subsets['test'].fs,
          test_output.cpu().numpy()[:, 0, 0])
    writer.add_scalar('Loss/test_loss', test_loss.item(), 1)
    writer.add_scalar('Loss/test_lossESR', test_loss_ESR.item(), 1)
    train_track['test_loss_final'] = test_loss.item()
    train_track['test_lossESR_final'] = test_loss_ESR.item()

    best_val_net = miscfuncs.json_load('model_best', save_path)
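
network.process_data above appears to run the test signal through the network in chunks of args.test_chunk samples and return the full output together with the loss. A minimal sketch of gradient-free chunked evaluation under that assumption (the name process_data_sketch and the single-loss signature are simplifications; the fragment above passes a loss_functions collection):

import torch

def process_data_sketch(network, input_data, target_data, loss_fcn, chunk):
    # input_data is shaped [sequence_length, batch_size, channels];
    # a stateful network carries its hidden state across chunks
    outputs = []
    with torch.no_grad():
        for start in range(0, input_data.shape[0], chunk):
            outputs.append(network(input_data[start:start + chunk]))
    output = torch.cat(outputs, dim=0)
    return output, loss_fcn(output, target_data)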