Esempio n. 1
0
    def test_create_subset(self):
        """Exercise DataSet subset creation and the load_file entry points.

        Covers three configurations: default extensions with a split load
        followed by a bare load; extensions=None with repeated split loads;
        and extensions=None with the cond_val keyword at several values.
        """
        data_path = os.path.join('result_test', 'network1')
        file_name = 'KDonnerFlangerra12c12rg9Singles1'
        splits = ('train', 'val', 'test')
        fractions = (0.75, 0.125, 0.125)

        # Default-extension dataset: split load, then a plain load.
        ds = dataset.DataSet(data_path)
        ds.create_subset('train', frame_len=44100)
        ds.create_subset('val')
        ds.create_subset('test')
        ds.load_file(file_name, list(splits), list(fractions))
        ds.load_file(file_name)

        # No-extension dataset: plain load, then the same split load twice.
        ds_no_ext = dataset.DataSet(data_path, extensions=None)
        ds_no_ext.create_subset('train', frame_len=44100)
        ds_no_ext.create_subset('val')
        ds_no_ext.create_subset('test')
        ds_no_ext.load_file(file_name + '-input')
        ds_no_ext.load_file(file_name + '-input', list(splits), list(fractions))
        ds_no_ext.load_file(file_name + '-input', list(splits), list(fractions))

        # Conditioned loads, varying cond_val across calls.
        ds_cond = dataset.DataSet(data_path, extensions=None)
        ds_cond.create_subset('train', frame_len=44100)
        ds_cond.create_subset('val')
        ds_cond.create_subset('test')
        ds_cond.load_file(file_name + '-input',
                          cond_val=0)
        ds_cond.load_file(file_name + '-input',
                          list(splits), list(fractions),
                          cond_val=0.25)
        ds_cond.load_file(file_name + '-input',
                          cond_val=0.5)
def proc_audio(args):
    """Run a saved network over an input audio file and write the result.

    ``args`` must provide ``model_file`` (JSON model description),
    ``input_file`` (audio to process) and ``output_file`` (destination path).
    """
    model_params = miscfuncs.json_load(args.model_file)
    model = networks.load_model(model_params)

    audio = dataset.DataSet(data_dir='', extensions='')
    audio.create_subset('data')
    audio.load_file(args.input_file, set_names='data')

    subset = audio.subsets['data']
    # Inference only — no gradients needed.
    with torch.no_grad():
        processed = model(subset.data['data'][0])

    # Select the first column along the two trailing axes before writing.
    write(args.output_file, subset.fs, processed.cpu().numpy()[:, 0, 0])
Esempio n. 3
0
def test_system():
    """End-to-end smoke test: train a small GRU RecNet for three epochs.

    Builds a single-layer GRU network, loads one training file split into
    8820-sample segments, and runs truncated-BPTT training with Adam and an
    ESR loss, printing per-batch and per-epoch mean losses.
    """
    network = networks.RecNet()
    network.add_layer({'block_type': 'GRU', 'input_size': 2, 'output_size': 1, 'hidden_size': 16})

    data = dataset.DataSet(os.path.join('result_test', 'network1'))
    data.create_subset('train', 8820)
    data.load_file('KDonnerFlangerra12c12rg9Singles1', set_names='train')

    optimiser = torch.optim.Adam(network.parameters(), lr=0.0001)
    loss_fcn = training.ESRLoss()
    batch_size = 10
    init_len = 120   # warm-up samples used only to initialise the hidden state
    up_fr = 100      # samples processed per parameter update
    for i in range(3):
        print('Starting epoch ' + str(i+1))
        train_losses = []
        # shuffle the segments at the start of the epoch
        train_input = data.subsets['train'].data['input'][0]
        train_target = data.subsets['train'].data['target'][0]
        shuffle = torch.randperm(train_input.shape[1])

        for n in range(math.ceil(shuffle.shape[0]/batch_size)):
            # Load batch of randomly selected segments
            batch_losses = []
            batch = train_input[:, shuffle[n*batch_size:(n+1)*batch_size], :]
            target = train_target[:, shuffle[n*batch_size:(n+1)*batch_size], :]

            # Run initialisation samples through the network
            network(batch[0:init_len, :, :])
            # Zero the gradient buffers
            network.zero_grad()

            # Iterate over the remaining samples in the sequence batch
            for k in range(math.ceil((batch.shape[0]-init_len)/up_fr)):
                output = network(batch[init_len+(k*up_fr):init_len+((k+1)*up_fr), :, :])
                # Calculate loss
                loss = loss_fcn(output, target[init_len+(k*up_fr):init_len+((k+1)*up_fr), :, :])
                train_losses.append(loss.item())
                batch_losses.append(loss.item())

                loss.backward()
                optimiser.step()
                # FIX: zero the gradients after each update — the original only
                # zeroed once per batch, so successive backward() calls within a
                # batch accumulated stale gradients into every later step.
                network.zero_grad()

                # Set the network hidden state, to detach it from the computation graph
                network.detach_hidden()

            print('batch ' + str(n+1) + ' loss = ' + str(np.mean(batch_losses)))
        print('epoch ' + str(i+1) + ' loss = ' + str(np.mean(train_losses)))
Esempio n. 4
0
    def test_network_loading(self):
        """Load a legacy checkpoint and check its test-set loss matches disk.

        Rebuilds the network from config.json plus modelBest.pt, evaluates it
        on the stored test file, and compares the ESR loss against the value
        recorded in tloss.txt to within 1e-5.
        """
        result_dir = os.path.join('result_test', 'network1')

        params = miscfuncs.json_load('config.json',
                                     ['result_test', 'network1'])
        params['state_dict'] = torch.load(
            os.path.join('result_test', 'network1', 'modelBest.pt'),
            map_location=torch.device('cpu'))
        network = networks.load_model(networks.legacy_load(params))

        data = dataset.DataSet(result_dir)
        data.create_subset('test', 0)
        data.load_file('KDonnerFlangerra12c12rg9Singles1', set_names='test')

        with open(os.path.join('result_test', 'network1', 'tloss.txt')) as fp:
            recorded = fp.read()

        test_set = data.subsets['test']
        with torch.no_grad():
            prediction = network(test_set.data['input'][0])
            loss = training.ESRLoss()(prediction, test_set.data['target'][0])

        # tloss.txt stores the loss wrapped in one leading and one trailing
        # character; strip them before parsing the float.
        assert abs(loss.item() - float(recorded[1:-1])) < 1e-5
Esempio n. 5
0
    else:
        # Fallback save path encoding device, hidden size and layer count.
        save_path = 'Results/' + args.device + '_hs-' + str(args.hidden_size) + \
                  '_nl-' + str(args.num_layers) + '_' + args.file_name

    # Check if a cuda device is available
    if not torch.cuda.is_available():
        print('cuda device not available')
        cuda = 0
    else:
        # NOTE(review): set_default_tensor_type is deprecated in newer
        # PyTorch releases — confirm the target torch version supports it.
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        torch.cuda.set_device(0)
        print('cuda device available')
        cuda = 1

    # Create dataset object
    # NOTE(review): this rebinding shadows the imported `dataset` module for
    # the rest of this scope; later dataset.DataSet(...) calls would fail.
    dataset = dataset.DataSet(data_dir=args.data_location)
    dataset.create_subset('train', frame_len=args.seg_len)
    dataset.create_subset('val')
    dataset.create_subset('test')

    #dataset = dataset.DataSet('data')
    #dataset.load_file('train/BehPhaserToneoffSingles1', 'train')
    #dataset.load_file('val/BehPhaserToneoffSingles1', 'val')
    #dataset.load_file('test/BehPhaserToneoffSingles1', 'test')

    # Split the single input file into train/val/test by the given fractions.
    dataset.load_file(args.file_name, ['train', 'val', 'test'],
                      [0.75, 0.125, 0.125])

    # Create instance of Network.RNN class
    network = networks.SimpleRNN(hidden_size=args.hidden_size,
                                 num_layers=args.num_layers,
Esempio n. 6
0
    # Set up training optimiser + scheduler + loss fcns and training info tracker
    optimiser = torch.optim.Adam(network.parameters(),
                                 lr=args.learn_rate,
                                 weight_decay=1e-4)
    # Halve the LR after 5 epochs without improvement in the monitored metric.
    # NOTE(review): the `verbose` argument is deprecated in newer PyTorch —
    # confirm the target torch version still accepts it.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimiser,
                                                     'min',
                                                     factor=0.5,
                                                     patience=5,
                                                     verbose=True)
    loss_functions = training.LossWrapper(args.loss_fcns, args.pre_filt)
    train_track = training.TrainTrack()
    writer = SummaryWriter(os.path.join('runs2', model_name))

    # Load dataset
    # NOTE(review): this rebinding shadows the imported `dataset` module for
    # the rest of this scope; later dataset.DataSet(...) calls would fail.
    dataset = dataset.DataSet(data_dir='Data')

    # Training data is framed into 22050-sample segments; val/test are whole.
    dataset.create_subset('train', frame_len=22050)
    dataset.load_file(os.path.join('train', args.file_name), 'train')

    dataset.create_subset('val')
    dataset.load_file(os.path.join('val', args.file_name), 'val')

    dataset.create_subset('test')
    dataset.load_file(os.path.join('test', args.file_name), 'test')

    # If training is restarting, this will ensure the previously elapsed training time is added to the total
    # (total_time is stored in hours, hence the * 3600 conversion to seconds).
    init_time = time.time() - start_time + train_track['total_time'] * 3600
    # Set network save_state flag to true, so when the save_model method is called the network weights are saved
    network.save_state = True
    patience_counter = 0