Example #1
def __init__(self, dataset, batch_size, directory='', **kwargs):
    if dataset == 'musicnet':
        if not directory:
            directory = MUSICNET_DIRECTORY
        # WINDOW, DOWNLOAD, PITCH_SHIFT and JITTER are keyword-name constants
        # presumably defined elsewhere in the original module.
        self.dataset = musicnet.MusicNet(root=directory,
                                         train=True,
                                         window=kwargs[WINDOW],
                                         download=kwargs.get(DOWNLOAD, True),
                                         pitch_shift=kwargs.get(PITCH_SHIFT, 0),
                                         jitter=kwargs.get(JITTER, 0))
        self.loader = torch.utils.data.DataLoader(dataset=self.dataset,
                                                  batch_size=batch_size)
    elif dataset == 'cochleagram':
        self.dataset = CochleagramsDataset(root=directory)
        self.loader = torch.utils.data.DataLoader(dataset=self.dataset,
                                                  batch_size=batch_size)
    else:
        raise NameError(f'dataset {dataset} not handled')
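A minimal usage sketch for the wrapper above, assuming the enclosing class is named `AudioData` and that `WINDOW`, `DOWNLOAD`, `PITCH_SHIFT` and `JITTER` are string constants naming the keyword arguments; neither the class name nor the constant values are shown in the snippet.

# Hypothetical constants and class name, for illustration only.
WINDOW, DOWNLOAD, PITCH_SHIFT, JITTER = 'window', 'download', 'pitch_shift', 'jitter'

data = AudioData('musicnet', batch_size=32, window=16384, download=True)
for inputs, labels in data.loader:
    pass  # training / evaluation step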
Example #2
import os
import signal
from time import time

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

import musicnet

root = 'musicnet'


def worker_init(args):
    signal.signal(signal.SIGINT,
                  signal.SIG_IGN)  # ignore signals so parent can handle them
    np.random.seed(os.getpid()
                   ^ int(time()))  # approximately random seed for workers


batch_size = 100
# note: worker_init_fn is only invoked when num_workers > 0 spawns worker processes
kwargs = {'num_workers': 0, 'pin_memory': True, 'worker_init_fn': worker_init}
window = 16384

train_set = musicnet.MusicNet(root=root,
                              train=True,
                              download=True,
                              window=window)
test_set = musicnet.MusicNet(root=root, train=False, window=window)
train_loader = torch.utils.data.DataLoader(dataset=train_set,
                                           batch_size=batch_size,
                                           **kwargs)
test_loader = torch.utils.data.DataLoader(dataset=test_set,
                                          batch_size=batch_size,
                                          **kwargs)

print('data loaded')
# NADE and device are defined elsewhere in the original script.
model = NADE(input_dim=128, hidden_dim=50).to(device)
loss_function = nn.BCELoss(reduction="sum")
optimizer = optim.Adam(model.parameters())
scheduler = optim.lr_scheduler.StepLR(optimizer, 15, gamma=0.1)
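With the objects above in place, a training epoch would plausibly look like the sketch below. The `NADE` forward interface is not shown in the snippet, so treating the 128-dimensional MusicNet label vectors as both input and reconstruction target is an assumption, and `epochs` is an illustrative placeholder; some variants of the `musicnet` module also require iterating inside a `with train_set:` block.

# Hedged training-loop sketch; the NADE interface and the use of the MusicNet
# label vectors as NADE inputs are assumptions, not part of the original.
epochs = 30  # illustrative value

for epoch in range(epochs):
    model.train()
    for _, labels in train_loader:           # MusicNet batches assumed to yield (audio, labels)
        labels = labels.to(device).float()
        optimizer.zero_grad()
        probs = model(labels)                 # assumed: per-dimension Bernoulli probabilities
        loss = loss_function(probs, labels)   # summed binary cross-entropy
        loss.backward()
        optimizer.step()
    scheduler.step()                          # decay the learning rate every 15 epochs as configured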
Example #3
result_dict['parameters']['train_size'] = train_size
result_dict['parameters']['test_size'] = test_size
result_dict['parameters']['lr'] = lr
result_dict['parameters']['pitch_shift'] = pitch_shift
result_dict['parameters']['jitter'] = jitter
result_dict['parameters']['window'] = window

# Preparing Dataset

start = time()
root = './data/'
train_set = musicnet.MusicNet(root=root,
                              epoch_size=train_size,
                              sequence=sequence,
                              train=True,
                              download=True,
                              refresh_cache=False,
                              window=window,
                              mmap=False,
                              pitch_shift=pitch_shift,
                              jitter=jitter)
test_set = musicnet.MusicNet(root=root,
                             train=False,
                             download=True,
                             sequence=sequence,
                             refresh_cache=False,
                             window=window,
                             epoch_size=test_size,
                             mmap=False)
print("Data loaded, time used = {:2.2f} seconds".format(time() - start))

train_loader = torch.utils.data.DataLoader(dataset=train_set,
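                                           # plausible continuation of the call, following the
                                           # DataLoader pattern of Example #2; batch_size and any
                                           # loader kwargs are assumed to be defined earlier in
                                           # the original script
                                           batch_size=batch_size,
                                           **kwargs)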
Example #4
def main():
    ids = musicnet.MusicNet().ids

    #    pool = Pool(cpu_count())
    for id in ids:
        processdata(id)
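The commented-out `Pool` line hints at a parallel variant; below is a hedged sketch of what that could look like, assuming `processdata` takes a single recording id (as in the loop above) and is picklable.

from multiprocessing import Pool, cpu_count


def main_parallel():
    ids = musicnet.MusicNet().ids
    # Map processdata over all recording ids, one worker per CPU core.
    with Pool(cpu_count()) as pool:
        pool.map(processdata, ids)


if __name__ == '__main__':
    main_parallel()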