def load_training_set(args_dict, train_list_path, train_on_f=True, train_on_g=True):
    """Build the training dataset and its DataLoader.

    Used to derive the threshold value for prediction at onsets; relies on
    module-level config (annotations_dir_train, prediction_length,
    data_set_select, train_batch_size, p_memory).
    """
    dataset = TurnPredictionDataset(
        args_dict['feature_dict_list'],
        annotations_dir_train,
        train_list_path,
        args_dict['sequence_length'],
        prediction_length,
        'train',
        data_select=data_set_select,
        train_on_f=train_on_f,
        train_on_g=train_on_g,
    )
    # Deterministic order (shuffle=False); drop the ragged final batch.
    loader = DataLoader(
        dataset,
        batch_size=train_batch_size,
        shuffle=False,
        num_workers=0,
        drop_last=True,
        pin_memory=p_memory,
    )
    return dataset, loader
def load_test_set(args_dict, test_list_path, test_on_g=True, test_on_f=True):
    """Build the test dataset and a one-sample-per-batch DataLoader.

    Relies on module-level config (annotations_dir_test, prediction_length,
    data_set_select, p_memory).
    """
    dataset = TurnPredictionDataset(
        args_dict['feature_dict_list'],
        annotations_dir_test,
        test_list_path,
        args_dict['sequence_length'],
        prediction_length,
        'test',
        data_select=data_set_select,
        test_on_f=test_on_f,
        test_on_g=test_on_g,
    )
    # batch_size=1 and drop_last=False so every test sequence is evaluated.
    loader = DataLoader(
        dataset,
        batch_size=1,
        shuffle=False,
        num_workers=0,
        drop_last=False,
        pin_memory=p_memory,
    )
    return dataset, loader
Exemplo n.º 3
0
    p_memory = True

    # %% Data loaders
    # =============================================================================
    '''
    build dataloader to load data
    '''
# =============================================================================
# Time how long dataset/dataloader construction takes (reported later).
t1 = t.time()

# training set data loader
print('feature dict list:', feature_dict_list)
# Build the training dataset from the feature dicts and annotation dir;
# 'train' selects the training split inside TurnPredictionDataset.
train_dataset = TurnPredictionDataset(feature_dict_list,
                                      annotations_dir,
                                      train_list_path,
                                      sequence_length,
                                      prediction_length,
                                      'train',
                                      data_select=data_set_select)
# drop_last=True discards the final partial batch; num_workers=0 keeps
# loading in the main process.
train_dataloader = DataLoader(train_dataset,
                              batch_size=train_batch_size,
                              shuffle=shuffle,
                              num_workers=0,
                              drop_last=True,
                              pin_memory=p_memory)
# Per-feature input sizes, used downstream to size the model.
feature_size_dict = train_dataset.get_feature_size_dict()

if slow_test:
    # slow test loader
    test_dataset = TurnPredictionDataset(feature_dict_list,
                                         annotations_dir,
    #    torch.cuda.device(randint(0,1))
    dtype = torch.cuda.FloatTensor
    dtype_long = torch.cuda.LongTensor
    p_memory = True
else:
    dtype = torch.FloatTensor
    dtype_long = torch.LongTensor
    p_memory = True

# %% Data loaders
# Time dataset/dataloader construction.
t1 = t.time()

# training set data loader
print('feature dict list:', feature_dict_list)
# Variant that also passes train_on_f / train_on_g flags, selecting which
# speaker roles to train on.
train_dataset = TurnPredictionDataset(feature_dict_list, annotations_dir, train_list_path, sequence_length,
                                      prediction_length, 'train', data_select=data_set_select, train_on_f=train_on_f,
                                      train_on_g=train_on_g)
# drop_last=True discards the final partial batch; loading stays in-process.
train_dataloader = DataLoader(train_dataset, batch_size=train_batch_size, shuffle=shuffle, num_workers=0,
                              drop_last=True, pin_memory=p_memory)
# Per-feature input sizes, used downstream to size the model.
feature_size_dict = train_dataset.get_feature_size_dict()

if slow_test:
    # slow test loader
    test_dataset = TurnPredictionDataset(feature_dict_list, annotations_dir, test_list_path, sequence_length,
                                         prediction_length, 'test', data_select=data_set_select, test_on_f=test_on_f,
                                         test_on_g=test_on_g)

    test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=0, drop_last=False,
                                 pin_memory=p_memory)
else:
    # quick test loader
Exemplo n.º 5
0
print('Use CUDA: ' + str(use_cuda))

# Select tensor types for the chosen device; p_memory (pin_memory) is True
# in both branches, so pinning happens even on CPU-only runs.
if use_cuda:
    #    torch.cuda.device(randint(0,1))
    dtype = torch.cuda.FloatTensor
    dtype_long = torch.cuda.LongTensor
    p_memory = True
else:
    dtype = torch.FloatTensor
    dtype_long = torch.LongTensor
    p_memory = True

# %% Data loaders
t1 = t.time()

# Whole corpus loaded in 'test' mode (no train/test split) for evaluation.
complete_dataset = TurnPredictionDataset(feature_dict_list, annotations_dir, complete_path, sequence_length,
                                      prediction_length, 'test', data_select=data_set_select)

# batch_size=1 and no shuffling so every sequence is evaluated in file order.
complete_dataloader = DataLoader(complete_dataset, batch_size=1, shuffle=False, num_workers=0,  # previously shuffle = shuffle
                              drop_last=False, pin_memory=p_memory)

# Per-feature input sizes, used downstream to size the model.
feature_size_dict = complete_dataset.get_feature_size_dict()

print('time taken to load data: ' + str(t.time() - t1))

# First CSV column of complete_path is the list of file identifiers.
complete_file_list = list(pd.read_csv(complete_path, header=None, dtype=str)[0])

# NOTE(review): torch.load unpickles arbitrary objects — only load trusted
# checkpoints. No map_location is given, so these fail on a CPU-only machine
# if the files were saved from GPU — TODO confirm.
lstm = torch.load('lstm_models/ling_50ms.p')
ffnn = torch.load('smol_from_big.p')

# Sigmoid applied to model outputs downstream (not visible in this chunk).
s = nn.Sigmoid()
Exemplo n.º 6
0
    p_memory = True
else:
    dtype = torch.FloatTensor
    dtype_long = torch.LongTensor
    p_memory = True

# %% Data loaders
# Time dataset/dataloader construction.
t1 = t.time()

# training set data loader
print('feature dict list:', feature_dict_list)
#How do I load the true values? How is b[4] the true values?
# Variant that passes annotations_key=annotations_role, selecting which
# annotation stream serves as the prediction target.
train_dataset = TurnPredictionDataset(feature_dict_list,
                                      annotations_dir,
                                      train_list_path,
                                      sequence_length,
                                      prediction_length,
                                      'train',
                                      data_select=data_set_select,
                                      annotations_key=annotations_role)
# drop_last=True discards the final partial batch; num_workers=0 keeps
# loading in the main process.
train_dataloader = DataLoader(train_dataset,
                              batch_size=train_batch_size,
                              shuffle=shuffle,
                              num_workers=0,
                              drop_last=True,
                              pin_memory=p_memory)
# Per-feature input sizes, used downstream to size the model.
feature_size_dict = train_dataset.get_feature_size_dict()

if slow_test:
    # slow test loader
    test_dataset = TurnPredictionDataset(feature_dict_list,
                                         annotations_dir,