Code example #1
0
def feature_net_cross_eval(datasets, outputPath, epochs, debug=False):
    """Leave-one-run-out cross-evaluation of a FeatureNet model.

    For each run, a fresh FeatureNet is trained on all the other runs and
    then scored (featuresCorr) against every run.  Column ``test_run_idx``
    of the returned matrix holds the per-run correlations for the model
    whose held-out run was ``test_run_idx``; row ``run_idx`` indexes the
    run being scored.

    Args:
        datasets: sequence of run datasets; each sample is assumed to be a
            (features, ffa_voxels) pair of 1-D tensors — TODO confirm.
        outputPath: destination for ``np.save``; the score matrix is
            re-saved after every fold so partial results survive a crash.
        epochs: number of training epochs per fold, forwarded to
            ``fit_feature_model``.
        debug: forwarded to ``fit_feature_model``.

    Returns:
        (num_runs, num_runs) ndarray of correlation scores.
    """
    # Generalized from a hard-coded 8: works for any number of runs while
    # producing the same (8, 8) matrix for the original 8-run callers.
    num_runs = len(datasets)
    eval_scores = np.empty((num_runs, num_runs))
    for test_run_idx in range(num_runs):
        # Train on every run except the held-out one.
        train_dataset_list = [
            ds for j, ds in enumerate(datasets) if j != test_run_idx
        ]

        # Combined, shuffled training loader over the remaining runs.
        combined_train_dataset = ConcatDataset(train_dataset_list)
        train_loader = DataLoader(combined_train_dataset,
                                  sampler=RandomSampler(combined_train_dataset),
                                  batch_size=64,
                                  num_workers=0,
                                  pin_memory=True)

        # Infer layer sizes from the first training sample.
        first_sample = combined_train_dataset[0]
        num_features = first_sample[0].shape[0]
        num_ffa_voxels = first_sample[1].shape[0]

        # Fresh model, loss, and optimizer per fold.
        model = FeatureNet(num_features, num_ffa_voxels).cuda()
        model.train()
        criterion = torch.nn.MSELoss()
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=0.001,
                                    momentum=0.9,
                                    weight_decay=0.001)

        fit_feature_model(model,
                          criterion,
                          optimizer,
                          train_loader,
                          epochs=epochs,
                          debug=debug)

        # Evaluate the trained model against every run (including the
        # training runs — row test_run_idx is the out-of-sample score).
        model.eval()
        for run_idx, run in enumerate(datasets):
            eval_scores[run_idx, test_run_idx] = featuresCorr(run, model)

        # Checkpoint the (partially filled) score matrix after each fold.
        np.save(outputPath, eval_scores)
    return eval_scores
class MultiwayDataset:
    """Concatenation of several ParallelDataset corpora.

    Builds one ParallelDataset per (first, second) pair, exposes the
    combined corpus through the standard dataset protocol
    (``__getitem__`` / ``__len__``), and publishes a length-sorted index
    permutation in ``self.indices`` for callers that want length-ordered
    batching.
    """

    def __init__(self, pairs, tokenizer, dictionary):
        """
        Args:
            pairs: iterable of (fst, snd) corpus specifications; each pair
                becomes one ParallelDataset.
            tokenizer: forwarded to ParallelDataset.
            dictionary: forwarded to ParallelDataset.
        """
        self.datasets = [
            ParallelDataset(fst, snd, tokenizer, dictionary)
            for fst, snd in pairs
        ]
        self.concatenated = ConcatDataset(self.datasets)
        # Flatten per-dataset sample lengths in concatenation order.
        # chain.from_iterable avoids materializing the intermediate list
        # that chain(*[...]) would build.
        self.lengths = list(
            itertools.chain.from_iterable(d.lengths for d in self.datasets))
        # Permutation of sample indices sorted by sample length; not used
        # internally but part of the public attribute surface.
        self.indices = np.argsort(self.lengths)

    def __getitem__(self, idx):
        """Return the sample at position ``idx`` of the combined corpus."""
        return self.concatenated[idx]

    def __len__(self):
        """Total number of samples across all constituent datasets."""
        return len(self.concatenated)
Code example #3
0
File: task.py  Project: zzf2014/pytorch-meta
 def __getitem__(self, index):
     """Fetch the sample at *index* by delegating to ConcatDataset's lookup."""
     item = ConcatDataset.__getitem__(self, index)
     return item
Code example #4
0
File: base.py  Project: Renovamen/metallic
 def __getitem__(self, index: int) -> tuple:
     """Return the sample tuple at *index* via ConcatDataset's implementation."""
     sample = ConcatDataset.__getitem__(self, index)
     return sample