Example #1
    def __init__(self, config, dataset, train_loader, subgraph_loader, eval_split='test', \
            chkpt=None, get_emb=False, get_logits=False, get_attn=False):
        super().__init__()
        self.config = config
        self.dataset = dataset
        self.batch_loader = train_loader
        self.subgraph_loader = subgraph_loader
        self.learning_rate = config['lr']
        self.get_emb = get_emb
        self.get_logits = get_logits
        self.get_attn = get_attn

        self.net = NsLstmGNN(self.config)

        if chkpt is not None:
            self.net.load_state_dict(chkpt['state_dict'], strict=False)
            print('Loaded states')

        self.loss = get_loss_function(config['task'], config['class_weights'])

        self.collect_outputs = lambda x: collect_outputs(
            x, config['multi_gpu'])
        self.compute_metrics = lambda truth, pred: get_metrics(
            truth, pred, config['verbose'], config['classification'])
        self.per_node_metrics = lambda truth, pred: get_per_node_result(
            truth, pred, self.dataset.idx_test, config['classification'])

        self.eval_split = eval_split
        self.eval_mask = self.dataset.data.val_mask if eval_split == 'val' else self.dataset.data.test_mask

        entire_set = LstmDataset(config)
        collate = lambda x: collate_fn(x, config['task'])
        self.ts_loader = DataLoader(entire_set, collate_fn=collate, \
                batch_size=config['batch_size'], num_workers=config['num_workers'], shuffle=False)
        self.lg_alpha = config['lg_alpha']
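
The DataLoader above bakes the task into collate_fn by wrapping it in a lambda, so every batch is collated the same way without threading the task through each call. A minimal, self-contained sketch of that pattern, using a toy dataset and a hypothetical collate_fn rather than the repository's LstmDataset:

from torch.utils.data import DataLoader, Dataset

class ToyDataset(Dataset):
    # Hypothetical stand-in for LstmDataset: yields (sequence, label) pairs.
    def __init__(self, n=8):
        self.items = [([float(i)] * 3, i % 2) for i in range(n)]

    def __len__(self):
        return len(self.items)

    def __getitem__(self, idx):
        return self.items[idx]

def collate_fn(batch, task):
    # Hypothetical collate: the bound task could switch how targets are packed.
    seqs = [seq for seq, _ in batch]
    labels = [label for _, label in batch]
    return seqs, labels, task

collate = lambda x: collate_fn(x, task='classification')  # bind the task once
loader = DataLoader(ToyDataset(), collate_fn=collate, batch_size=4, shuffle=False)
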
Example #2

def test(model, ds, criterion, device, dataset_type="test"):
    """
    Evaluates `model` on `ds`, prints the metrics as a table and returns the
    raw evaluation outputs.
    """
    test_dl = utils.get_dl(ds, bs=64, shuffle=False)
    test_losses, test_acts, test_dog_human_targets, \
        test_breed_targets = engine.eval_loop(test_dl, model,
                                              criterion, device)
    test_metrics = metrics.get_metrics(
        test_losses, test_acts, test_dog_human_targets, test_breed_targets,
        criterion.dog_idx, criterion.human_idx)
    output = []
    output.append([f'{dataset_type}_loss', f'{dataset_type}_acc_human',
                   f'{dataset_type}_acc_dog', f'{dataset_type}_acc_breed',
                   f'{dataset_type}_f1score_breed'])
    output.append([
        test_metrics['loss'],
        test_metrics['accuracy_human'].item(),
        test_metrics['accuracy_dog'].item(),
        test_metrics['accuracy_breed'].item(),
        test_metrics['f1score_breed']])
    print(tabulate(output))
    return {
        'loss': test_losses,
        'acts': test_acts,
        'dog_human_targets': test_dog_human_targets,
        'breed_targets': test_breed_targets
    }
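
A hedged usage sketch of the helper above; the model factory, criterion builder and dataset loader are hypothetical placeholders standing in for the project's real objects (the criterion only needs to expose dog_idx and human_idx, as used in the function):

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = build_model().to(device)       # hypothetical model factory
criterion = make_criterion()           # hypothetical; must expose dog_idx and human_idx
valid_ds = load_split('valid')         # hypothetical dataset loader

raw = test(model, valid_ds, criterion, device, dataset_type='valid')
# `raw` keeps the losses, activations and targets for any further analysis.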
    
Example #3
    def __init__(self,
                 config,
                 dataset,
                 train_loader,
                 subgraph_loader,
                 eval_split='test'):
        super().__init__()
        self.config = config
        self.dataset = dataset
        self.batch_loader = train_loader
        self.subgraph_loader = subgraph_loader
        self.learning_rate = config['lr']

        self.net = NsGNN(config)

        self.loss = get_loss_function(config['task'], config['class_weights'])

        self.collect_outputs = lambda x: collect_outputs(
            x, config['multi_gpu'])
        self.compute_metrics = lambda x: get_metrics(
            x['truth'], x['pred'], config['verbose'], config['classification'])
        self.per_node_metrics = lambda x: get_per_node_result(
            x['truth'], x['pred'], self.dataset.idx_test, config['classification'])

        self.eval_split = eval_split
        self.eval_mask = self.dataset.data.val_mask if eval_split == 'val' else self.dataset.data.test_mask
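
Unlike Example #1, the metric lambdas here take a single dict with 'truth' and 'pred' keys, so an evaluation hook only has to pass along whatever it collected. A hedged sketch of that call pattern; `module`, `step_outputs` and the shape of what collect_outputs returns are assumptions, not code from the example:

# Hypothetical call pattern; `module` is an instance of the class above, and the
# dict returned by collect_outputs is an assumption about its output format.
collected = module.collect_outputs(step_outputs)
masked = {'truth': collected['truth'][module.eval_mask],
          'pred': collected['pred'][module.eval_mask]}
results = module.compute_metrics(masked)
per_node = module.per_node_metrics(collected)  # indexes dataset.idx_test internally
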
Example #4
def back_test_async(i, parametros, df, data_inicio, data_final):
    """Runs a single back-test for parameter set `parametros` over the given
    date range and returns its metrics, keyed by the iteration index `i`."""
    operacoes, param = back_test(parametros, df, data_inicio, data_final)
    results = metrics.get_metrics(operacoes, parametros[0] * 25000)

    output = {'iteracao': i}
    output.update(param)
    output.update(results)

    return output
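
The _async suffix suggests the function is meant to be fanned out over many parameter sets. A hedged sketch using multiprocessing.Pool; the parameter grid, the DataFrame and the date bounds are hypothetical placeholders, and the real project may schedule the calls differently:

from multiprocessing import Pool

# Hypothetical inputs: a list of parameter tuples, a price DataFrame and date bounds.
grid = [params_a, params_b, params_c]

with Pool(processes=4) as pool:
    jobs = [pool.apply_async(back_test_async, (i, p, df, data_inicio, data_final))
            for i, p in enumerate(grid)]
    resultados = [job.get() for job in jobs]   # one metrics dict per iteration
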
Example #5
    def __init__(self, config, collate, train_set=None, val_set=None, test_set=None):
        super().__init__()
        self.config = config
        self.trainset = train_set
        self.validset = val_set
        self.testset = test_set
        self.learning_rate = self.config['lr']
        self.collate = collate
        self.task = config['task']
        self.is_cls = config['classification']
        self.net = DynamicLstmGnn(config)

        self.loss = get_loss_function(self.task, config['class_weights'])
        self.collect_outputs = lambda x: collect_outputs(x, config['multi_gpu'])
        self.compute_metrics = lambda truth, pred : get_metrics(truth, pred, config['verbose'], config['classification'])
        self.per_node_metrics = lambda truth, pred : get_per_node_result(truth, pred, self.testset.idx_test, config['classification'])
        self.lg_alpha = config['lg_alpha']
Example #6
    def __init__(self,
                 config,
                 dataset,
                 train_loader,
                 subgraph_loader,
                 eval_split='test',
                 get_lstm_out=False,
                 get_logits=False):
        super().__init__()
        self.config = config
        self.dataset = dataset
        self.batch_loader = train_loader
        self.subgraph_loader = subgraph_loader
        self.learning_rate = config['lr']
        self.get_lstm_out = get_lstm_out
        self.get_logits = get_logits

        self.net = Net(config)

        self.loss = get_loss_function(config['task'], config['class_weights'])

        self.collect_outputs = lambda x: collect_outputs(
            x, config['multi_gpu'])
        self.compute_metrics = lambda x: get_metrics(
            x['truth'], x['pred'], config['verbose'], config['classification'])
        self.per_node_metrics = lambda x: get_per_node_result(
            x['truth'], x['pred'], self.dataset.idx_test, config['classification'])

        self.eval_split = eval_split
        self.eval_mask = self.dataset.data.val_mask if eval_split == 'val' else self.dataset.data.test_mask

        entire_set = LstmDataset(config)
        collate = lambda x: collate_fn(x, config['task'])
        self.ts_loader = DataLoader(entire_set, collate_fn=collate, \
                batch_size=config['batch_size'], num_workers=config['num_workers'], shuffle=False)