Example #1
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        seen = 0
        totals = {'loss': 0, self.metric_name: 0}
        for batch_index, batch in enumerate(self.taskloader):
            x, y = self.prepare_batch(batch)

            loss, y_pred, _ = self.eval_fn(self.model,
                                           self.optimiser,
                                           self.loss_fn,
                                           x,
                                           y,
                                           n_shot=self.n_shot,
                                           k_way=self.k_way,
                                           q_queries=self.q_queries,
                                           train=False,
                                           **self.kwargs)

            seen += y_pred.shape[0]

            totals['loss'] += loss.item() * y_pred.shape[0]
            totals[self.metric_name] += categorical_accuracy(
                y, y_pred) * y_pred.shape[0]
            # Log per-batch values to TensorBoard; the global step assumes 1-indexed epochs.
            step = len(self.taskloader) * (epoch - 1) + batch_index
            self.writer.add_scalar('Val_loss', loss.item(), step)
            self.writer.add_scalar('Val_accuracy',
                                   categorical_accuracy(y, y_pred), step)

        logs[self.prefix + 'loss'] = totals['loss'] / seen
        logs[self.metric_name] = totals[self.metric_name] / seen
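
Every snippet on this page leans on categorical_accuracy. Its real definition lives elsewhere in the project; a minimal sketch consistent with how it is called here (label tensor and prediction scores in, scalar accuracy out) is:

import torch

# Sketch of the assumed helper, not the project's actual implementation.
def categorical_accuracy(y, y_pred):
    """Fraction of query samples whose argmax prediction matches the label."""
    return torch.eq(y_pred.argmax(dim=-1), y).sum().item() / y_pred.shape[0]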
Example #2
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        seen = 0
        totals = {'loss': 0, self.metric_name: 0}
        if isinstance(self.model, list):
            per_model_stats = {f'loss_{i}': 0 for i in range(len(self.model))}
            per_model_stats.update({
                self.metric_name + f"_{i}": 0
                for i in range(len(self.model))
            })

        for batch_index, batch in enumerate(self.taskloader):
            x, y = self.prepare_batch(batch)

            result = self.eval_fn(self.model,
                                  self.optimiser,
                                  self.loss_fn,
                                  x,
                                  y,
                                  n_shot=self.n_shot,
                                  k_way=self.k_way,
                                  q_queries=self.q_queries,
                                  train=False,
                                  **self.kwargs)

            loss = result['meta_batch_loss']
            y_pred = result['task_predictions']
            models_losses = result['models_losses']
            models_preds = result['models_predictions']

            # Accumulate per-model loss and accuracy when evaluating an ensemble.
            for i, (loss_i,
                    y_pred_i) in enumerate(zip(models_losses, models_preds)):
                y_pred_i = torch.cat(y_pred_i)
                per_model_stats[f'loss_{i}'] += np.mean(
                    loss_i) * y_pred_i.shape[0]
                per_model_stats[self.metric_name +
                                f"_{i}"] += categorical_accuracy(
                                    y, y_pred_i) * y_pred_i.shape[0]

            seen += y_pred.shape[0]

            totals['loss'] += loss.item() * y_pred.shape[0]
            totals[self.metric_name] += categorical_accuracy(
                y, y_pred) * y_pred.shape[0]

        logs[self.prefix + 'loss'] = totals['loss'] / seen
        logs[self.metric_name] = totals[self.metric_name] / seen
        if isinstance(self.model, list):
            for i in range(len(self.model)):
                logs[self.prefix + f'loss_{i}'] = per_model_stats[f'loss_{i}'] / seen
                logs[f'{self.metric_name}_{i}'] = \
                    per_model_stats[f'{self.metric_name}_{i}'] / seen
Example #3
    def on_train_end(self, logs=None):
        # Reload the saved checkpoint, freeze all parameters, and run a final evaluation pass.
        checkpoints = torch.load(self.checkpoint_filepath)
        self.model.load_state_dict(checkpoints)
        for param in self.model.parameters():
            param.requires_grad = False
        self.model.to(torch.device('cuda'), dtype=torch.double)

        logs = logs or {}
        seen = 0
        totals = {'loss': 0, self.metric_name: 0}
        for batch_index, batch in enumerate(self.taskloader):
            x, y = self.prepare_batch(batch)

            loss, y_pred = self.eval_fn(
                self.model,
                self.optimiser,
                self.loss_fn,
                x,
                y,
                n_shot=self.n_shot,
                k_way=self.k_way,
                q_queries=self.q_queries,
                train=False,
                **self.kwargs
            )

            seen += y_pred.shape[0]

            totals['loss'] += loss.item() * y_pred.shape[0]
            totals[self.metric_name] += categorical_accuracy(y, y_pred) * y_pred.shape[0]

        logs[self.prefix + 'loss'] = totals['loss'] / seen
        logs[self.metric_name] = totals[self.metric_name] / seen
        print(logs)
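
The loading step above hard-codes cuda. A device-agnostic variant of the same step, assuming (as here) the checkpoint file holds a plain state_dict, might look like:

        # Hypothetical device-agnostic variant of the loading step above.
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        state_dict = torch.load(self.checkpoint_filepath, map_location=device)
        self.model.load_state_dict(state_dict)
        self.model.to(device, dtype=torch.double)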
Example #4
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        seen = 0
        totals = {'loss': 0, self.metric_name: 0}
        # Each epoch draws 80 episodes of the test set from the NShotSampler.
        for batch_index, batch in enumerate(self.taskloader):
            x, y = self.prepare_batch(batch)
            # x holds (n_shot + q_queries) * k_way samples; y holds q_queries * k_way labels.
            # eval_fn returns the loss on the query samples and their predicted class probabilities.
            loss, y_pred = self.eval_fn(
                self.model,
                self.optimiser,
                self.loss_fn,
                x,
                y,
                n_shot=self.n_shot,
                k_way=self.k_way,
                q_queries=self.q_queries,
                train=False,
                **self.kwargs
            )

            seen += y_pred.shape[0]

            totals['loss'] += loss.item() * y_pred.shape[0]
            totals[self.metric_name] += categorical_accuracy(y, y_pred) * y_pred.shape[0]
        # Average the totals over all query samples seen:
        # seen == len(self.taskloader) * k_way * q_queries.
        logs[self.prefix + 'loss'] = totals['loss'] / seen
        logs[self.metric_name] = totals[self.metric_name] / seen
        print('Accuracy on eval', logs[self.metric_name])
        print('Loss on eval', logs[self.prefix + 'loss'])
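
As a worked check of the averaging above (the episode parameters below are illustrative, not values taken from this project):

# Hypothetical episode parameters, for illustration only.
episodes_per_epoch = 80  # batches drawn by the sampler per epoch
k_way = 5                # classes per episode
q_queries = 15           # query samples per class
# Each episode contributes k_way * q_queries query predictions, so:
seen = episodes_per_epoch * k_way * q_queries
print(seen)  # 6000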
Example #5
    def _validate(self, epoch, logs=None):
        logs = logs or {}
        seen = 0
        totals = {'loss': 0, self.metric_name: 0}
        for batch_index, batch in enumerate(self.taskloader):
            x, y = self.prepare_batch(batch)

            loss, y_pred = self.eval_fn(self.model,
                                        self.optimiser,
                                        self.loss_fn,
                                        x,
                                        y,
                                        n_shot=self.n_shot,
                                        k_way=self.k_way,
                                        q_queries=self.q_queries,
                                        train=False,
                                        **self.kwargs)

            seen += y_pred.shape[0]

            totals['loss'] += loss.item() * y_pred.shape[0]
            totals[self.metric_name] += categorical_accuracy(y, y_pred) * \
                                        y_pred.shape[0]

        logs[self.prefix + 'loss'] = totals['loss'] / seen
        logs[self.metric_name] = totals[self.metric_name] / seen
Example #6
File: core.py  Project: inverniz/few-shot
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        seen = 0
        totals = {'loss': 0, self.metric_name: 0}
        # Collect every label and prediction so set-level metrics can be computed after the loop.
        tot_y = np.array([], ndmin=1)
        tot_y_pred = np.array([], ndmin=1)
        for batch_index, batch in enumerate(self.taskloader):
            x, y = self.prepare_batch(batch)

            loss, y_pred = self.eval_fn(self.model,
                                        self.optimiser,
                                        self.loss_fn,
                                        x,
                                        y,
                                        n_shot=self.n_shot,
                                        k_way=self.k_way,
                                        q_queries=self.q_queries,
                                        train=False,
                                        **self.kwargs)

            seen += y_pred.shape[0]

            totals['loss'] += loss.item() * y_pred.shape[0]
            totals[self.metric_name] += categorical_accuracy(
                y, y_pred) * y_pred.shape[0]

            tot_y = np.concatenate((tot_y, y.data.cpu().numpy()), axis=0)
            tot_y_pred = np.concatenate(
                (tot_y_pred, y_pred.argmax(dim=-1).data.cpu().numpy()), axis=0)

        totals['mean_precision'] = mean_precision(tot_y, tot_y_pred)
        totals['mean_recall'] = mean_recall(tot_y, tot_y_pred)
        totals['cohen_kappa'] = cohen_kappa(tot_y, tot_y_pred)

        logs[self.prefix + 'loss'] = totals['loss'] / seen
        logs[self.metric_name] = totals[self.metric_name] / seen
        logs['mean_precision'] = totals['mean_precision']
        logs['mean_recall'] = totals['mean_recall']
        logs['cohen_kappa'] = totals['cohen_kappa']
Example #7
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        seen = 0
        totals = {'loss': 0, self.metric_name: 0}
        # Resolve the device once rather than on every batch.
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        for batch_index, batch in enumerate(self.taskloader):
            input_ids, attention_mask, label = self.prepare_batch(batch)

            # Drop the singleton dimension added during tokenisation, then move to the device.
            input_ids = torch.squeeze(input_ids, dim=1)
            attention_mask = torch.squeeze(attention_mask, dim=1)

            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)
            label = label.to(device)

            loss, y_pred = self.eval_fn(
                self.model,
                self.optimiser,
                self.loss_fn,
                input_ids,
                attention_mask,
                label,
                n_shot=self.n_shot,
                k_way=self.k_way,
                q_queries=self.q_queries,
                train=False,
                **self.kwargs
            )

            seen += y_pred.shape[0]

            totals['loss'] += loss.item() * y_pred.shape[0]
            totals[self.metric_name] += categorical_accuracy(label, y_pred) * y_pred.shape[0]

        logs[self.prefix + 'loss'] = totals['loss'] / seen
        logs[self.metric_name] = totals[self.metric_name] / seen
        
        wandb.log({"Loss": logs[self.prefix + 'loss'], "accuracy": logs[self.metric_name]})
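
wandb.log also accepts an explicit step argument; pinning each entry to the epoch index keeps runs comparable when callbacks fire at different rates. A minimal variant of the call above (same keys, purely illustrative):

        # Optional variant: pin the log entry to the epoch index.
        wandb.log({"Loss": logs[self.prefix + 'loss'],
                   "accuracy": logs[self.metric_name]}, step=epoch)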
Example #8
def test(model: Module,
         optimiser: Optimizer,
         loss_fn: Callable,
         dataloader: DataLoader,
         prepare_batch: Callable,
         eval_fn: Callable = gradient_step,
         eval_fn_kwargs: dict = None,
         prefix: str = 'test_'):
    # Avoid a mutable default argument; fall back to the original default here.
    if eval_fn_kwargs is None:
        eval_fn_kwargs = {'train': True}
    num_batches = len(dataloader)
    pbar = tqdm(total=num_batches, desc='Testing')
    loss_name = f'{prefix}loss'
    acc_name = f'{prefix}acc'

    seen = 0
    totals = {loss_name: 0, acc_name: 0}
    for batch_index, batch in enumerate(dataloader):
        x, y = prepare_batch(batch)

        loss, y_pred = eval_fn(model, optimiser, loss_fn, x, y,
                               **eval_fn_kwargs)

        loss_value = loss.item()
        acc_value = categorical_accuracy(y, y_pred)

        pbar.update(1)
        pbar.set_postfix({loss_name: loss_value, acc_name: acc_value})

        seen += y_pred.shape[0]

        totals[loss_name] += loss_value * y_pred.shape[0]
        totals[acc_name] += acc_value * y_pred.shape[0]

    pbar.close()
    totals[loss_name] = totals[loss_name] / seen
    totals[acc_name] = totals[acc_name] / seen

    print(totals)
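
A minimal call sketch, reusing helpers that appear in the other examples on this page (test_taskloader stands in for an episodic DataLoader like the ones above):

# Sketch only: wiring test() with helpers seen elsewhere in these examples.
model = get_few_shot_encoder(num_input_channels=3)
optimiser = Adam(model.parameters(), lr=1e-3)
test(model,
     optimiser,
     loss_fn=torch.nn.NLLLoss(),
     dataloader=test_taskloader,
     prepare_batch=prepare_nshot_task(5, 5, 15),
     eval_fn_kwargs={'train': False})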
Example #9
seen = 0
totals = {'loss': 0, 'ca': 0}
print(test_taskloader.dataset.subset)
for batch_index, batch in enumerate(test_taskloader):
    x, y = prepare_meta_batch(args.n, args.k, args.q, 1)(batch)

    loss, y_pred = meta_gradient_step(
        meta_model,
        meta_optimiser,
        loss_fn,
        x,
        y,
        n_shot=args.n,
        k_way=args.k,
        q_queries=args.q,
        train=False,
        inner_train_steps=args.inner_val_steps,
        inner_lr=args.inner_lr,
        device=device,
        order=args.order,
    )

    seen += y_pred.shape[0]

    totals['loss'] += loss.item() * y_pred.shape[0]
    # Weight the per-batch accuracy by batch size so the division by `seen`
    # below gives a correct sample-weighted mean.
    totals['ca'] += categorical_accuracy(y[:, -1:, :],
                                         y_pred) * y_pred.shape[0]

print(totals['loss'] / seen)
print(totals['ca'] / seen)
Example #10
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        default=
        "./models/proto_nets/miniImageNet_nt=5_kt=5_qt=10_nv=5_kv=5_qv=10_dist=l2_sampling_method=True_is_diverisity=True.pth",
        help="model path")
    parser.add_argument(
        "--result_path",
        type=str,
        default="./results/proto_nets/5shot_training_5shot_diverisity.csv",
        help="Directory for evaluation report result (for experiments)")
    parser.add_argument('--dataset', type=str, required=True)
    parser.add_argument('--distance', default='cosine')
    parser.add_argument('--n_train', default=1, type=int)
    parser.add_argument('--n_test', default=1, type=int)
    parser.add_argument('--k_train', default=5, type=int)
    parser.add_argument('--k_test', default=5, type=int)
    parser.add_argument('--q_train', default=15, type=int)
    parser.add_argument('--q_test', default=15, type=int)
    parser.add_argument(
        "--debug",
        action="store_true",
        help="set logging level DEBUG",
    )
    args = parser.parse_args()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.DEBUG if args.debug else logging.INFO,
    )

    ###################
    # Create datasets #
    ###################
    episodes_per_epoch = 600

    if args.dataset == 'miniImageNet':
        n_epochs = 5
        dataset_class = MiniImageNet
        num_input_channels = 3
    else:
        raise ValueError(f'Unsupported dataset: {args.dataset}; only miniImageNet is implemented.')

    test_dataset = dataset_class('test')
    test_dataset_taskloader = DataLoader(
        test_dataset,
        batch_sampler=NShotTaskSampler(test_dataset, episodes_per_epoch,
                                       args.n_test, args.k_test, args.q_test),
        num_workers=4)

    #########
    # Model #
    #########
    model = get_few_shot_encoder(num_input_channels).to(device,
                                                        dtype=torch.double)

    # strict=False tolerates missing or unexpected keys in the checkpoint.
    model.load_state_dict(torch.load(args.model_path), strict=False)
    model.eval()

    #############
    # Inference #
    #############
    logger.info("***** Epochs = %d *****", n_epochs)
    logger.info("***** Num episodes per epoch = %d *****", episodes_per_epoch)

    result_writer = ResultWriter(args.result_path)

    # prepare_batch reshapes each episode into the (x, y) pair proto_net_episode expects.
    prepare_batch = prepare_nshot_task(args.n_test, args.k_test, args.q_test)
    optimiser = Adam(model.parameters(), lr=1e-3)
    loss_fn = torch.nn.NLLLoss().cuda()

    train_iterator = trange(
        0,
        int(n_epochs),
        desc="Epoch",
    )
    for i_epoch in train_iterator:
        epoch_iterator = tqdm(
            test_dataset_taskloader,
            desc="Iteration",
        )
        seen = 0
        metric_name = f'test_{args.n_test}-shot_{args.k_test}-way_acc'
        metric = {metric_name: 0.0}
        for _, batch in enumerate(epoch_iterator):
            x, y = prepare_batch(batch)

            loss, y_pred = proto_net_episode(model,
                                             optimiser,
                                             loss_fn,
                                             x,
                                             y,
                                             n_shot=args.n_test,
                                             k_way=args.k_test,
                                             q_queries=args.q_test,
                                             train=False,
                                             distance=args.distance)

            seen += y_pred.shape[0]
            metric[metric_name] += categorical_accuracy(
                y, y_pred) * y_pred.shape[0]

        metric[metric_name] = metric[metric_name] / seen

        logger.info("epoch: {},     categorical_accuracy: {}".format(
            i_epoch, metric[metric_name]))
        result_writer.update(**metric)
Example #11
def evaluate_few_shot(state_dict,
                      n_shot,
                      k_way,
                      q_queries,
                      device,
                      architecture='resnet18',
                      pretrained=False,
                      small_dataset=False,
                      metric_name=None,
                      evaluation_episodes=1000,
                      num_input_channels=3,
                      distance='l2'):
    if not pretrained:
        model = get_few_shot_encoder(num_input_channels)
        model.load_state_dict(state_dict)
    else:
        model = models.__dict__[architecture](pretrained=True)
        model.fc = Identity()
        model.load_state_dict(state_dict)
    # Move the model to the requested device (the loss function is moved there below as well).
    model.to(device)

    dataset_class = FashionProductImagesSmall if small_dataset \
        else FashionProductImages

    # Meta-test set
    resize = (80, 60) if small_dataset else (400, 300)
    evaluation_transform = transforms.Compose([
        transforms.Resize(resize),
        transforms.ToTensor(),
    ])

    # Use the dataset class selected above (the original hard-coded the small variant).
    evaluation = dataset_class(DATA_PATH,
                               split='all',
                               classes='evaluation',
                               transform=evaluation_transform)
    sampler = NShotTaskSampler(evaluation, evaluation_episodes, n_shot, k_way,
                               q_queries)
    taskloader = DataLoader(evaluation, batch_sampler=sampler, num_workers=4)
    prepare_batch = prepare_nshot_task(n_shot, k_way, q_queries)

    if metric_name is None:
        metric_name = f'test_{n_shot}-shot_{k_way}-way_acc'
    seen = 0
    totals = {'loss': 0, metric_name: 0}

    optimiser = torch.optim.Adam(model.parameters(), lr=1e-3)
    loss_fn = torch.nn.NLLLoss().to(device)

    for batch_index, batch in enumerate(taskloader):
        x, y = prepare_batch(batch)

        loss, y_pred = proto_net_episode(model,
                                         optimiser,
                                         loss_fn,
                                         x,
                                         y,
                                         n_shot=n_shot,
                                         k_way=k_way,
                                         q_queries=q_queries,
                                         train=False,
                                         distance=distance)

        seen += y_pred.shape[0]

        totals['loss'] += loss.item() * y_pred.shape[0]
        totals[metric_name] += categorical_accuracy(y, y_pred) * \
                               y_pred.shape[0]

    totals['loss'] = totals['loss'] / seen
    totals[metric_name] = totals[metric_name] / seen

    return totals
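
A minimal call sketch (the checkpoint path and device choice below are illustrative, not fixed by this function):

# Sketch only: evaluating a saved checkpoint with the function above.
state_dict = torch.load('checkpoint.pth', map_location='cpu')
totals = evaluate_few_shot(state_dict,
                           n_shot=5,
                           k_way=5,
                           q_queries=15,
                           device=torch.device('cpu'),
                           evaluation_episodes=100)
print(totals)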