Example #1
def main():
    global args, best_acc1, device

    criterion = PrototypicalLoss().to(device)

    cudnn.benchmark = True

    runs_path = glob('runs/*')
    max_len_exp = max((len(x) for x in runs_path), default=0) + 2
    print(f"|{'Experiment':^{max_len_exp}}|{'Loss':^17}|{'ACC':^17}|")

    except_list = []
    pl_mi = "\u00B1"  # the ± sign used in the result strings

    for exp in runs_path:
        checkpoint, args = None, None
        files = glob(exp + '/*')
        for file in files:
            if file.endswith('model_best.pth'):
                checkpoint = torch.load(os.path.abspath(file))
            elif file.endswith('.json'):
                with open(os.path.abspath(file)) as f:
                    args = SimpleNamespace(**json.load(f))

        if checkpoint is None or args is None:
            except_list.append(f"checkpoint and params are not exist in {exp}")
            continue

        if args.dataset == 'omniglot':
            test_loader = get_dataloader(args, 'test')
        else:
            test_loader = get_dataloader(args, 'val')

        input_dim = 1 if args.dataset == 'omniglot' else 3
        if args.model == 'protonet':
            model = ProtoNet(input_dim).to(device)
        else:
            model = ResNet(input_dim).to(device)

        model.load_state_dict(checkpoint['model_state_dict'])
        best_acc1 = checkpoint['best_acc1']

        loss_list, acc_list = test(test_loader, model, criterion)

        loss, loss_moe = margin_of_error(loss_list)
        acc, acc_moe = margin_of_error(acc_list)

        loss_string = f'{loss:.3f} {pl_mi} {loss_moe:.3f}'
        acc_string = f'{acc:.3f} {pl_mi} {acc_moe:.3f}'

        print(f"|{exp:^{max_len_exp}}|{loss_string:^16}|{acc_string:^16}|")

    if except_list:
        pp(except_list)
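Example #1 calls a `margin_of_error` helper that is not shown. A minimal sketch of what it plausibly computes, assuming a 95% confidence interval over per-episode scores (the interface is taken from the call sites above; the z-score constant is an assumption):

import math

def margin_of_error(values, z=1.96):
    # Hypothetical helper: returns the mean and the 95% confidence
    # half-width of a list of per-episode scores (needs len(values) > 1).
    n = len(values)
    mean = sum(values) / n
    std = math.sqrt(sum((v - mean) ** 2 for v in values) / (n - 1))
    return mean, z * std / math.sqrt(n)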
Example #2
    def __init__(self, args, mode='train'):

        # init model and optimizer
        self.model = ProtoNet(args)
        print(self.model)
        if torch.cuda.is_available():
            self.model.cuda()

        if mode == 'train':
            if args.use_attention:
                self.optimizer = torch.optim.Adam(
                    [{
                        'params': self.model.encoder.parameters(),
                        'lr': 0.0001
                    }, {
                        'params': self.model.base_learner.parameters()
                    }, {
                        'params': self.model.att_learner.parameters()
                    }],
                    lr=args.lr)
            else:
                self.optimizer = torch.optim.Adam(
                    [{
                        'params': self.model.encoder.parameters(),
                        'lr': 0.0001
                    }, {
                        'params': self.model.base_learner.parameters()
                    }, {
                        'params': self.model.linear_mapper.parameters()
                    }],
                    lr=args.lr)
            # set the learning-rate scheduler
            self.lr_scheduler = optim.lr_scheduler.StepLR(
                self.optimizer, step_size=args.step_size, gamma=args.gamma)
            # load pretrained model for point cloud encoding
            self.model = load_pretrain_checkpoint(
                self.model, args.pretrain_checkpoint_path)
        elif mode == 'test':
            # Load model checkpoint
            self.model = load_model_checkpoint(self.model,
                                               args.model_checkpoint_path,
                                               mode='test')
        else:
            raise ValueError(f'Wrong ProtoLearner mode ({mode})! Options: train/test')
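The constructor above gives the pretrained encoder a smaller learning rate than the freshly initialized heads by passing per-parameter-group options to Adam. A self-contained sketch of the same pattern, with two `nn.Linear` modules standing in for the encoder and learner sub-modules:

import torch
import torch.nn as nn

encoder = nn.Linear(16, 8)  # stand-in for self.model.encoder
head = nn.Linear(8, 2)      # stand-in for self.model.base_learner

# The first group overrides the default lr; the second inherits lr=1e-3.
optimizer = torch.optim.Adam(
    [{'params': encoder.parameters(), 'lr': 1e-4},
     {'params': head.parameters()}],
    lr=1e-3)

for group in optimizer.param_groups:
    print(group['lr'])  # 0.0001, then 0.001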
Example #3
    def get_parser(parser=None):
        if parser is None:
            parser = argparse.ArgumentParser(description='RelationNet')
        parser = ProtoNet.get_parser(parser)
        parser.add_argument('--loss_type',
                            type=str,
                            choices=['mse', 'softmax'],
                            default='mse')
        return parser
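Assuming `get_parser` is exposed as a static method of a `RelationNet` class (the class name is inferred from the parser description) and that `ProtoNet.get_parser` registers no required arguments, usage would look like:

parser = RelationNet.get_parser()
args = parser.parse_args(['--loss_type', 'softmax'])
assert args.loss_type == 'softmax'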
Example #4
    train_loader = DataLoader(dataset=trainset,
                              batch_sampler=train_sampler,
                              num_workers=8,
                              pin_memory=True)

    valset = Dataset('val', args)
    val_sampler = CategoriesSampler(valset.label, 500, args.validation_way,
                                    args.shot + args.query)
    val_loader = DataLoader(dataset=valset,
                            batch_sampler=val_sampler,
                            num_workers=8,
                            pin_memory=True)

    if args.model_type.lower() == 'protonet':
        model = ProtoNet(args)
    elif args.model_type.lower() == 'hypnet':
        model = HypNet(args)
    elif args.model_type.lower() == 'protonetwithhyperbolic':
        model = ProtoNetWithHyperbolic(args)
    else:
        # Fail fast instead of letting model stay None and crash later.
        raise ValueError(f'Unknown model_type: {args.model_type}')

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    if args.lr_decay:
        lr_scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer, step_size=args.step_size, gamma=args.gamma)

    # load pre-trained model (no FC weights)
    model_dict = model.state_dict()
    if args.init_weights is not None:
        pretrained_dict = torch.load(args.init_weights)['params']
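Example #4 is cut off right after the pretrained weights are read. A typical continuation of this "no FC weights" pattern, sketched as an assumption about how the snippet proceeds, keeps only the entries the current model also has so the classifier head retains its fresh initialization:

    # Hypothetical continuation: drop keys that the current model lacks or
    # whose shapes differ (e.g. the FC layer), then load the merged dict.
    pretrained_dict = {
        k: v for k, v in pretrained_dict.items()
        if k in model_dict and v.shape == model_dict[k].shape
    }
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)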
Example #5
                hidden_size=args.lang_hidden_size,
                rnn=args.rnn_type,
                num_layers=args.rnn_num_layers,
                dropout=args.rnn_dropout,
            )
            l3_model = l3_model.cuda()

        embedding_model = embedding_model.cuda()
        lang_model = lang_model.cuda()

    model = ProtoNet(
        model_dict[args.model],
        n_way=args.test_n_way,
        n_support=args.n_shot,
        # Language options
        lsl=args.lsl,
        language_model=lang_model,
        lang_supervision=args.lang_supervision,
        l3=args.l3,
        l3_model=l3_model,
        l3_n_infer=args.l3_n_infer,
    )

    model = model.cuda()

    if args.save_iter != -1:
        modelfile = get_assigned_file(args.checkpoint_dir, args.save_iter)
    else:
        modelfile = get_best_file(args.checkpoint_dir)

    if modelfile is not None:
        tmp = torch.load(modelfile)
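The snippet stops immediately after `torch.load`. A one-line continuation consistent with how such checkpoints are commonly stored (the 'state' key is an assumption):

        model.load_state_dict(tmp['state'])  # 'state' key is an assumption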
Example #6
def main():
    global args, best_acc1, device

    # Init seed
    np.random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    torch.cuda.manual_seed(args.manual_seed)

    if args.dataset == 'omniglot':
        train_loader, val_loader = get_dataloader(args, 'trainval', 'test')
        input_dim = 1
    else:
        train_loader, val_loader = get_dataloader(args, 'train', 'val')
        input_dim = 3

    if args.model == 'protonet':
        model = ProtoNet(input_dim).to(device)
        print("ProtoNet loaded")
    else:
        model = ResNet(input_dim).to(device)
        print("ResNet loaded")

    criterion = PrototypicalLoss().to(device)

    optimizer = torch.optim.Adam(model.parameters(), args.lr)

    cudnn.benchmark = True

    if args.resume:
        try:
            # Sort by (length, name) so unpadded epoch numbers order correctly
            # (e.g. checkpoint_9.pth before checkpoint_10.pth).
            checkpoint = torch.load(
                sorted(glob(f'{args.log_dir}/checkpoint_*.pth'),
                       key=lambda p: (len(p), p))[-1])
        except Exception:
            checkpoint = torch.load(args.log_dir + '/model_best.pth')
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        start_epoch = checkpoint['epoch'] + 1  # resume from the epoch after the saved one
        best_acc1 = checkpoint['best_acc1']

        print(f"load checkpoint {args.exp_name}")
    else:
        start_epoch = 1

    scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer=optimizer,
        gamma=args.lr_scheduler_gamma,
        step_size=args.lr_scheduler_step)

    print(
        f"trainable parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)}"
    )

    for epoch in range(start_epoch, args.epochs + 1):

        train_loss = train(train_loader, model, optimizer, criterion, epoch)

        is_test = epoch % args.test_iter == 0
        if is_test or epoch == args.epochs or epoch == 1:

            val_loss, acc1 = validate(val_loader, model, criterion, epoch)

            if acc1 >= best_acc1:
                is_best = True
                best_acc1 = acc1
            else:
                is_best = False

            save_checkpoint(
                {
                    'epoch': epoch,
                    'model_state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer_state_dict': optimizer.state_dict(),
                }, is_best, args)

            if is_best:
                writer.add_scalar("BestAcc", acc1, epoch)

            print(
                f"[{epoch}/{args.epochs}] {train_loss:.3f}, {val_loss:.3f}, {acc1:.3f}, # {best_acc1:.3f}"
            )

        else:
            print(f"[{epoch}/{args.epochs}] {train_loss:.3f}")

        scheduler.step()

    writer.close()
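`save_checkpoint` itself is not shown. A minimal sketch of the conventional implementation, assuming the same `args.log_dir` layout that the resume branch reads from:

import shutil
import torch

def save_checkpoint(state, is_best, args):
    # Hypothetical helper matching the call in main(): persist the latest
    # state, and mirror it to model_best.pth when it is the best so far.
    path = f"{args.log_dir}/checkpoint_{state['epoch']}.pth"
    torch.save(state, path)
    if is_best:
        shutil.copyfile(path, f"{args.log_dir}/model_best.pth")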
Example #7
    )
    print("Loading val data\n")
    val_loader = val_datamgr.get_data_loader(
        val_file,
        aug=False,
        lang_dir=constants.LANG_DIR,
        normalize=True,
        vocab=vocab,
    )
    # a batch for SetDataManager: a [n_way, n_support + n_query, dim, w, h] tensor

    model = ProtoNet(
        model_dict[args.model],
        **train_few_shot_args,
        # Language options
        lsl=args.lsl,
        language_model=lang_model,
        lang_supervision=args.lang_supervision,
        l3=args.l3,
        l3_model=l3_model,
        l3_n_infer=args.l3_n_infer)

    model = model.cuda()

    os.makedirs(args.checkpoint_dir, exist_ok=True)

    start_epoch = args.start_epoch
    stop_epoch = args.stop_epoch

    if args.resume:
        resume_file = get_resume_file(args.checkpoint_dir)
        if resume_file is not None:
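Example #7 ends inside the resume branch. A plausible continuation, assuming the checkpoint dict uses 'epoch' and 'state' keys as in similar training scripts:

            tmp = torch.load(resume_file)
            start_epoch = tmp['epoch'] + 1       # 'epoch' key is an assumption
            model.load_state_dict(tmp['state'])  # 'state' key is an assumption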
Example #8
class ProtoLearner(object):
    def __init__(self, args, mode='train'):

        # init model and optimizer
        self.model = ProtoNet(args)
        print(self.model)
        if torch.cuda.is_available():
            self.model.cuda()

        if mode == 'train':
            if args.use_attention:
                self.optimizer = torch.optim.Adam(
                    [{
                        'params': self.model.encoder.parameters(),
                        'lr': 0.0001
                    }, {
                        'params': self.model.base_learner.parameters()
                    }, {
                        'params': self.model.att_learner.parameters()
                    }],
                    lr=args.lr)
            else:
                self.optimizer = torch.optim.Adam(
                    [{
                        'params': self.model.encoder.parameters(),
                        'lr': 0.0001
                    }, {
                        'params': self.model.base_learner.parameters()
                    }, {
                        'params': self.model.linear_mapper.parameters()
                    }],
                    lr=args.lr)
            # set the learning-rate scheduler
            self.lr_scheduler = optim.lr_scheduler.StepLR(
                self.optimizer, step_size=args.step_size, gamma=args.gamma)
            # load pretrained model for point cloud encoding
            self.model = load_pretrain_checkpoint(
                self.model, args.pretrain_checkpoint_path)
        elif mode == 'test':
            # Load model checkpoint
            self.model = load_model_checkpoint(self.model,
                                               args.model_checkpoint_path,
                                               mode='test')
        else:
            raise ValueError(f'Wrong ProtoLearner mode ({mode})! Options: train/test')

    def train(self, data):
        """
        Args:
            data: a list of torch tensors with the following entries.
            - support_x: support point clouds with shape (n_way, k_shot, in_channels, num_points)
            - support_y: support masks (foreground) with shape (n_way, k_shot, num_points)
            - query_x: query point clouds with shape (n_queries, in_channels, num_points)
            - query_y: query labels with shape (n_queries, num_points)
        """

        [support_x, support_y, query_x, query_y] = data
        self.model.train()

        query_logits, loss = self.model(support_x, support_y, query_x, query_y)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        self.lr_scheduler.step()

        query_pred = F.softmax(query_logits, dim=1).argmax(dim=1)
        correct = torch.eq(query_pred,
                           query_y).sum().item()  # including background class
        accuracy = correct / (query_y.shape[0] * query_y.shape[1])

        return loss, accuracy

    def test(self, data):
        """
        Args:
            support_x: support point clouds with shape (n_way, k_shot, in_channels, num_points)
            support_y: support masks (foreground) with shape (n_way, k_shot, num_points), each point in {0, 1}.
            query_x: query point clouds with shape (n_queries, in_channels, num_points)
            query_y: query labels with shape (n_queries, num_points), each point in {0, ..., n_way}.
        """
        [support_x, support_y, query_x, query_y] = data
        self.model.eval()

        with torch.no_grad():
            logits, loss = self.model(support_x, support_y, query_x, query_y)
            pred = F.softmax(logits, dim=1).argmax(dim=1)
            correct = torch.eq(pred, query_y).sum().item()
            accuracy = correct / (query_y.shape[0] * query_y.shape[1])

        return pred, loss, accuracy
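For orientation, the episode tensors `ProtoLearner.train` expects can be mocked from the docstring's shape conventions as below; constructing the learner itself needs a fully populated `args`, so that step is left commented:

import torch

# 2-way 1-shot episode: 3-channel point clouds with 2048 points, 4 queries.
n_way, k_shot, in_ch, n_pts, n_q = 2, 1, 3, 2048, 4
support_x = torch.randn(n_way, k_shot, in_ch, n_pts)
support_y = torch.randint(0, 2, (n_way, k_shot, n_pts)).float()  # binary fg masks
query_x = torch.randn(n_q, in_ch, n_pts)
query_y = torch.randint(0, n_way + 1, (n_q, n_pts))  # 0 = background

# learner = ProtoLearner(args, mode='train')  # requires a configured args
# loss, accuracy = learner.train([support_x, support_y, query_x, query_y])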