Code Example #1
    def run_full_evaluation(self, model, model_path, model_tag, shots, method, callback, target_split_dir, checking, query_shots, number_tasks, n_ways, inductive):
        """
        Run the evaluation over all the tasks in parallel
        inputs:
            model : The loaded model containing the feature extractor
            loaders_dic : Dictionary containing training and testing loaders
            model_path : Where was the model loaded from
            model_tag : Which model ('final' or 'best') to load
            method : Which method to use for inference ("baseline", "tim-gd" or "tim-adm")
            shots : Number of support shots to try

        returns :
            results : List of the mean accuracy for each number of support shots
        """
        print("=> Runnning full evaluation with method: {}".format(method))

        # Load pre-trained model
        load_checkpoint(model=model, model_path=model_path, type=model_tag)

        # Get loaders
        loaders_dic = self.get_loaders()

        # Extract features (just load them if already in memory)
        extracted_features_shots_dic = self.extract_features_shots(model=model,
                                                       model_path=model_path,
                                                       loaders_dic=loaders_dic)

        extracted_features_queries_dic = self.extract_features_queries(model=model,
                                                        model_path=model_path,
                                                        loaders_dic=loaders_dic)


        results = []
    
        for shot in shots:
            tasks = self.generate_task(extracted_features_shots_dic=extracted_features_shots_dic,
                                    extracted_features_queries_dic=extracted_features_queries_dic, 
                                    shot=shot)
            if inductive:
                tabla = self.run_task_inductive(task_dic=tasks,
                                    model=model,
                                    callback=callback,
                                    n_ways=n_ways)
            else:
                tabla = self.run_task_transductive(task_dic=tasks,
                    model=model,
                    callback=callback,
                    n_ways=n_ways)
            n_img = sum(query_shots)  # total number of query images
            print('Confusion matrix: \n'+str(tabla))

        return results
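The call above assumes a project-specific load_checkpoint(model, model_path, type) helper. A minimal sketch of what such a helper might look like is shown below; the checkpoint file naming and the 'state_dict' key are assumptions for illustration, not taken from the example.

import os

import torch


def load_checkpoint(model, model_path, type='best'):
    """Hypothetical helper: load '<model_path>/<type>.pth.tar' into `model`."""
    ckpt_file = os.path.join(model_path, '{}.pth.tar'.format(type))
    checkpoint = torch.load(ckpt_file, map_location='cpu')
    # If the file holds a raw state_dict, fall back to using it directly.
    state_dict = checkpoint.get('state_dict', checkpoint)
    model.load_state_dict(state_dict)
    return checkpoint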
Code Example #2
def setup_model(args):
    """Setup model and optimizer."""

    model = get_model(args)
    if DEEPSPEED_WRAP and args.deepspeed:
        print_rank_0("DeepSpeed is enabled.")

        model, optimizer, _, lr_scheduler = DEEPSPEED_WRAP.deepspeed.initialize(
            model=model,
            optimizer=None,
            args=args,
            lr_scheduler=None,
            mpu=mpu,
            dist_init_required=False)

    print("Load checkpoint from " + args.load)
    _ = load_checkpoint(model,
                        None,
                        None,
                        args,
                        deepspeed=DEEPSPEED_WRAP and args.deepspeed)
    model.eval()
    print("Loaded")
    if args.export_huggingface is not None:
        export_to_huggingface_model(model, args.export_huggingface)
        print(f"Exported in huggingface format to {args.export_huggingface}")

    return model
Code Example #3
def setup_model_and_optimizer(args):
    """Setup model and optimizer."""

    print ("setting up model...")
    model = get_model(args)
    print ("setting up optimizer...")
    optimizer = get_optimizer(model, args)
    print ("setting up lr scheduler...")
    lr_scheduler = get_learning_rate_scheduler(optimizer, args)

    
    if DEEPSPEED_WRAP and args.deepspeed:
        print_rank_0("DeepSpeed is enabled.")

        print ("Calling deepspeed.initialize with our model, optimizer and scheduler")
        model, optimizer, _, lr_scheduler = DEEPSPEED_WRAP.deepspeed.initialize(
            model=model,
            optimizer=optimizer,
            args=args,
            lr_scheduler=lr_scheduler,
            mpu=mpu,
            dist_init_required=False
        )
        print ("We've wrapped our model, optimizer and scheduler in DeepSpeed")

    if args.load is not None:
        print_rank_0("Load checkpoint from " + args.load)
        args.iteration = load_checkpoint(model, optimizer, lr_scheduler, args, deepspeed=DEEPSPEED_WRAP and args.deepspeed)
        print_rank_0("Checkpoint loaded")
#         input ("This was all it took? Mother...")
    else:
        args.iteration = 0

    print ("returning our model, optimizer and scheduler")    
    return model, optimizer, lr_scheduler
Code Example #4
def predict(model_path, devices, compression, message):
    """WIP"""

    os.environ['CUDA_VISIBLE_DEVICES'] = devices

    test_img_paths = list(map(str, Path(ROOT + 'data/test/').glob('*.jpg')))
    submission = pd.read_csv(ROOT + 'data/sample_submission.csv')

    model = models.UNet(in_channels=3,
                        n_classes=2,
                        depth=4,
                        ch_first=32,
                        padding=True,
                        batch_norm=False,
                        up_mode='upconv').cuda()
    model = utils.load_checkpoint(model, model_path)

    sub_path = ROOT + f'submit/{params["ex_name"]}.csv'
    if compression:
        sub_path += '.gz'
        submission.to_csv(sub_path, index=False, compression='gzip')
    else:
        submission.to_csv(sub_path, index=False)

    if message is None:
        message = params['ex_name']

    cmd = f'kaggle c submit -c airbus-ship-detection -f {sub_path} -m "{message}"'
    subprocess.run(cmd, shell=True)
Code Example #5
def load_colbert(args):
    print_message("#> Loading model checkpoint.")
    if args.dense:
        colbert = ColBERT.from_pretrained(
            "bert-base-uncased",
            query_maxlen=args.query_maxlen,
            doc_maxlen=args.doc_maxlen,
            dim=args.dim,
            similarity_metric=args.similarity,
        )
    else:
        colbert = SparseColBERT.from_pretrained(
            "bert-base-uncased",
            query_maxlen=args.query_maxlen,
            doc_maxlen=args.doc_maxlen,
            k=args.k,
            n=args.n,
            use_nonneg=args.use_nonneg,
            normalize_sparse=args.normalize_sparse,
            similarity_metric=args.similarity,
        )
    colbert = colbert.to(DEVICE)
    checkpoint = load_checkpoint(args.checkpoint, colbert)
    colbert.eval()

    print("\n")

    return colbert, checkpoint
Code Example #6
def setup_model_and_optimizer(args):
    """Setup model and optimizer."""

    model = get_model(args)
    optimizer = get_optimizer(model, args)
    lr_scheduler = get_learning_rate_scheduler(optimizer, args)

    if DEEPSPEED_WRAP and args.deepspeed:
        print_rank_0("DeepSpeed is enabled.")

        model, optimizer, _, lr_scheduler = DEEPSPEED_WRAP.deepspeed.initialize(
            model=model,
            optimizer=optimizer,
            args=args,
            lr_scheduler=lr_scheduler,
            mpu=mpu,
            dist_init_required=False
        )

    if args.load is not None:
        print_rank_0("Load checkpoint from " + args.load)
        args.iteration = load_checkpoint(model, optimizer, lr_scheduler, args, deepspeed=DEEPSPEED_WRAP and args.deepspeed)
        print_rank_0("Checkpoint loaded")
    else:
        args.iteration = 0

    return model, optimizer, lr_scheduler
Code Example #7
def create_model(project_parameters):
    model = Net(project_parameters=project_parameters)
    if project_parameters.checkpoint_path is not None:
        model = load_checkpoint(
            model=model,
            num_classes=project_parameters.num_classes,
            use_cuda=project_parameters.use_cuda,
            checkpoint_path=project_parameters.checkpoint_path)
    return model
Code Example #8
    def run_full_evaluation(self, model, model_path, model_tag, shots, method,
                            callback):
        """
        Run the evaluation over all the tasks in parallel
        inputs:
            model : The loaded model containing the feature extractor
            loaders_dic : Dictionary containing training and testing loaders
            model_path : Where was the model loaded from
            model_tag : Which model ('final' or 'best') to load
            method : Which method to use for inference ("baseline", "tim-gd" or "tim-adm")
            shots : Number of support shots to try

        returns :
            results : List of the mean accuracy for each number of support shots
        """
        print("=> Runnning full evaluation with method: {}".format(method))

        # Load pre-trained model
        load_checkpoint(model=model, model_path=model_path, type=model_tag)

        # Get loaders
        loaders_dic = self.get_loaders()

        # Extract features (just load them if already in memory)
        extracted_features_dic = self.extract_features(model=model,
                                                       model_path=model_path,
                                                       loaders_dic=loaders_dic)
        results = []
        for shot in shots:

            tasks = self.generate_tasks(
                extracted_features_dic=extracted_features_dic, shot=shot)
            logs = self.run_task(task_dic=tasks,
                                 model=model,
                                 callback=callback)

            l2n_mean, l2n_conf = compute_confidence_interval(logs['acc'][:, -1])

            print('==> Meta Test: {} \nfeature\tL2N\n{}-shot \t{:.4f}({:.4f})'.format(
                model_tag.upper(), shot, l2n_mean, l2n_conf))
            results.append(l2n_mean)
        return results
Code Example #9
File: loaders.py Project: DI4IR/SIGIR2021
def load_colbert(args):
    print_message("#> Loading model checkpoint.")
    colbert = MultiBERT.from_pretrained('bert-base-uncased')
    colbert = colbert.to(DEVICE)
    checkpoint = load_checkpoint(args.checkpoint, colbert)
    colbert.eval()

    print('\n')

    return colbert, checkpoint
Code Example #10
File: gcn.py Project: AChepurnoi/kaggle-airbus-ship
def load_model(checkpoint=None):
    model = GCN(1, 768)
    print("GCN is loading!")
    state = {'epoch': 0, 'lb_acc': 0}
    if LOAD_CHECKPOINT:
        state = load_checkpoint(model, checkpoint)

    # model.freeze_encoder()
    model.train()
    model.to(DEVICE)
    print("Trainable Parameters: %s" % count_parameters(model))
    return model, state
Code Example #11
def load_model(args):
    args.model = SparseColBERT.from_pretrained(
        "bert-base-uncased",
        query_maxlen=args.query_maxlen,
        doc_maxlen=args.doc_maxlen,
        k=args.k,
        n=args.n,
        k_inference_factor=args.k_inference_factor,
    )
    args.model = args.model.to(DEVICE)
    checkpoint = load_checkpoint(args.checkpoint, args.model)
    args.model.eval()

    return args.model, checkpoint
Code Example #12
def load_colbert(args):
    print_message("#> Loading model checkpoint.")
    colbert = ColBERT.from_pretrained('bert-base-uncased',
                                      query_maxlen=args.query_maxlen,
                                      doc_maxlen=args.doc_maxlen,
                                      dim=args.dim,
                                      similarity_metric=args.similarity)
    colbert = colbert.to(DEVICE)
    checkpoint = load_checkpoint(args.checkpoint, colbert)
    colbert.eval()

    print('\n')

    return colbert, checkpoint
Code Example #13
    def test_save_and_load_checkpoint(self):
        model = torchvision.models.resnet18(pretrained=False)
        utils.save_checkpoint(model,
                              epoch=100,
                              filename='tmp.pth',
                              save_arch=True)

        loaded_model = utils.load_model('tmp.pth')

        torch.testing.assert_allclose(model.conv1.weight,
                                      loaded_model.conv1.weight)

        model.conv1.weight = nn.Parameter(torch.zeros_like(model.conv1.weight))
        model = utils.load_checkpoint('tmp.pth', model=model)['model']

        assert (model.conv1.weight != 0).any()
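The test above exercises three helpers from a project-level utils module. Below is a minimal sketch of implementations that would satisfy this test; the checkpoint keys and the save_arch behaviour are assumptions for illustration, not the project's actual API.

import torch


def save_checkpoint(model, epoch, filename, save_arch=False):
    # Store the weights and epoch; optionally pickle the full model object
    # so that load_model() can restore the architecture as well.
    payload = {'state_dict': model.state_dict(), 'epoch': epoch}
    if save_arch:
        payload['arch'] = model
    torch.save(payload, filename)


def load_model(filename):
    # Return the architecture (with weights) saved via save_arch=True.
    return torch.load(filename, map_location='cpu')['arch']


def load_checkpoint(filename, model):
    # Restore the saved weights into `model` and return them in a dict.
    payload = torch.load(filename, map_location='cpu')
    model.load_state_dict(payload['state_dict'])
    return {'model': model, 'epoch': payload['epoch']}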
Code Example #14
def load_model(checkpoint=None):
    model = AttentionUnet()
    state = {'epoch': 0, 'lb_acc': 0}
    if LOAD_CHECKPOINT:
        state = load_checkpoint(model, checkpoint)

    # model.freeze_encoder()
    optimizer = torch.optim.SGD(model.trainable_params(), lr=LEARNING_RATE, weight_decay=L2_REG)

    # optimizer = torch.optim.Adam(model.trainable_params(), lr=LEARNING_RATE, weight_decay=L2_REG, amsgrad=True)
    model.train()
    model.to(DEVICE)
    scheduler = ReduceLROnPlateau(mode='max', optimizer=optimizer, min_lr=1e-3,
                                  patience=DECREASE_LR_EPOCH, factor=0.5, verbose=True)
    print("Trainable Parameters: %s" % count_parameters(model))

    return model, optimizer, scheduler, state
Code Example #15
    def extract(self):
        print('Starting extracting ...')
        self.model.module.eval()

        # Loading extracting model
        print('Loading extracting model ...')
        checkpoint = U.load_checkpoint(self.device_type, self.model_name)
        self.model.module.load_state_dict(checkpoint['model'])
        self.optimizer.load_state_dict(checkpoint['optimizer'])
        print('Successful!\n')

        # Loading Data
        x, l, y, name = next(iter(self.eval_loader))

        # Using GPU
        x = x.to(self.device)
        y = y.to(self.device)

        # Calculating Output
        out, feature = self.model(x)
        out = F.softmax(out, dim=1)

        # Using CPU
        out = out.detach().cpu().numpy()
        x = x.cpu().numpy()
        y = y.cpu().numpy()

        # Loading Weight
        weight = []
        W = self.get_weights()
        for i in range(self.args.model_stream):
            weight.append(W[i].detach().cpu().numpy())
            feature[i] = feature[i].detach().cpu().numpy()

        # Saving Feature
        np.savez('./visualize.npz',
                 feature=feature,
                 out=out,
                 weight=weight,
                 label=y,
                 location=l.numpy(),
                 name=name)
        print('Finish extracting!\n')
Code Example #16
def setup_model(args):
    """Setup model and optimizer."""

    model = magic_get_model(args)
#     if DEEPSPEED_WRAP and args.deepspeed:
#         print_rank_0("DeepSpeed is enabled.")

# #         optimizer='adam'
#         print ("Restoring our optimizer from a pickle...")
#         with open("/notebooks/sberbank_rugpts/our_model/optimizer.pkl", "rb") as f:
#             optimizer = pickle.load(f)
#         print (f"I'm pickle Riiick! I mean, optimizer now is {optimizer}")
#         model, optimizer, _, lr_scheduler = DEEPSPEED_WRAP.deepspeed.initialize(
#             model=model,
#             optimizer=optimizer,
#             args=args,
#             lr_scheduler=None,
#             mpu=mpu,
#             dist_init_required=False
#         )
#         optimizer = "FusedAdam"
#         model, optimizer, _, lr_scheduler = DEEPSPEED_WRAP.deepspeed.initialize(
#             model=model,
#             optimizer=None,
#             args=args,
#             lr_scheduler=None,
#             mpu=mpu,
#             dist_init_required=False
#         )


    print("Load checkpoint from " + args.load)
    _ = load_checkpoint(model, None, None, args, deepspeed=DEEPSPEED_WRAP and args.deepspeed)
#     _ = load_checkpoint(model, None, None, args, deepspeed=True)
    model.eval()
    print("Loaded")
    if args.export_huggingface is not None:
        export_to_huggingface_model(model, args.export_huggingface)
        print(f"Exported in huggingface format to {args.export_huggingface}")

    return model
Code Example #17
File: nets.py Project: peter-yys-yoon/pegcnv2
    def __init__(self, data_shape, num_class, A, drop_prob, gcn_kernel_size,
                 model_stream, subset, pretrained, tag):
        super().__init__()

        C, T, V, M = data_shape
        self.register_buffer('A', A)

        # baseline
        self.stgcn_stream = nn.ModuleList((ST_GCN(data_shape, num_class, A,
                                                  drop_prob, gcn_kernel_size)
                                           for _ in range(model_stream)))

        # load pretrained baseline
        if pretrained:
            for stgcn in self.stgcn_stream:
                checkpoint = U.load_checkpoint(tag, 'baseline_NTU' + subset)
                stgcn.load_state_dict(checkpoint['model'])
                # stgcn.module.load_state_dict(checkpoint['model'])

        # mask
        self.mask_stream = nn.ParameterList(
            [nn.Parameter(torch.ones(T * V * M)) for _ in range(model_stream)])
Code Example #18
File: train.py Project: r0l1/progan-pytorch
    def load_checkpoint(self):
        try:
            map_location = "cuda:0" if torch.cuda.is_available() else "cpu"
            ckpt = load_checkpoint(self.checkpoint_dir,
                                   map_location=map_location)
            # Transition settings
            self.is_transitioning = ckpt["is_transitioning"]
            self.transition_step = ckpt["transition_step"]
            self.current_imsize = ckpt["current_imsize"]
            self.latest_switch = ckpt["latest_switch"]
            self.num_skipped_steps = ckpt["num_skipped_steps"]

            # Tracking stats
            self.global_step = ckpt["global_step"]
            self.start_time = time.time() - ckpt["total_time"] * 60

            # Models
            self.discriminator.load_state_dict(ckpt['D'])

            self.generator.load_state_dict(ckpt['G'])
            self.running_average_generator.load_state_dict(
                ckpt["running_average_generator"])
            to_cuda([
                self.generator, self.discriminator,
                self.running_average_generator
            ])
            self.running_average_generator = amp.initialize(
                self.running_average_generator, None, opt_level=self.opt_level)
            self.init_optimizers()
            self.d_optimizer.load_state_dict(ckpt['d_optimizer'])
            self.g_optimizer.load_state_dict(ckpt['g_optimizer'])
            return True
        except FileNotFoundError as e:
            print(e)
            print(' [*] No checkpoint!')
            return False
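Here load_checkpoint receives a directory rather than a file, so the helper presumably selects a checkpoint inside it. A minimal sketch under that assumption follows; the '*.ckpt' extension and the "newest file wins" policy are illustrative, not taken from the project.

import glob
import os

import torch


def load_checkpoint(checkpoint_dir, map_location=None):
    # Pick the most recently written checkpoint file in the directory and
    # return the deserialized dict; raise if nothing is found, which the
    # caller above reports as "[*] No checkpoint!".
    paths = sorted(glob.glob(os.path.join(checkpoint_dir, '*.ckpt')),
                   key=os.path.getmtime)
    if not paths:
        raise FileNotFoundError('No checkpoint found in {}'.format(checkpoint_dir))
    return torch.load(paths[-1], map_location=map_location)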
Code Example #19
File: cyclegan.py Project: new-okaerinasai/ecal_5d
    def __init__(self, opt):
        super(Model, self).__init__()

        self.gpu_id = opt.gpu_ids[0]
        self.weights_path = os.path.join(opt.experiment_path, 'checkpoints')

        # Generators
        self.gen_A = Generator(opt, 'A', opt.gen_type_name_A)
        self.gen_B = Generator(opt, 'B', opt.gen_type_name_B)

        # Discriminators
        self.dis_A = DiscriminatorWrapper(opt, 'A')
        self.dis_B = DiscriminatorWrapper(opt, 'B')

        # Load weights
        utils.load_checkpoint(self, opt.which_epoch, opt.pretrained_gen_path)

        # Print architectures
        print('\nGen A to B\n')
        num_params = 0
        for p in self.gen_B.parameters():
            num_params += p.numel()
        print(self.gen_B)
        print('Number of parameters: %d' % num_params)

        print('\nGen B to A\n')
        num_params = 0
        for p in self.gen_A.parameters():
            num_params += p.numel()
        print(self.gen_A)
        print('Number of parameters: %d' % num_params)

        print('\nDis A\n')
        num_params = 0
        for p in self.dis_A.parameters():
            num_params += p.numel()
        print(self.dis_A)
        print('Number of parameters: %d' % num_params)

        print('\nDis B\n')
        num_params = 0
        for p in self.dis_B.parameters():
            num_params += p.numel()
        print(self.dis_B)
        print('Number of parameters: %d' % num_params)

        self.gen_params = chain(self.gen_A.parameters(),
                                self.gen_B.parameters())

        self.dis_params = chain(self.dis_A.parameters(),
                                self.dis_B.parameters())

        # Losses
        self.crit_dis_A = DiscriminatorLoss(opt, self.dis_A)
        self.crit_dis_B = DiscriminatorLoss(opt, self.dis_B)

        # If an encoder is required, load the weights
        if (opt.mse_loss_type_A == 'perceptual'
                or opt.mse_loss_type_B == 'perceptual'
                or hasattr(self, 'dis_A') and self.dis_A.use_encoder
                or hasattr(self, 'dis_B') and self.dis_B.use_encoder):

            # Load encoder
            if opt.enc_type[:5] == 'vgg19':
                layers = '1,6,11,20,29'

            self.enc = FeatureExtractor(input_range='tanh',
                                        net_type=opt.enc_type,
                                        layers=layers).eval()

            print('')
            print(self.enc)
            print('')

        else:

            self.enc = None

        self.crit_mse_A = utils.get_criterion(opt.mse_loss_type_A,
                                              opt.mse_loss_weight_A, self.enc)
        self.crit_mse_B = utils.get_criterion(opt.mse_loss_type_B,
                                              opt.mse_loss_weight_B, self.enc)

        self.weights_path = os.path.join(opt.experiment_path, 'checkpoints')

        # In case domains have different sizes, this is needed for mse loss
        scale_factor = opt.img_size_B // opt.img_size_A

        self.down = nn.AvgPool2d(scale_factor)
        self.up = nn.Upsample(scale_factor=scale_factor,
                              mode='bilinear',
                              align_corners=False)

        # Load onto gpus
        self.gen_A = nn.DataParallel(self.gen_A.cuda(self.gpu_id), opt.gpu_ids)
        self.gen_B = nn.DataParallel(self.gen_B.cuda(self.gpu_id), opt.gpu_ids)
        self.dis_A = nn.DataParallel(self.dis_A.cuda(self.gpu_id), opt.gpu_ids)
        self.dis_B = nn.DataParallel(self.dis_B.cuda(self.gpu_id), opt.gpu_ids)
        if self.enc is not None:
            self.enc = nn.DataParallel(self.enc.cuda(self.gpu_id), opt.gpu_ids)
Code Example #20
    def start(self):
        # Training Start
        start_time = time.time()

        if self.args.evaluate:
            # Loading evaluating model
            print('Loading evaluating model ...')
            checkpoint = U.load_checkpoint(self.model_name)
            self.model.module.load_state_dict(checkpoint['model'])
            self.optimizer.module.load_state_dict(checkpoint['optimizer'])
            print('Successful!\n')

            # Start evaluating
            print('Starting evaluating ...')
            self.model.module.eval()
            acc = self.eval()
            print('Finish evaluating!')
            print('Best accuracy: {:2.2f}%, Total time:{:.4f}s'.format(
                acc,
                time.time() - start_time))

        else:
            # Resuming
            start_epoch, best_acc = 0, 0
            if self.args.resume:
                print('Loading checkpoint ...')
                checkpoint = U.load_checkpoint()
                self.model.module.load_state_dict(checkpoint['model'])
                self.optimizer.module.load_state_dict(checkpoint['optimizer'])
                start_epoch = checkpoint['epoch']
                best_acc = checkpoint['best']
                print('Successful!\n')

            # Start training
            print('Starting training ...')
            self.model.module.train()
            for epoch in range(start_epoch, self.args.max_epoch):

                # Adjusting learning rate
                self.adjust_lr(epoch)

                # Training
                acc = self.train(epoch)
                print(
                    'Epoch: {}/{}, Training accuracy: {:2.2f}%, Training time: {:.4f}s\n'
                    .format(epoch + 1, self.args.max_epoch, acc,
                            time.time() - start_time))

                # Evaluating
                is_best = False
                if (epoch + 1) > self.args.adjust_lr[-1] and (epoch + 1) % 2 == 0:
                    print('Evaluating for epoch {} ...'.format(epoch + 1))
                    self.model.module.eval()
                    acc = self.eval()
                    print(
                        'Epoch: {}/{}, Evaluating accuracy: {:2.2f}%, Evaluating time: {:.4f}s\n'
                        .format(epoch + 1, self.args.max_epoch, acc,
                                time.time() - start_time))
                    self.model.module.train()
                    if acc > best_acc:
                        best_acc = acc
                        is_best = True

                # Saving model
                U.save_checkpoint(self.model.module.state_dict(),
                                  self.optimizer.module.state_dict(),
                                  epoch + 1, best_acc, is_best,
                                  self.model_name)
            print('Finish training!')
            print('Best accuracy: {:2.2f}%, Total time: {:.4f}s'.format(
                best_acc,
                time.time() - start_time))
Code Example #21
def job(tuning, params_path, devices, resume, save_interval):
    global params
    if tuning:
        with open(params_path, 'r') as f:
            params = json.load(f)
        mode_str = 'tuning'
        setting = '_'.join(f'{tp}-{params[tp]}'
                           for tp in params['tuning_params'])
    else:
        mode_str = 'train'
        setting = ''

    exp_path = ROOT + f'experiments/{params["ex_name"]}/'
    os.environ['CUDA_VISIBLE_DEVICES'] = devices

    if resume is None:
        # Kept consistent between the C-AIR and ABCI environments.
        params['base_ckpt_path'] = f'experiments/v1only/ep4_augmentation-soft_epochs-5_loss-{params["loss"]}.pth'
        params['clean_path'] = ROOT + f'input/clean/train19_cleaned_verifythresh{params["verifythresh"]}_freqthresh{params["freqthresh"]}.csv'
    else:
        params = utils.load_checkpoint(path=resume, params=True)['params']

    logger, writer = utils.get_logger(
        log_dir=exp_path + f'{mode_str}/log/{setting}',
        tensorboard_dir=exp_path + f'{mode_str}/tf_board/{setting}')

    if params['augmentation'] == 'soft':
        params['scale_limit'] = 0.2
        params['brightness_limit'] = 0.1
    elif params['augmentation'] == 'middle':
        params['scale_limit'] = 0.3
        params['shear_limit'] = 4
        params['brightness_limit'] = 0.1
        params['contrast_limit'] = 0.1
    else:
        raise ValueError

    train_transform, eval_transform = data_utils.build_transforms(
        scale_limit=params['scale_limit'],
        shear_limit=params['shear_limit'],
        brightness_limit=params['brightness_limit'],
        contrast_limit=params['contrast_limit'],
    )

    data_loaders = data_utils.make_train_loaders(
        params=params,
        data_root=ROOT + 'input/' + params['data'],
        train_transform=train_transform,
        eval_transform=eval_transform,
        scale='SS2',
        test_size=0,
        class_topk=params['class_topk'],
        num_workers=8)

    model = models.LandmarkNet(
        n_classes=params['class_topk'],
        model_name=params['model_name'],
        pooling=params['pooling'],
        loss_module=params['loss'],
        s=params['s'],
        margin=params['margin'],
        theta_zero=params['theta_zero'],
        use_fc=params['use_fc'],
        fc_dim=params['fc_dim'],
    ).cuda()

    criterion = nn.CrossEntropyLoss()
    optimizer = utils.get_optim(params, model)

    if resume is None:
        sdict = torch.load(ROOT + params['base_ckpt_path'])['state_dict']
        if params['loss'] == 'adacos':
            del sdict['final.W']  # remove fully-connected layer
        elif params['loss'] == 'softmax':
            del sdict['final.weight'], sdict[
                'final.bias']  # remove fully-connected layer
        else:
            del sdict['final.weight']  # remove fully-connected layer
        model.load_state_dict(sdict, strict=False)

        scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            T_max=params['epochs'] * len(data_loaders['train']),
            eta_min=3e-6)
        start_epoch, end_epoch = (0,
                                  params['epochs'] - params['scaleup_epochs'])
    else:
        ckpt = utils.load_checkpoint(path=resume,
                                     model=model,
                                     optimizer=optimizer,
                                     epoch=True)
        model, optimizer, start_epoch = ckpt['model'], ckpt[
            'optimizer'], ckpt['epoch'] + 1
        end_epoch = params['epochs']

        scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            T_max=params['epochs'] * len(data_loaders['train']),
            eta_min=3e-6,
            last_epoch=start_epoch * len(data_loaders['train']))

        setting += 'scaleup_' + resume.split('/')[-1].replace('.pth', '')

        data_loaders = data_utils.make_verified_train_loaders(
            params=params,
            data_root=ROOT + 'input/' + params['data'],
            train_transform=train_transform,
            eval_transform=eval_transform,
            scale='M2',
            test_size=0,
            num_workers=8)
        batch_norm.freeze_bn(model)

    if len(devices.split(',')) > 1:
        model = nn.DataParallel(model)

    for epoch in range(start_epoch, end_epoch):
        logger.info(f'Epoch {epoch}/{end_epoch}')

        # ============================== train ============================== #
        model.train(True)

        losses = utils.AverageMeter()
        prec1 = utils.AverageMeter()

        for i, (_, x, y) in tqdm(enumerate(data_loaders['train']),
                                 total=len(data_loaders['train']),
                                 miniters=None,
                                 ncols=55):
            x = x.to('cuda')
            y = y.to('cuda')

            outputs = model(x, y)
            loss = criterion(outputs, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()

            acc = metrics.accuracy(outputs, y)
            losses.update(loss.item(), x.size(0))
            prec1.update(acc, x.size(0))

            if i % 100 == 99:
                logger.info(
                    f'{epoch+i/len(data_loaders["train"]):.2f}epoch | {setting} acc: {prec1.avg}'
                )

        train_loss = losses.avg
        train_acc = prec1.avg

        writer.add_scalars('Loss', {'train': train_loss}, epoch)
        writer.add_scalars('Acc', {'train': train_acc}, epoch)
        writer.add_scalar('LR', optimizer.param_groups[0]['lr'], epoch)

        if (epoch + 1) == end_epoch or (epoch + 1) % save_interval == 0:
            output_file_name = exp_path + f'ep{epoch}_' + setting + '.pth'
            utils.save_checkpoint(path=output_file_name,
                                  model=model,
                                  epoch=epoch,
                                  optimizer=optimizer,
                                  params=params)

    model = model.module
    datasets = ('oxford5k', 'paris6k', 'roxford5k', 'rparis6k')
    results = eval_datasets(model,
                            datasets=datasets,
                            ms=True,
                            tta_gem_p=1.0,
                            logger=logger)

    if tuning:
        tuning_result = {}
        for d in datasets:
            if d in ('oxford5k', 'paris6k'):
                tuning_result[d] = results[d]
            else:
                for key in ['mapE', 'mapM', 'mapH']:
                    mapE, mapM, mapH, mpE, mpM, mpH, kappas = results[d]
                    tuning_result[d + '-' + key] = [eval(key)]
        utils.write_tuning_result(params, tuning_result,
                                  exp_path + 'tuning/results.csv')
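The training script above saves and resumes through keyword-style utils.save_checkpoint / utils.load_checkpoint calls. A minimal sketch of a helper pair consistent with those calls is shown below; the flag handling and checkpoint keys are assumptions, and other examples on this page use different conventions.

import torch


def save_checkpoint(path, model, epoch, optimizer, params):
    # Persist everything the resume path reads back later.
    torch.save({'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'epoch': epoch,
                'params': params}, path)


def load_checkpoint(path, model=None, optimizer=None, params=False, epoch=False):
    # Return only what the caller asked for, keyed the same way it was requested.
    ckpt = torch.load(path, map_location='cpu')
    out = {}
    if model is not None:
        model.load_state_dict(ckpt['state_dict'])
        out['model'] = model
    if optimizer is not None:
        optimizer.load_state_dict(ckpt['optimizer'])
        out['optimizer'] = optimizer
    if params:
        out['params'] = ckpt['params']
    if epoch:
        out['epoch'] = ckpt['epoch']
    return out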
Code Example #22
    def run_full_evaluation(self, model, model_path, model_tag, shots, method,
                            callback, target_split_dir, checking, n_ways):
        """
        Run the evaluation over all the tasks in parallel
        inputs:
            model : The loaded model containing the feature extractor
            loaders_dic : Dictionary containing training and testing loaders
            model_path : Where was the model loaded from
            model_tag : Which model ('final' or 'best') to load
            method : Which method to use for inference ("baseline", "tim-gd" or "tim-adm")
            shots : Number of support shots to try

        returns :
            results : List of the mean accuracy for each number of support shots
        """
        print("=> Runnning full evaluation with method: {}".format(method))

        # Load pre-trained model
        load_checkpoint(model=model, model_path=model_path, type=model_tag)

        # Get loaders
        loaders_dic = self.get_loaders()

        # Extract features (just load them if already in memory)
        extracted_features_shots_dic = self.extract_features_shots(
            model=model, model_path=model_path, loaders_dic=loaders_dic)

        extracted_features_queries_dic = self.extract_features_queries(
            model=model, model_path=model_path, loaders_dic=loaders_dic)

        results = []

        tasks = self.generate_task(
            extracted_features_shots_dic=extracted_features_shots_dic,
            extracted_features_queries_dic=extracted_features_queries_dic,
            shot=shots)

        logs_prob, logs_y = self.run_task(task_dic=tasks,
                                          model=model,
                                          callback=callback)

        if n_ways == 5:
            classes = {
                'nodefect': 0,
                'scaling': 1,
                'efflorescence': 2,
                'cracks': 3,
                'spalling': 4
            }
            right_elems = {
                'nodefect': 0,
                'scaling': 0,
                'efflorescence': 0,
                'cracks': 0,
                'spalling': 0
            }
            total_elems = {
                'nodefect': 0,
                'scaling': 0,
                'efflorescence': 0,
                'cracks': 0,
                'spalling': 0
            }
            detected_elems = {
                'nodefect': 0,
                'scaling': 0,
                'efflorescence': 0,
                'cracks': 0,
                'spalling': 0
            }
        elif n_ways == 2:
            classes = {'nodefect': 0, 'defect': 1}
            total_elems = {'nodefect': 0, 'defect': 0}
            right_elems = {'nodefect': 0, 'defect': 0}
            detected_elems = {'nodefect': 0, 'defect': 0}
        inv_map = {v: k for k, v in classes.items()}

        with open(os.path.join(target_split_dir, "query.csv")) as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            line_count = 0
            # Checking results:
            if checking:
                false_negatives = 0
                true_positives = 0
                false_positives = 0
                true_negatives = 0
                for row in csv_reader:
                    if line_count == 0:
                        pass
                    else:
                        if classes[row[1]] == logs_y[0][line_count - 1]:
                            right_elems[row[1]] += 1
                        else:
                            print(
                                "Confused {} with {} \t Probabilities: {} \t Image: {}"
                                .format(row[1],
                                        inv_map[logs_y[0][line_count - 1]],
                                        logs_prob[0][line_count - 1], row[0]))
                        total_elems[row[1]] += 1
                        detected_elems[inv_map[logs_y[0][line_count - 1]]] += 1
                        if logs_y[0][line_count - 1] != 0:
                            if classes[row[1]] != 0:
                                true_positives += 1
                            else:
                                false_positives += 1
                        else:
                            if classes[row[1]] == 0:
                                true_negatives += 1
                            else:
                                false_negatives += 1
                    line_count += 1
                print(f'Processed {line_count-1} images.')
                global_right = 0
                for class_i in classes:
                    print('Accuracy for {}: {:.4f}'.format(
                        class_i, right_elems[class_i] / total_elems[class_i]))
                    global_right += right_elems[class_i]
                print('Global accuracy: {:.4f}'.format(global_right /
                                                       (line_count - 1)))
                print(
                    'Positive = defect: \n\tFalse positives: {}\n\tFalse negatives: {}\n\tTrue positives: {}\n\tTrue negatives: {}'
                    .format(false_positives, false_negatives, true_positives,
                            true_negatives))
                print('Images from real classes: \t{}'.format(total_elems))
                print('Images from detected classes: \t{}'.format(
                    detected_elems))

            else:
                for row in csv_reader:
                    if line_count == 0:
                        pass
                    else:
                        print("Image: {} \t is {} \t with probabilities: {}".
                              format(row[0],
                                     inv_map[logs_y[0][line_count - 1]],
                                     logs_prob[0][line_count - 1]))
                        total_elems[inv_map[logs_y[0][line_count - 1]]] += 1
                    line_count += 1
                print("\n")
                for class_i in classes:
                    print('Images of class {}: \t{:.0f}'.format(
                        class_i, total_elems[class_i]))

        return results
Code Example #23
File: test_mnist.py Project: sethah/reproducible-ds
    device = torch.device("cuda:0") if use_gpu else torch.device("cpu")

    torch.manual_seed(args.seed)
    if use_gpu:
        torch.cuda.manual_seed(args.seed)

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_gpu else {}
    mnist_transforms = transforms.Compose([transforms.ToTensor(),
                                           transforms.Normalize((0.1307,), (0.3081,))])
    ds = datasets.MNIST('./data/', train=False, download=True, transform=mnist_transforms)
    loader = torch.utils.data.DataLoader(ds, batch_size=args.batch_size)

    model = SimpleConvNet().to(device)
    criterion = nn.CrossEntropyLoss().to(device)

    loaded = utils.load_checkpoint(args.checkpoint_path, best=True)
    model.load_state_dict(loaded['model'])

    with mlflow.start_run():
        # Log our parameters into mlflow
        for key, value in vars(args).items():
            mlflow.log_param(key, value)

        loss = 0.
        correct = 0.
        n = len(loader.dataset)
        model.eval()
        for data, target in loader:
            data, target = data.to(device), target.to(device)
            output = model.forward(data)
            prediction = torch.argmax(output, dim=1)
Code Example #24
    def __init__(self, args):

        # set up output directory
        self.output_dir = os.path.join(args.experiment_dir, args.run_name)
        if not os.path.exists(args.experiment_dir):
            os.mkdir(args.experiment_dir)
        if not os.path.exists(self.output_dir):
            os.mkdir(self.output_dir)
        if not os.path.exists(os.path.join(args.experiment_dir, "runs/")):
            os.mkdir(os.path.join(args.experiment_dir, "runs/"))

        # initialize model config
        self.config = vars(args)

        if args.real_run:
            run_name = "{}-{}".format(args.experiment_dir, args.run_name)
        else:
            run_name = None

        # initialize weights and biases
        wandb.init(
            name=run_name,
            notes=args.a_nice_note,
            project="coreference-detection",
            config=self.config,
        )

        # check if there is a model to load
        if args.old_model_dir is not None:
            self.use_old_model = True
            self.load_dir = args.old_model_dir
            load_from_file(os.path.join(self.load_dir, "config.json"),
                           self.config)

            # create vocab
            self.vocab = Vocab()
            self.vocab.load_from_dict(os.path.join(self.load_dir,
                                                   "vocab.json"))
            self.update_vocab = False
            self.config["min_count"] = 1
        else:
            self.use_old_model = False

            self.vocab = None
            self.update_vocab = True

        # train
        self.train_dataset = DialogueDataset(
            os.path.join(self.config["dataset_filename"], "train_data.json"),
            self.config["sentence_len"], self.vocab, self.update_vocab)
        self.data_loader_train = torch.utils.data.DataLoader(
            self.train_dataset, self.config["train_batch_size"], shuffle=True)
        self.config["train_len"] = len(self.train_dataset)

        self.vocab = self.train_dataset.vocab

        # eval
        self.val_dataset = DialogueDataset(
            os.path.join(self.config["dataset_filename"], "val_data.json"),
            self.config["sentence_len"], self.vocab, self.update_vocab)
        self.data_loader_val = torch.utils.data.DataLoader(
            self.val_dataset, self.config["val_batch_size"], shuffle=True)
        self.config["val_len"] = len(self.val_dataset)

        # update, and save vocab
        self.vocab = self.val_dataset.vocab
        self.train_dataset.vocab = self.vocab
        if (self.config["min_count"] > 1):
            self.config["old_vocab_size"] = len(self.vocab)
            self.vocab.prune_vocab(self.config["min_count"])
        self.vocab.save_to_dict(os.path.join(self.output_dir, "vocab.json"))
        self.vocab_size = len(self.vocab)
        self.config["vocab_size"] = self.vocab_size

        # load embeddings
        if self.config["pretrained_embeddings_dir"] is not None:
            pretrained_embeddings = get_pretrained_embeddings(
                self.config["pretrained_embeddings_dir"], self.vocab)
        else:
            pretrained_embeddings = None

        # print and save the config file
        print_config(self.config)
        save_config(os.path.join(self.output_dir, "config.json"), self.config)

        # set device
        self.device = torch.device('cuda')

        # create model
        self.model = Transformer(
            self.config["vocab_size"],
            self.config["label_len"],
            self.config["sentence_len"],
            d_word_vec=self.config["embedding_dim"],
            d_model=self.config["model_dim"],
            d_inner=self.config["inner_dim"],
            n_layers=self.config["num_layers"],
            n_head=self.config["num_heads"],
            d_k=self.config["dim_k"],
            d_v=self.config["dim_v"],
            dropout=self.config["dropout"],
            pretrained_embeddings=pretrained_embeddings).to(self.device)

        # create optimizer
        self.optimizer = torch.optim.Adam(filter(lambda x: x.requires_grad,
                                                 self.model.parameters()),
                                          betas=(0.9, 0.98),
                                          eps=1e-09)

        # load old model, optimizer if there is one
        if self.use_old_model:
            self.model, self.optimizer = load_checkpoint(
                os.path.join(self.load_dir, "model.bin"), self.model,
                self.optimizer, self.device)

        # create a scheduled optimizer object
        self.optimizer = ScheduledOptim(self.optimizer,
                                        self.config["model_dim"],
                                        self.config["warmup_steps"])

        #self.optimizer.optimizer.to(torch.device('cpu'))
        if self.config["weight"] is None:
            self.weight = None
        else:
            self.weight = torch.Tensor(self.config["weight"]).to(self.device)

        wandb.config.update(self.config)
        wandb.watch(self.model)
Code Example #25
File: scratch.py Project: sethah/reproducible-ds
pred = model.forward(im.unsqueeze(0))
pred.shape

model = FullyConvolutional()
pred = model.forward(im.unsqueeze(0))
pred.shape
out_features = pred.view(pred.shape[0], -1).shape[1]
out_features

head = DenseHead(out_features, 10)
pred = head(pred)

model = nn.Sequential(model, head)

loaded = utils.load_checkpoint("models/mnist_fully_conv", best=True)
model.load_state_dict(loaded['model'])

test_ds = datasets.MNIST('./data/', train=False, download=True, transform=mnist_transforms)
print(len(test_ds))
loader = torch.utils.data.DataLoader(test_ds, batch_size=32)

preds = []
targs = []
inps = []
for im, targ in loader:
  pred = model.forward(im)
  preds.append(pred)
  inps.append(im)
  targs.append(targ)
raw_preds = torch.cat(preds).detach().numpy()
Code Example #26
    'bidirectional': False,
    'glove_embedding_size': 50,
    'other_embedding_size': 200,
    'embedding_size': 50+200,
    'fix_emb_glove': True,
    'fix_emb_other': True,
    'dp_ratio': 0.3,
    'mlp_hidden_size_list': [32, 32],
    'cuda': torch.cuda.is_available(),
})

if __name__ == "__main__":
    if len(sys.argv) > 1:
        checkpoint_dir = sys.argv[1]
        print('loading from checkpoint in {}'.format(constant.SAVE_DIR+'/'+checkpoint_dir))
        checkpoint = load_checkpoint(checkpoint=checkpoint_dir)
        args = checkpoint['args']

    args.embedding_size = args.glove_embedding_size + args.other_embedding_size
    state = {k: v for k, v in args.items()}
    print(args)

    dm = datamanager.TextDataManager(args)
    args.n_embed = dm.vocab.n_words
    model = text_model.TextClassifier(config=args)

    model.glove_embed.weight.data = l2_normalize(torch.Tensor(dm.vocab.get_glove_embed_vectors()))
    model.other_embed.weight.data = l2_normalize(torch.Tensor(dm.vocab.get_medw2v_embed_vectors()))

    if args.cuda:
        model.cuda()
Code Example #27
File: STD.py Project: MTandHJ/roboc
def load_cfg() -> Tuple[Config, str]:
    from src.dict2obj import Config
    from src.base import Coach
    from src.utils import gpu, set_seed, load_checkpoint

    cfg = Config()
    set_seed(opts.seed)

    # the model and other settings for training
    model = load_model(opts.model)(num_classes=get_num_classes(opts.dataset),
                                   scale=opts.scale)
    device = gpu(model)

    # load the dataset
    trainset = load_dataset(dataset_type=opts.dataset,
                            transform=opts.transform,
                            train=True)
    cfg['trainloader'] = load_dataloader(dataset=trainset,
                                         batch_size=opts.batch_size,
                                         train=True,
                                         show_progress=opts.progress)
    testset = load_dataset(dataset_type=opts.dataset,
                           transform=opts.transform,
                           train=False)
    cfg['testloader'] = load_dataloader(dataset=testset,
                                        batch_size=opts.batch_size,
                                        train=False,
                                        show_progress=opts.progress)
    normalizer = load_normalizer(dataset_type=opts.dataset)

    # load the optimizer and learning_policy
    optimizer = load_optimizer(model=model,
                               optim_type=opts.optimizer,
                               lr=opts.lr,
                               momentum=opts.momentum,
                               betas=(opts.beta1, opts.beta2),
                               weight_decay=opts.weight_decay)
    learning_policy = load_learning_policy(
        optimizer=optimizer,
        learning_policy_type=opts.learning_policy,
        T_max=opts.epochs)

    # generate the path for logging information and saving parameters
    cfg['info_path'], cfg['log_path'] = generate_path(
        method=METHOD,
        dataset_type=opts.dataset,
        model=opts.model,
        description=opts.description)
    if opts.resume:
        cfg['start_epoch'] = load_checkpoint(path=cfg.info_path,
                                             model=model,
                                             optimizer=optimizer,
                                             lr_scheduler=learning_policy)
    else:
        cfg['start_epoch'] = 0

    cfg['coach'] = Coach(model=model,
                         device=device,
                         loss_func=load_loss_func(opts.loss)(model=model),
                         normalizer=normalizer,
                         optimizer=optimizer,
                         learning_policy=learning_policy)

    # for validation
    cfg['valider'] = load_valider(model=model,
                                  device=device,
                                  dataset_type=opts.dataset)
    return cfg
Code Example #28
def job(tuning, params_path, devices, resume, save_interval):
    global params
    if tuning:
        with open(params_path, 'r') as f:
            params = json.load(f)
        mode_str = 'tuning'
        setting = '_'.join(f'{tp}-{params[tp]}'
                           for tp in params['tuning_params'])
    else:
        mode_str = 'train'
        setting = ''

    # Change the seed whenever the parameters change (hoping for a seed-averaging effect)
    seed = sum(ord(_) for _ in str(params.values()))
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False

    exp_path = ROOT + f'experiments/{params["ex_name"]}/'
    os.environ['CUDA_VISIBLE_DEVICES'] = devices

    logger, writer = utils.get_logger(
        log_dir=exp_path + f'{mode_str}/log/{setting}',
        tensorboard_dir=exp_path + f'{mode_str}/tf_board/{setting}')

    if params['augmentation'] == 'soft':
        params['scale_limit'] = 0.2
        params['brightness_limit'] = 0.1
    elif params['augmentation'] == 'middle':
        params['scale_limit'] = 0.3
        params['shear_limit'] = 4
        params['brightness_limit'] = 0.1
        params['contrast_limit'] = 0.1
    else:
        raise ValueError

    train_transform, eval_transform = data_utils.build_transforms(
        scale_limit=params['scale_limit'],
        shear_limit=params['shear_limit'],
        brightness_limit=params['brightness_limit'],
        contrast_limit=params['contrast_limit'],
    )

    data_loaders = data_utils.make_train_loaders(
        params=params,
        data_root=ROOT + 'input/' + params['data'],
        train_transform=train_transform,
        eval_transform=eval_transform,
        scale='S2',
        test_size=0,
        class_topk=params['class_topk'],
        num_workers=8)

    model = models.LandmarkFishNet(
        n_classes=params['class_topk'],
        model_name=params['model_name'],
        pooling_strings=params['pooling'].split(','),
        loss_module=params['loss'],
        s=params['s'],
        margin=params['margin'],
        theta_zero=params['theta_zero'],
        use_fc=params['use_fc'],
        fc_dim=params['fc_dim'],
    ).cuda()
    optimizer = utils.get_optim(params, model)
    criterion = nn.CrossEntropyLoss()
    scheduler = optim.lr_scheduler.CosineAnnealingLR(
        optimizer,
        T_max=params['epochs'] * len(data_loaders['train']),
        eta_min=3e-6)
    start_epoch = 0

    if len(devices.split(',')) > 1:
        model = nn.DataParallel(model)

    if resume is not None:
        ckpt = utils.load_checkpoint(path=resume,
                                     model=model,
                                     optimizer=optimizer,
                                     epoch=True)
        model, optimizer, start_epoch = ckpt['model'], ckpt[
            'optimizer'], ckpt['epoch'] + 1

        scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            T_max=params['epochs'] * len(data_loaders['train']),
            eta_min=3e-6,
            last_epoch=start_epoch * len(data_loaders['train']))

        setting = 'scaleup_' + resume.split('/')[-1].replace('.pth', '')

        # Change the seed whenever the parameters change (hoping for a seed-averaging effect)
        seed = sum(ord(_) for _ in str(params.values())) + 12345
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.benchmark = False

        train_transform, eval_transform = data_utils.build_transforms(
            scale_limit=params['scale_limit'],
            shear_limit=params['shear_limit'],
            brightness_limit=params['brightness_limit'],
            contrast_limit=params['contrast_limit'],
        )
        data_loaders = data_utils.make_verified_train_loaders(
            params=params,
            data_root=ROOT + 'input/' + params['data'],
            train_transform=train_transform,
            eval_transform=eval_transform,
            scale='M2',
            test_size=0.0,
            num_workers=8)
        batch_norm.freeze_bn(model)

    for epoch in range(start_epoch, params['epochs']):

        logger.info(
            f'Epoch {epoch}/{params["epochs"]} | lr: {optimizer.param_groups[0]["lr"]}'
        )

        # ============================== train ============================== #
        model.train(True)

        losses = utils.AverageMeter()
        prec1 = utils.AverageMeter()

        for i, (_, x, y) in tqdm(enumerate(data_loaders['train']),
                                 total=len(data_loaders['train']),
                                 miniters=None,
                                 ncols=55):
            x = x.to('cuda')
            y = y.to('cuda')

            outputs = model(x, y)
            loss = criterion(outputs, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()

            acc = metrics.accuracy(outputs, y)
            losses.update(loss.item(), x.size(0))
            prec1.update(acc, x.size(0))

            if i % 100 == 99:
                logger.info(
                    f'{epoch+i/len(data_loaders["train"]):.2f}epoch | {setting} acc: {prec1.avg}'
                )

        train_loss = losses.avg
        train_acc = prec1.avg

        writer.add_scalars('Loss', {'train': train_loss}, epoch)
        writer.add_scalars('Acc', {'train': train_acc}, epoch)
        writer.add_scalar('LR', optimizer.param_groups[0]['lr'], epoch)

        if (epoch + 1) == params['epochs'] or (epoch + 1) % save_interval == 0:
            output_file_name = exp_path + f'ep{epoch}_' + setting + '.pth'
            utils.save_checkpoint(path=output_file_name,
                                  model=model,
                                  epoch=epoch,
                                  optimizer=optimizer,
                                  params=params)

    model = model.module
    datasets = ('roxford5k', 'rparis6k')
    results = eval_datasets(model,
                            datasets=datasets,
                            ms=False,
                            tta_gem_p=1.0,
                            logger=logger)

    if tuning:
        tuning_result = {}
        for d in datasets:
            for key in ['mapE', 'mapM', 'mapH']:
                mapE, mapM, mapH, mpE, mpM, mpH, kappas = results[d]
                tuning_result[d + '-' + key] = [eval(key)]
        utils.write_tuning_result(params, tuning_result,
                                  exp_path + 'tuning/results.csv')
Code Example #29
    pp.pprint(APs)
    print('\nMean Average Precision (mAP): %.3f' % mAP)


if __name__ == '__main__':
    # Get eval arguments
    args = get_eval_argument()
    print('Arguments for evaluation : ', args)

    # Set cuda device
    set_cuda_dev(args.ngpu)

    # Load model checkpoint that is to be evaluated
    model = SSD('test', args)
    checkpoint = args.trained_model
    _, model, _ = load_checkpoint(model, args.trained_model_path + checkpoint)
    model = model.cuda()
    # Switch to eval mode
    model.eval()

    # Load test datas
    test_dataset = VOCxx('test',
                         args.dataroot,
                         args.datayears,
                         args.datanames,
                         discard_difficult=args.discard_difficult,
                         use_augment=False)
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=args.batch_size,
        shuffle=False,
Code Example #30
    def __init__(self, opt):
        super(Model, self).__init__()

        self.gpu_id = opt.gpu_ids[0]
        self.weights_path = os.path.join(opt.experiment_path, 'checkpoints')

        # Generator
        self.gen_B = Generator(opt, 'B', opt.gen_type_name_B)
        
        self.noise_size = (opt.batch_size, self.gen_B.noise_channels)

        # Discriminator
        if opt.dis_type_names_B: self.dis_B = DiscriminatorWrapper(opt, 'B')

        # Load weights
        utils.load_checkpoint(self, opt.which_epoch, opt.pretrained_gen_path)

        # Print architectures
        print('\nGen A to B\n')
        num_params = 0
        for p in self.gen_B.parameters():
            num_params += p.numel()
        print(self.gen_B)
        print('Number of parameters: %d' % num_params)

        self.X_min = torch.from_numpy(np.load(os.path.join(input_path, "data_min.npy")))
        self.X_min = self.X_min.cuda()

        self.X_max = torch.from_numpy(np.load(os.path.join(input_path, "data_max.npy")))
        self.X_max = self.X_max.cuda()
        
        self.X_mean = torch.from_numpy(np.load(os.path.join(input_path, "data_mean.npy")))
        self.X_mean = self.X_mean.cuda()

        self.X_std = torch.from_numpy(np.load(os.path.join(input_path, "data_std.npy")))
        self.X_std = self.X_std.cuda()
        
        self.y_mean = torch.from_numpy(np.load(os.path.join(input_path, "target_mean.npy")))
        self.y_mean = self.y_mean.cuda()

        self.y_std = torch.from_numpy(np.load(os.path.join(input_path, "target_std.npy")))
        self.y_std = self.y_std.cuda()

        self.gen_params = self.gen_B.parameters()

        # Discriminator
        if opt.dis_type_names_B:

            print('\nDis B\n')
            num_params = 0
            for p in self.dis_B.parameters():
                num_params += p.numel()
            print(self.dis_B)
            print('Number of parameters: %d' % num_params)

            self.dis_params = self.dis_B.parameters()

            # Losses
            self.crit_dis_B = DiscriminatorLoss(opt, self.dis_B)

        # If an encoder is required, load the weights
        if hasattr(self, 'dis_B') and self.dis_B.use_encoder:

            # Load encoder
            if opt.enc_type[:5] == 'vgg19':
                layers = '1,6,11,20,29'

            self.enc = FeatureExtractor(
                input_range='tanh',
                net_type=opt.enc_type,
                layers=layers).eval()

            print('')
            print(self.enc)
            print('')

        else:

            self.enc = None

        # Pretrained aux classifier/regressor
        if opt.pretrained_aux_path:

            self.aux = torch.load(opt.pretrained_aux_path)

            self.crit_aux_B = utils.get_criterion(
                opt.aux_loss_type, 
                opt.gen_aux_loss_weight,
                self.enc)

            print('')
            print(self.aux)
            print('')

        self.up = nn.Upsample(
            scale_factor=1, 
            mode='bilinear',
            align_corners=False)

        # Load onto gpus
        self.gen_B = nn.DataParallel(self.gen_B.cuda(self.gpu_id), opt.gpu_ids)
        if opt.dis_type_names_B:
            self.dis_B = nn.DataParallel(self.dis_B.cuda(self.gpu_id), opt.gpu_ids)
        if hasattr(self, 'aux'):
            self.aux = nn.DataParallel(self.aux.cuda(self.gpu_id), opt.gpu_ids)
        if self.enc is not None: 
            self.enc = nn.DataParallel(self.enc.cuda(self.gpu_id), opt.gpu_ids)