Example 1
def proxyless_base(num_classes,
                   loss,
                   pretrained=True,
                   net_config=None,
                   net_weight=None):
    assert net_config is not None, "Please input a network config"
    net_config_path = download_url(net_config)
    net_config_json = json.load(open(net_config_path, 'r'))
    net = ProxylessNASNets.build_from_config(net_config_json, num_classes,
                                             loss)

    if 'bn' in net_config_json:
        net.set_bn_param(bn_momentum=net_config_json['bn']['momentum'],
                         bn_eps=net_config_json['bn']['eps'])
    else:
        net.set_bn_param(bn_momentum=0.1, bn_eps=1e-3)

    if pretrained:
        assert net_weight is not None, "Please specify network weights"
        init_path = download_url(net_weight)
        load_weights(net, init_path)

    return net
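
The set_bn_param helper used above is not shown in this example. A minimal sketch of what such a method typically does, written here as a free function and assuming it simply walks the module tree and updates every batch-norm layer (an illustration, not the project's actual implementation):

import torch.nn as nn

def set_bn_param(net, bn_momentum, bn_eps):
    # Update every batch-norm layer of the network in place.
    for module in net.modules():
        if isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
            module.momentum = bn_momentum
            module.eps = bn_eps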
Example 2
def mnasnet(num_classes, loss, pretrained='imagenet', **kwargs):
    model = MnasNet(num_classes, loss, **kwargs)
    if pretrained == 'imagenet':
        model_url = model_urls['imagenet']
        load_weights(model, model_url)
    return model
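
The factory functions in these examples all delegate to load_weights with a URL or file path, and the comment in Example 3 below says that layers whose sizes do not match are ignored. The loader itself is not shown; a minimal sketch of such a size-tolerant loader built on plain PyTorch utilities (the name load_weights_sketch and the fallback to a local file are assumptions for illustration):

import torch
from torch.hub import load_state_dict_from_url

def load_weights_sketch(model, url_or_path):
    # Fetch the checkpoint from a URL or a local file.
    if str(url_or_path).startswith(('http://', 'https://')):
        checkpoint = load_state_dict_from_url(url_or_path, map_location='cpu')
    else:
        checkpoint = torch.load(url_or_path, map_location='cpu')
    state_dict = checkpoint.get('state_dict', checkpoint)
    model_dict = model.state_dict()
    # Keep only entries whose name and shape match the target model.
    filtered = {k: v for k, v in state_dict.items()
                if k in model_dict and v.size() == model_dict[k].size()}
    model_dict.update(filtered)
    model.load_state_dict(model_dict)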
Example 3
def main():
    global args

    torch.manual_seed(args.seed)
    if not args.use_avai_gpus:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False
    log_name = 'log_train_{}.txt'.format(time.strftime("%Y-%m-%d-%H-%M-%S"))
    if args.evaluate:
        log_name = log_name.replace('train', 'test')
    sys.stdout = Logger(osp.join(args.save_dir, log_name))
    print(' '.join(sys.argv))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU, however, GPU is highly recommended")

    print("Initializing image data manager")

    dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
    if hasattr(dm, 'lfw_dataset'):
        lfw = dm.lfw_dataset
        print('LFW dataset is used!')
    else:
        lfw = None

    trainloader, trainloader_dict, testloader_dict = dm.return_dataloaders()

    num_train_pids = dm.num_train_pids

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(
        name=args.arch,
        num_classes=num_train_pids,
        loss={'xent', 'htri'},
        pretrained=False if args.load_weights else 'imagenet',
        grayscale=args.grayscale,
        normalize_embeddings=args.normalize_embeddings,
        normalize_fc=args.normalize_fc,
        convbn=args.convbn)
    print("Model size: {:.3f} M".format(count_num_param(model)))

    count_flops(model, args.height, args.width, args.grayscale)

    if args.load_weights and check_isfile(args.load_weights):
        # load pretrained weights but ignore layers that don't match in size
        load_weights(model, args.load_weights)
        print("Loaded pretrained weights from '{}'".format(args.load_weights))

    if args.resume and check_isfile(args.resume):
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        args.start_epoch = checkpoint['epoch'] + 1
        print("Loaded checkpoint from '{}'".format(args.resume))

        print("- start_epoch: {}\n- rank1: {}".format(args.start_epoch,
                                                      checkpoint['rank1']))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")

        for name in args.target_names:
            if 'lfw' not in name.lower():
                print("Evaluating {} ...".format(name))
                queryloader = testloader_dict[name]['query']
                galleryloader = testloader_dict[name]['gallery']
                distmat = test(args,
                               model,
                               queryloader,
                               galleryloader,
                               use_gpu,
                               return_distmat=True)

                if args.visualize_ranks:
                    visualize_ranked_results(
                        distmat,
                        dm.return_testdataset_by_name(name),
                        save_dir=osp.join(args.save_dir, 'ranked_results',
                                          name),
                        topk=20)
            else:
                model.eval()
                same_acc, diff_acc, all_acc, auc, thresh = evaluate(
                    args,
                    dm.lfw_dataset,
                    model,
                    compute_embeddings_lfw,
                    args.test_batch_size,
                    verbose=False,
                    show_failed=args.show_failed)
                log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(
                    same_acc, diff_acc))
                log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))
                log.info('Validation AUC: {0:.4f}'.format(auc))
                log.info('Estimated threshold: {0:.4f}'.format(thresh))
        return

    criterions = choose_losses(args, dm, model, use_gpu)

    if not args.evaluate and len(criterions) == 0:
        raise AssertionError('No loss functions were chosen!')
    optimizer = init_optimizer(model.parameters(), **optimizer_kwargs(args))

    if args.load_optim:
        checkpoint = torch.load(args.load_weights)
        optimizer.load_state_dict(checkpoint['optim'])
        print("Loaded optimizer from '{}'".format(args.load_weights))

    for param_group in optimizer.param_groups:
        param_group['lr'] = args.lr
        param_group['weight_decay'] = args.weight_decay

    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=args.stepsize,
                                         gamma=args.gamma)

    start_time = time.time()
    ranklogger = RankLogger(args.source_names, args.target_names)
    train_time = 0
    train_writer = SummaryWriter(osp.join(args.save_dir, 'train_log'))
    test_writer = SummaryWriter(osp.join(args.save_dir, 'test_log'))
    print("=> Start training")

    if args.fixbase_epoch > 0:
        print(
            "Train {} for {} epochs while keeping other layers frozen".format(
                args.open_layers, args.fixbase_epoch))
        initial_optim_state = optimizer.state_dict()

        for epoch in range(args.fixbase_epoch):
            start_train_time = time.time()
            train(epoch,
                  model,
                  criterions,
                  optimizer,
                  trainloader,
                  use_gpu,
                  train_writer,
                  fixbase=True,
                  lfw=lfw)
            train_time += round(time.time() - start_train_time)

            for name in args.target_names:
                if 'lfw' not in name.lower():
                    print("Evaluating {} ...".format(name))
                    queryloader = testloader_dict[name]['query']
                    galleryloader = testloader_dict[name]['gallery']
                    testloader = testloader_dict[name]['test']
                    criteria = None
                    rank1 = test(args,
                                 model,
                                 queryloader,
                                 galleryloader,
                                 use_gpu,
                                 testloader=testloader,
                                 criterions=criteria)
                else:
                    model.eval()
                    same_acc, diff_acc, all_acc, auc, thresh = evaluate(
                        args,
                        dm.lfw_dataset,
                        model,
                        compute_embeddings_lfw,
                        args.test_batch_size,
                        verbose=False,
                        show_failed=args.show_failed)
                    print('Validation accuracy: {0:.4f}, {1:.4f}'.format(
                        same_acc, diff_acc))
                    print('Validation accuracy mean: {0:.4f}'.format(all_acc))
                    print('Validation AUC: {0:.4f}'.format(auc))
                    print('Estimated threshold: {0:.4f}'.format(thresh))
                    rank1 = all_acc

        print("Done. All layers are open to train for {} epochs".format(
            args.max_epoch))
        optimizer.load_state_dict(initial_optim_state)

    for epoch in range(args.start_epoch, args.max_epoch):
        for criterion in criterions:
            criterion.train_stats.reset()

        start_train_time = time.time()
        train(epoch,
              model,
              criterions,
              optimizer,
              trainloader,
              use_gpu,
              train_writer,
              lfw=lfw)
        train_time += round(time.time() - start_train_time)

        scheduler.step()

        if (((epoch + 1) > args.start_eval and args.eval_freq > 0
             and (epoch + 1) % args.eval_freq == 0)
                or (epoch + 1) == args.max_epoch):
            num_iter = (epoch + 1) * len(trainloader)
            if not args.no_train_quality:
                for name in args.source_names:
                    print(
                        "Measure quality on the {} train set...".format(name))
                    queryloader = trainloader_dict[name]['query']
                    galleryloader = trainloader_dict[name]['gallery']
                    rank1 = test(args, model, queryloader, galleryloader,
                                 use_gpu)
                    train_writer.add_scalar('rank1/{}'.format(name), rank1,
                                            num_iter)

            print("=> Test")

            for name in args.target_names:
                if 'lfw' not in name.lower():
                    print("Evaluating {} ...".format(name))
                    queryloader = testloader_dict[name]['query']
                    galleryloader = testloader_dict[name]['gallery']
                    testloader = testloader_dict[name]['test']
                    criteria = criterions
                    if args.no_loss_on_val:
                        criteria = None
                    rank1 = test(args,
                                 model,
                                 queryloader,
                                 galleryloader,
                                 use_gpu,
                                 testloader=testloader,
                                 criterions=criteria)
                    test_writer.add_scalar('rank1/{}'.format(name), rank1,
                                           num_iter)
                    if not args.no_loss_on_val:
                        for criterion in criterions:
                            test_writer.add_scalar(
                                'loss/{}'.format(criterion.name),
                                criterion.test_stats.avg, num_iter)
                            criterion.test_stats.reset()
                    ranklogger.write(name, epoch + 1, rank1)
                else:
                    model.eval()
                    same_acc, diff_acc, all_acc, auc, thresh = evaluate(
                        args,
                        dm.lfw_dataset,
                        model,
                        compute_embeddings_lfw,
                        args.test_batch_size,
                        verbose=False,
                        show_failed=args.show_failed)
                    print('Validation accuracy: {0:.4f}, {1:.4f}'.format(
                        same_acc, diff_acc))
                    print('Validation accuracy mean: {0:.4f}'.format(all_acc))
                    print('Validation AUC: {0:.4f}'.format(auc))
                    print('Estimated threshold: {0:.4f}'.format(thresh))
                    test_writer.add_scalar('Accuracy/Val_same_accuracy',
                                           same_acc, num_iter)
                    test_writer.add_scalar('Accuracy/Val_diff_accuracy',
                                           diff_acc, num_iter)
                    test_writer.add_scalar('Accuracy/Val_accuracy', all_acc,
                                           num_iter)
                    test_writer.add_scalar('Accuracy/AUC', auc, num_iter)
                    rank1 = all_acc
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_dict = {
                'state_dict': state_dict,
                'epoch': epoch,
                'optim': optimizer.state_dict()
            }

            if len(args.target_names):
                save_dict['rank1'] = rank1

            save_checkpoint(
                save_dict, False,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
    ranklogger.show_summary()
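
The training loop above saves each checkpoint as a dict with the keys state_dict, epoch and optim (plus rank1 when target datasets are configured), and the args.resume and args.load_optim branches read the same keys back. A self-contained sketch of that contract with a toy model (the model, file name and metric value are illustrative only):

import torch
import torch.nn as nn

# Toy model and optimizer purely for illustration.
model = nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# Saving: the same keys as the save_dict assembled after each evaluation.
torch.save({'state_dict': model.state_dict(),
            'epoch': 4,
            'optim': optimizer.state_dict(),
            'rank1': 0.87},
           'checkpoint_ep5.pth.tar')

# Resuming: mirrors the args.resume and args.load_optim branches above.
checkpoint = torch.load('checkpoint_ep5.pth.tar')
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optim'])
start_epoch = checkpoint['epoch'] + 1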
Example 4
def shufflenet(num_classes, loss, pretrained='imagenet', **kwargs):
    model = ShuffleNet(num_classes, loss, **kwargs)
    if pretrained == 'imagenet':
        load_weights(model, model_urls['imagenet'])
    return model
Example 5
def main():
    global args

    torch.manual_seed(args.seed)
    if not args.use_avai_gpus:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False
    log_name = 'log_test.txt'
    sys.stdout = Logger(osp.join(args.save_dir, log_name))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU, however, GPU is highly recommended")

    print("Initializing image data manager")
    if not args.convert_to_onnx:  # and not args.infer:
        dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
        trainloader, trainloader_dict, testloader_dict = dm.return_dataloaders()

    num_train_pids = 100

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(
        name=args.arch,
        num_classes=num_train_pids,
        loss={'xent', 'htri'},
        pretrained=False if args.load_weights else 'imagenet',
        grayscale=args.grayscale,
        ceil_mode=not args.convert_to_onnx,
        infer=True,
        bits=args.bits,
        normalize_embeddings=args.normalize_embeddings,
        normalize_fc=args.normalize_fc,
        convbn=args.convbn)
    print("Model size: {:.3f} M".format(count_num_param(model)))

    if args.load_weights and check_isfile(args.load_weights):
        # load pretrained weights but ignore layers that don't match in size
        load_weights(model, args.load_weights)
        print("Loaded pretrained weights from '{}'".format(args.load_weights))

    if args.absorb_bn:
        search_absorbed_bn(model)

    if args.quantization or args.save_quantized_model:
        from gap_quantization.quantization import ModelQuantizer
        from gap_quantization.dump_utils import dump_quant_params, remove_extra_dump, remove_cat_files

        if args.quant_data_dir is None:
            raise AttributeError('quant-data-dir argument is required.')

        num_channels = 1 if args.grayscale else 3
        cfg = {
            "bits": args.bits,  # number of bits to store weights and activations
            "accum_bits": 32,  # number of bits to store intermediate convolution result
            "signed": True,  # use signed numbers
            "save_folder": args.save_dir,  # folder to save results
            "data_source": args.quant_data_dir,  # folder with images to collect dataset statistics
            "use_gpu": False,  # use GPU for inference
            "batch_size": 1,
            "num_workers": 0,  # number of workers for PyTorch dataloader
            "verbose": True,
            "save_params": args.save_quantized_model,  # save quantization parameters to the file
            "quantize_forward": True,  # replace usual convs, poolings, ... with GAP-like ones
            "num_input_channels": num_channels,
            "raw_input": args.no_normalize,
            "double_precision": args.double_precision  # use double precision convolutions
        }

        model = model.cpu()
        # transform_test is OK if we use args.no_normalize;
        # otherwise we would need to add a QuantizeInput operation
        quantizer = ModelQuantizer(model, cfg, dm.transform_test)
        quantizer.quantize_model()

        if args.infer:
            if args.image_path == '':
                raise AttributeError('Image for inference is required')

            quantizer.dump_activations(args.image_path,
                                       dm.transform_test,
                                       save_dir=os.path.join(
                                           args.save_dir, 'activations_dump'))
            dump_quant_params(args.save_dir, args.convbn)
            if args.convbn:
                remove_extra_dump(
                    os.path.join(args.save_dir, 'activations_dump'))
            remove_cat_files(args.save_dir)

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")

        for name in args.target_names:
            if 'lfw' not in name.lower():
                print("Evaluating {} ...".format(name))
                queryloader = testloader_dict[name]['query']
                galleryloader = testloader_dict[name]['gallery']
                distmat = test(args,
                               model,
                               queryloader,
                               galleryloader,
                               use_gpu,
                               return_distmat=True)

                if args.visualize_ranks:
                    visualize_ranked_results(
                        distmat,
                        dm.return_testdataset_by_name(name),
                        save_dir=osp.join(args.save_dir, 'ranked_results',
                                          name),
                        topk=20)

            else:
                model.eval()
                same_acc, diff_acc, all_acc, auc, thresh = evaluate(
                    args,
                    dm.lfw_dataset,
                    model,
                    compute_embeddings_lfw,
                    args.test_batch_size,
                    verbose=False,
                    show_failed=args.show_failed,
                    load_embeddings=args.load_embeddings)
                log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(
                    same_acc, diff_acc))
                log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))
                log.info('Validation AUC: {0:.4f}'.format(auc))
                log.info('Estimated threshold: {0:.4f}'.format(thresh))
                #roc_auc(model, '/home/maxim/data/lfw/pairsTest.txt', '/media/slow_drive/cropped_lfw', args, use_gpu)
        return
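
Example 5 calls search_absorbed_bn(model) when args.absorb_bn is set, but that helper is not included here. Folding a BatchNorm2d layer into the preceding convolution is a standard transformation; the generic sketch below shows the idea (fuse_conv_bn is a hypothetical name, not the project's actual implementation):

import torch
import torch.nn as nn

def fuse_conv_bn(conv, bn):
    # Return a new Conv2d whose weight and bias absorb the BatchNorm statistics.
    fused = nn.Conv2d(conv.in_channels, conv.out_channels, conv.kernel_size,
                      stride=conv.stride, padding=conv.padding,
                      dilation=conv.dilation, groups=conv.groups, bias=True)
    weight = conv.weight.clone()
    bias = conv.bias.clone() if conv.bias is not None else torch.zeros(conv.out_channels)
    std = torch.sqrt(bn.running_var + bn.eps)
    fused.weight.data = weight * (bn.weight / std).reshape(-1, 1, 1, 1)
    fused.bias.data = (bias - bn.running_mean) / std * bn.weight + bn.bias
    return fused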
Example 6
def espnet(num_classes, loss, pretrained='imagenet', **kwargs):
    model = ESPNet_Encoder(num_classes, loss)
    if pretrained == 'imagenet':
        load_weights(model, model_urls['imagenet'])
    return model
Example 7
def mobilenetv2(num_classes, loss, pretrained='imagenet', **kwargs):
    model = MobileNetV2(num_classes, loss, **kwargs)
    if pretrained == 'imagenet':
        model_url = model_urls['imagenet']
        load_weights(model, model_url)
    return model
Example 8
def choose_losses(args, data_manager, model, use_gpu):
    criteria = []

    if args.xent_loss == 'am':
        criteria.append(Criterion('am', AMSoftmaxLoss(), args.weight_xent))
    elif args.xent_loss is not None:
        criteria.append(
            Criterion(
                'xent',
                CrossEntropyLoss(num_classes=data_manager.num_train_pids,
                                 use_gpu=use_gpu,
                                 label_smooth=args.label_smooth),
                args.weight_xent))

    if args.euclid_loss:
        if args.euclid_loss == 'triplet':
            loss = TripletLoss(margin=args.margin)
        elif args.euclid_loss == 'lifted':
            loss = LiftedLoss(margin=args.margin)
        else:
            raise KeyError('Unknown euclidean loss: {}'.format(
                args.euclid_loss))
        criteria.append(Criterion('euclid', loss, args.weight_euclid))

    if args.face_loss:
        input_channels = 3
        if args.grayscale:
            input_channels = 1
        features = model(
            torch.zeros(1, input_channels, args.height, args.width))
        feature_vector_size = features.shape[1]
        if args.face_loss == 'arc':
            loss = ArcFace(feature_vector_size, data_manager.num_train_pids)
        elif args.face_loss == 'cos':
            loss = CosFace(feature_vector_size, data_manager.num_train_pids)
        elif args.face_loss == 'sphere':
            loss = SphereFace(feature_vector_size, data_manager.num_train_pids)
        elif args.face_loss == 'am':
            loss = AMSoftmaxLoss()
        else:
            raise KeyError('Unknown face loss: {}'.format(args.face_loss))
        if use_gpu:
            loss = loss.cuda()
        criteria.append(Criterion('face', loss, args.weight_face))

    if args.teacher_arch:
        print("Initializing teacher model: {}".format(args.teacher_arch))
        teacher_model = models.init_model(
            name=args.teacher_arch,
            num_classes=data_manager.num_train_pids,
            loss={'xent', 'htri'},
            pretrained=False)
        if not args.load_teacher_weights or not check_isfile(
                args.load_teacher_weights):
            print('Teacher model checkpoint wasn\'t provided!')
            return
        load_weights(teacher_model, args.load_teacher_weights)
        print("Loaded pretrained weights from '{}' for teacher model".format(
            args.load_teacher_weights))
        if use_gpu:
            teacher_model = nn.DataParallel(teacher_model).cuda()
        criteria.append(
            Criterion('distill', DistillLoss(teacher_model),
                      args.weight_distill))

    return criteria
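
choose_losses returns a list of Criterion objects, each built from a name, a loss module and a weight; the training code in Example 3 later reads criterion.name and criterion.train_stats / test_stats. The Criterion class itself is not part of these examples, so the sketch below uses a namedtuple stand-in to show how such a list is typically reduced to a single weighted training loss (the stand-in and the toy tensors are assumptions for illustration):

from collections import namedtuple

import torch
import torch.nn as nn

# Stand-in for the project's Criterion wrapper.
Criterion = namedtuple('Criterion', ['name', 'loss', 'weight'])

criteria = [Criterion('xent', nn.CrossEntropyLoss(), 1.0)]

logits = torch.randn(4, 10)
targets = torch.randint(0, 10, (4,))

# Weighted sum of all configured losses, as a training step would compute it.
total_loss = sum(c.weight * c.loss(logits, targets) for c in criteria)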
Example 9
def dilated_thin_mobilenet(num_classes, loss, pretrained='imagenet', **kwargs):
    model = DilatedThinMobileNet(num_classes, loss, **kwargs)
    if pretrained == 'imagenet':
        model_url = pretrained_settings['mobilenet']['imagenet']['url']
        load_weights(model, model_url)
    return model
Example 10
def shufflenetv2small(num_classes, loss, pretrained='imagenet', **kwargs):
    model = ShuffleNetV2(num_classes, loss, width_mult=0.5)
    if pretrained == 'imagenet':
        model_url = model_urls['imagenet_small']
        load_weights(model, model_url)
    return model