示例#1
0
def main(args):
    """Train a person re-id model end to end.

    Builds loaders/model/losses/optimizers from ``args``, optionally
    resumes from a checkpoint, trains with per-epoch LR and batch-size
    schedules, evaluates periodically, and writes checkpoints, tensorboard
    scalars and ``res.json`` under ``args.logs_dir``.

    Returns the final evaluation result dict.
    """
    # Tee stdout/stderr into log files inside the experiment directory.
    sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    sys.stderr = Logger(osp.join(args.logs_dir, 'err.txt'))
    lz.init_dev(args.gpu)
    print('config is {}'.format(vars(args)))
    if args.seed is not None:
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    # Identity-balanced batching needs num_instances to divide batch_size.
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'

    dataset = datasets.create(args.dataset, root=args.root + '/' + args.dataset,
                              split_id=args.split_id, mode=args.dataset_mode)

    # ImageNet channel statistics; inputs assumed [0, 1] RGB after ToTensor.
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])

    train_set = dataset.trainval
    num_classes = dataset.num_trainval_ids

    train_transformer = T.Compose([
        T.RandomCropFlip(args.height, args.width, area=args.area),
        T.ToTensor(),
        normalizer,
    ])

    test_transformer = T.Compose([
        T.RectScale(args.height, args.width),
        T.ToTensor(),
        normalizer,
    ])

    # Sampler draws num_instances images per identity, with a rand_ratio
    # fraction drawn at random.
    train_loader = DataLoader(
        Preprocessor(train_set, root=dataset.images_dir,
                     transform=train_transformer,
                     ),
        batch_size=args.batch_size, num_workers=args.workers,
        sampler=RandomIdentityWeightedSampler(
            train_set, args.num_instances,
            batch_size=args.batch_size,
            rand_ratio=args.rand_ratio,
        ),
        # shuffle=True,
        pin_memory=args.pin_memory, drop_last=True)

    test_loader = DataLoader(
        Preprocessor(dataset.val,
                     root=dataset.images_dir,
                     transform=test_transformer,
                     ),
        batch_size=args.batch_size, num_workers=args.workers,
        shuffle=False, pin_memory=False)

    # Create model
    model = models.create(args.arch,
                          dropout=args.dropout,
                          pretrained=args.pretrained,
                          block_name=args.block_name,
                          block_name2=args.block_name2,
                          num_features=args.num_classes,
                          num_classes=num_classes,
                          num_deform=args.num_deform,
                          fusion=args.fusion,
                          )

    print(model)
    param_mb = sum(p.numel() for p in model.parameters()) / 1000000.0
    logging.info('    Total params: %.2fM' % (param_mb))

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        # Poll (every 20 s) until the checkpoint file appears, to support
        # resuming while another job is still writing it.
        while not osp.exists(args.resume):
            lz.logging.warning(' no chkpoint {} '.format(args.resume))
            time.sleep(20)
        if torch.cuda.is_available():
            checkpoint = load_checkpoint(args.resume)
        else:
            checkpoint = load_checkpoint(args.resume, map_location='cpu')
        # model.load_state_dict(checkpoint['state_dict'])
        db_name = args.logs_dir + '/' + args.logs_dir.split('/')[-1] + '.h5'
        load_state_dict(model, checkpoint['state_dict'])
        # Stash centers / classifier weights in an h5 side-car for inspection.
        with lz.Database(db_name) as db:
            if 'cent' in checkpoint:
                db['cent'] = to_numpy(checkpoint['cent'])
            db['xent'] = to_numpy(checkpoint['state_dict']['embed2.weight'])
        if args.restart:
            # Restart mode: keep the weights but train again from epoch 0.
            start_epoch_ = checkpoint['epoch']
            best_top1_ = checkpoint['best_top1']
            print("=> Start epoch {}  best top1 {:.1%}"
                  .format(start_epoch_, best_top1_))
        else:
            start_epoch = checkpoint['epoch']
            best_top1 = checkpoint['best_top1']
            print("=> Start epoch {}  best top1 {:.1%}"
                  .format(start_epoch, best_top1))
    if args.gpu is None:
        model = nn.DataParallel(model)
    elif len(args.gpu) == 1:
        model = nn.DataParallel(model).cuda()
    else:
        model = nn.DataParallel(model, device_ids=range(len(args.gpu))).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model, gpu=args.gpu, conf=args.eval_conf, args=args)
    if args.evaluate:
        # Evaluation-only mode: run once on the test split and return early.
        res = evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric,
                                 final=True, prefix='test')

        lz.logging.info('eval {}'.format(res))
        return res
    # Criterion
    if not args.xent_smooth:
        xent = nn.CrossEntropyLoss()
    else:
        xent = CrossEntropyLabelSmooth(num_classes=num_classes)
    setattr(xent, 'name', 'xent')

    # Loss bundle: [triplet, center, cross-entropy]; index 1 (center loss)
    # owns trainable centers and gets its own optimizer below.
    criterion = [TripletLoss(margin=args.margin, mode='hard', args=args),
                 CenterLoss(num_classes=num_classes, feat_dim=args.num_classes,
                            margin2=args.margin2,
                            margin3=args.margin3, mode=args.mode,
                            push_scale=args.push_scale,
                            args=args),
                 xent
                 ]
    if args.gpu is not None:
        criterion = [c.cuda() for c in criterion]
    # Optimizer
    # The two embedding weight matrices get their own lr multiplier.
    fast_params = []
    for name, param in model.named_parameters():
        if name == 'module.embed1.weight' or name == 'module.embed2.weight':
            fast_params.append(param)
    # ``fid`` presumably returns a stable identity key (like id()) — TODO confirm.
    fast_params_ids = set(map(fid, fast_params))
    normal_params = [p for p in model.parameters() if fid(p) not in fast_params_ids]
    param_groups = [
        {'params': fast_params, 'lr_mult': args.lr_mult},
        {'params': normal_params, 'lr_mult': 1.},
    ]
    # Separate optimizer for the center-loss centers.
    if args.optimizer_cent == 'sgd':
        optimizer_cent = torch.optim.SGD(criterion[1].parameters(), lr=args.lr_cent, )
    else:
        optimizer_cent = torch.optim.Adam(criterion[1].parameters(), lr=args.lr_cent, )
    if args.optimizer == 'adam':
        optimizer = torch.optim.Adam(
            # model.parameters(),
            param_groups,
            lr=args.lr,
            betas=args.adam_betas,
            eps=args.adam_eps,  # adam hyperparameter
            weight_decay=args.weight_decay)
    elif args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(
            # filter(lambda p: p.requires_grad, model.parameters()),
            param_groups,
            lr=args.lr,
            weight_decay=args.weight_decay, momentum=0.9,
            nesterov=True)
    else:
        raise NotImplementedError

    if args.cls_pretrain:
        # Optional warm-up phase trained with classification loss only.
        args_cp = copy.deepcopy(args)
        args_cp.cls_weight = 1
        args_cp.tri_weight = 0
        trainer = XentTrainer(model, criterion, dbg=False,
                              logs_at=args_cp.logs_dir + '/vis', args=args_cp)
        for epoch in range(start_epoch, args_cp.epochs):
            hist = trainer.train(epoch, train_loader, optimizer)
            save_checkpoint({
                'state_dict': model.module.state_dict(),
                'cent': criterion[1].centers,
                'epoch': epoch + 1,
                'best_top1': best_top1,
            }, True, fpath=osp.join(args.logs_dir, 'checkpoint.{}.pth'.format(epoch)))  #
            print('Finished epoch {:3d} hist {}'.
                  format(epoch, hist))
    # Trainer
    # NOTE(review): ``dop_info`` is not defined anywhere in this function —
    # this raises NameError unless it is a module global; verify.
    trainer = TCXTrainer(model, criterion, dbg=True,
                         logs_at=args.logs_dir + '/vis', args=args, dop_info=dop_info)

    # Schedule learning rate
    def adjust_lr(epoch, optimizer=optimizer, base_lr=args.lr, steps=args.steps, decay=args.decay):

        # Step decay: lr = base_lr * decay**k where k is the number of
        # milestones in ``steps`` already passed by ``epoch``.
        exp = len(steps)
        for i, step in enumerate(steps):
            if epoch < step:
                exp = i
                break
        lr = base_lr * decay ** exp

        lz.logging.info('use lr {}'.format(lr))
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr * param_group.get('lr_mult', 1)

    def adjust_bs(epoch, args):
        # Optionally switch batch_size / num_instances at ``bs_steps``
        # milestones; out-of-range falls back to the last configured pair.
        if args.batch_size_l == []:
            return args
        res = 0
        for i, step in enumerate(args.bs_steps):
            if epoch > step:
                res = i + 1
        print(epoch, res)
        if res >= len(args.num_instances_l):
            res = -1
        args.batch_size = args.batch_size_l[res]
        args.num_instances = args.num_instances_l[res]
        return args

    writer = SummaryWriter(args.logs_dir)
    writer.add_scalar('param', param_mb, global_step=0)

    # schedule = CyclicLR(optimizer)
    schedule = None
    # Start training
    for epoch in range(start_epoch, args.epochs):
        # warm up
        # mAP, acc,rank5 = evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)

        adjust_lr(epoch=epoch)
        args = adjust_bs(epoch, args)

        hist = trainer.train(epoch, train_loader, optimizer, print_freq=args.print_freq, schedule=schedule,
                             optimizer_cent=optimizer_cent)
        for k, v in hist.items():
            writer.add_scalar('train/' + k, v, epoch)
        writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)
        writer.add_scalar('bs', args.batch_size, epoch)
        writer.add_scalar('num_instances', args.num_instances, epoch)

        if not args.log_middle:
            continue
        if epoch < args.start_save:
            continue
        # Periodic (non-best) snapshot every 15 epochs.
        if epoch % 15 == 0:
            save_checkpoint({
                'state_dict': model.module.state_dict(),
                'cent': criterion[1].centers,
                'epoch': epoch + 1,
                'best_top1': best_top1,
            }, False, fpath=osp.join(args.logs_dir, 'checkpoint.{}.pth'.format(epoch)))

        if epoch not in args.log_at:
            continue

        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'cent': criterion[1].centers,
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, False, fpath=osp.join(args.logs_dir, 'checkpoint.{}.pth'.format(epoch)))

        # res = evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        # for n, v in res.items():
        #     writer.add_scalar('train/'+n, v, epoch)

        res = evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric, epoch=epoch)
        for n, v in res.items():
            writer.add_scalar('test/' + n, v, epoch)

        top1 = res['top-1']
        is_best = top1 > best_top1

        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'cent': criterion[1].centers,
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.{}.pth'.format(epoch)))  #

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    res = evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
    for n, v in res.items():
        writer.add_scalar('test/' + n, v, args.epochs)

    if osp.exists(osp.join(args.logs_dir, 'model_best.pth')) and args.test_best:
        print('Test with best model:')
        checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth'))
        model.module.load_state_dict(checkpoint['state_dict'])
        metric.train(model, train_loader)
        res = evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric, final=True)
        for n, v in res.items():
            writer.add_scalar('test/' + n, v, args.epochs + 1)
        lz.logging.info('final eval is {}'.format(res))

    writer.close()
    json_dump(res, args.logs_dir + '/res.json', 'w')
    return res
示例#2
0
from easydict import EasyDict as edict
from pathlib import Path
import torch
import lz
from torch.nn import CrossEntropyLoss
from torchvision import transforms as trans

lz.init_dev(lz.get_dev(n=1))
# lz.init_dev((0, 1))


def get_config(training=True):
    conf = edict()

    dbg = lz.dbg
    if dbg:
        # conf.num_steps_per_epoch = 38049
        conf.num_steps_per_epoch = 3
        # conf.no_eval = False
        conf.no_eval = True
    else:
        conf.num_steps_per_epoch = 38049
        # conf.num_steps_per_epoch = 3
        conf.no_eval = False
        # conf.no_eval = True
    conf.loss = 'softmax'  # softmax arcface
    conf.fgg = ''  # g gg ''
    conf.fgg_wei = 0  # 1
    conf.start_eval = False

    conf.data_path = Path('/data2/share/')
示例#3
0
import lz
from lz import *
from torch.nn import CrossEntropyLoss
from tools.vat import VATLoss
from torchvision import transforms as trans

# todo label smooth

# Device setup: distributed runs manage devices per process and use one
# local device; otherwise grab num_devs free GPUs automatically.
dist = False
num_devs = 3
if dist:
    num_devs = 1
else:
    pass
    # lz.init_dev(3)
    lz.init_dev(lz.get_dev(num_devs))

# Global training configuration.
conf = edict()
conf.num_workers = 4  # ndevs * 3
conf.num_devs = num_devs
conf.no_eval = False  # when True, skip evaluation entirely
conf.start_eval = False
conf.loss = 'arcface'  # adacos softmax arcface arcfaceneg arcface2 cosface

# Placeholders populated later at runtime.
conf.writer = None
conf.local_rank = None
conf.num_clss = None
conf.dop = None  # top_imp
conf.id2range_dop = None  # sub_imp
conf.explored = None
示例#4
0
from sklearn import manifold, datasets
from sklearn.metrics.pairwise import pairwise_distances
from scipy.spatial.distance import squareform
from matplotlib.patches import Ellipse
from lz import *

from sne.wrapper import Wrapper
# from tsne import TSNE
from sne.vtsne import VTSNE
import lz
from reid import datasets

lz.init_dev((0,))

for path in ['ohmn_match/val/2', 'ohmn_match/val/1']:

    def preprocess(perplexity=30, metric='euclidean'):
        """ Compute pairiwse probabilities for MNIST pixels.
        """
        db = lz.Database('distmat.h5', 'r')
        print(list(db.keys()))
        distmat = db[path]
        dataset = datasets.create('cuhk03', '/home/xinglu/.torch/data/cuhk03/', split_id=0)
        if 'val' in path:
            y = np.asarray([
                pid for fn, pid, cid in dataset.val
            ])
        else:
            y = np.asarray([
                pid for fn, pid, cid in dataset.query
            ])
示例#5
0
def main(args):
    """Untargeted adversarial-noise demo.

    Loads a re-id model (optionally from ``args.resume``), then for every
    JPEG under ``exps/fig`` runs an iterative FGSM-style attack toward
    label 0 and, once the model's prediction flips, saves the adversarial
    image and the noise image under ``../gen``.
    """
    sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    sys.stderr = Logger(osp.join(args.logs_dir, 'err.txt'))
    lz.init_dev(args.gpu)
    print('config is {}'.format(vars(args)))
    # Seed is forced to a fixed value so the attack is reproducible.
    args.seed = 16
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'

    (dataset, num_classes, train_loader, val_loader, test_loader, dop_info,
     trainval_test_loader) = get_data(args)
    # Create model
    model = models.create(
        args.arch,
        dropout=args.dropout,
        pretrained=args.pretrained,
        block_name=args.block_name,
        block_name2=args.block_name2,
        num_features=args.num_classes,
        num_classes=num_classes,
        num_deform=args.num_deform,
        fusion=args.fusion,
    )

    print(model)
    param_mb = sum(p.numel() for p in model.parameters()) / 1000000.0
    logging.info('    Total params: %.2fM' % (param_mb))

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        # Poll until the checkpoint file appears on disk.
        while not osp.exists(args.resume):
            lz.logging.warning(' no chkpoint {} '.format(args.resume))
            time.sleep(20)
        checkpoint = load_checkpoint(args.resume, map_location='cpu')
        # model.load_state_dict(checkpoint['state_dict'])
        db_name = args.logs_dir + '/' + args.logs_dir.split('/')[-1] + '.h5'
        load_state_dict(model, checkpoint['state_dict'])
        # Stash centers / classifier weights in an h5 side-car for inspection.
        with lz.Database(db_name) as db:
            if 'cent' in checkpoint:
                db['cent'] = to_numpy(checkpoint['cent'])
            db['xent'] = to_numpy(checkpoint['state_dict']['embed2.weight'])
        if args.restart:
            start_epoch_ = checkpoint['epoch']
            best_top1_ = checkpoint['best_top1']
            print("=> Start epoch {}  best top1 {:.1%}".format(
                start_epoch_, best_top1_))
        else:
            start_epoch = checkpoint['epoch']
            best_top1 = checkpoint['best_top1']
            print("=> Start epoch {}  best top1 {:.1%}".format(
                start_epoch, best_top1))
    # if args.gpu is None:
    # model = nn.DataParallel(model)
    # elif len(args.gpu) == 1:
    #     model = nn.DataParallel(model).cuda()
    # else:
    #     model = nn.DataParallel(model, device_ids=range(len(args.gpu))).cuda()

    test_transformer = T.Compose([
        T.RectScale(256, 128),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    from PIL import Image

    for fn in glob.glob(root_path + '/exps/fig/*.jpg'):
        # fn = glob.glob(root_path + '/exps/fig/*.jpg')[0]
        name = fn.split('/')[-1].split('.')[0]

        def preprocess_image(img):
            # Transform to a (1, C, H, W) leaf tensor that accumulates
            # gradients w.r.t. the input pixels.
            img = test_transformer(img)
            img = torch.tensor(img.view(1, img.shape[0], img.shape[1],
                                        img.shape[2]),
                               requires_grad=True)
            return img

        img = Image.open(fn).convert('RGB')
        original_image = img.copy().resize((128, 256))
        img = preprocess_image(img)
        model.eval()

        # gbp = GuidedBackprop(model)
        # grads = gbp.generate_gradients(img, 0)
        #
        # pos_sal, neg_sal = get_positive_negative_saliency(grads)
        # save_gradient_images(pos_sal, name + '_pos_sal')
        # save_gradient_images(neg_sal, name + '_neg_sal')

        im_label = 0

        ce = nn.CrossEntropyLoss()
        # Iterative FGSM: take up to 10 signed-gradient steps on the input
        # and stop as soon as the model's prediction flips away from label 0.
        for i in range(10):
            img.grad = None
            out = model(img)
            loss = ce(out, torch.ones(1, dtype=torch.int64) * 0)
            loss.backward()
            adv_noise = 0.01 * torch.sign(img.grad.data)
            img.data += adv_noise

            recreated_image = recreate_image(img)
            # Process confirmation image
            recreated_image = np.ascontiguousarray(recreated_image)
            # NOTE(review): recreated_image is a numpy array here, while
            # test_transformer presumably expects a PIL image — verify.
            prep_confirmation_image = preprocess_image(recreated_image)
            # Forward pass
            confirmation_out = model(prep_confirmation_image)
            # Get prediction
            _, confirmation_prediction = confirmation_out.data.max(1)
            # Get Probability
            confirmation_confidence = \
                nn.functional.softmax(confirmation_out)[0][confirmation_prediction].data.numpy()[0]
            # Convert tensor to int
            confirmation_prediction = confirmation_prediction.numpy()[0]
            if confirmation_prediction != im_label:
                print('Original image was predicted as:', im_label,
                      'with adversarial noise converted to:',
                      confirmation_prediction,
                      'and predicted with confidence of:',
                      confirmation_confidence)
                # Create the image for noise as: Original image - generated image
                # NOTE(review): original_image is a PIL Image; subtracting a
                # numpy array from it likely raises — confirm recreate_image's
                # return type before relying on this branch.
                noise_image = original_image - recreated_image
                # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2.
                from scipy.misc import imsave
                imsave(
                    f'../gen/{name}_untargeted_adv_noise_from_' +
                    str(im_label) + '_to_' + str(confirmation_prediction) +
                    '.jpg', noise_image)
                # Write image
                imsave(
                    f'../gen/{name}_untargeted_adv_img_from_' + str(im_label) +
                    '_to_' + str(confirmation_prediction) + '.jpg',
                    recreated_image)
                break
示例#6
0
def main(args):
    """Feature-extraction entry point.

    Builds the model from ``args``, optionally loads a checkpoint (polling
    until it exists), wraps it in DataParallel, extracts features for every
    sample in the test loader and dumps them as numpy arrays to
    ``reid.person/fea.mp`` via msgpack.
    """
    sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    sys.stderr = Logger(osp.join(args.logs_dir, 'err.txt'))
    lz.init_dev(args.gpu)
    print('config is {}'.format(vars(args)))
    if args.seed is not None:
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'

    test_loader = get_data(args)

    # Create model
    # NOTE(review): num_classes is hard-coded to 100 — the classifier head
    # is unused for feature extraction; confirm it matches the checkpoint.
    model = models.create(
        args.arch,
        dropout=args.dropout,
        pretrained=args.pretrained,
        block_name=args.block_name,
        block_name2=args.block_name2,
        num_features=args.num_classes,
        num_classes=100,
        num_deform=args.num_deform,
        fusion=args.fusion,
        last_conv_stride=args.last_conv_stride,
        last_conv_dilation=args.last_conv_dilation,
    )

    print(model)
    param_mb = sum(p.numel() for p in model.parameters()) / 1000000.0
    print('    Total params: %.2fM' % (param_mb))

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        # Poll until the checkpoint file appears on disk.
        while not osp.exists(args.resume):
            lz.logging.warning(' no chkpoint {} '.format(args.resume))
            time.sleep(20)
        if torch.cuda.is_available():
            checkpoint = load_checkpoint(args.resume)
        else:
            checkpoint = load_checkpoint(args.resume, map_location='cpu')
        # model.load_state_dict(checkpoint['state_dict'])
        db_name = args.logs_dir + '/' + args.logs_dir.split('/')[-1] + '.h5'
        load_state_dict(model, checkpoint['state_dict'])
        # Stash centers / classifier weights in an h5 side-car for inspection.
        with lz.Database(db_name) as db:
            if 'cent' in checkpoint:
                db['cent'] = to_numpy(checkpoint['cent'])
            db['xent'] = to_numpy(checkpoint['state_dict']['embed2.weight'])
        if args.restart:
            start_epoch_ = checkpoint['epoch']
            best_top1_ = checkpoint['best_top1']
            print("=> Start epoch {}  best top1 {:.1%}".format(
                start_epoch_, best_top1_))
        else:
            start_epoch = checkpoint['epoch']
            best_top1 = checkpoint['best_top1']
            print("=> Start epoch {}  best top1 {:.1%}".format(
                start_epoch, best_top1))
    if args.gpu is None or len(args.gpu) == 0:
        model = nn.DataParallel(model)
    elif len(args.gpu) == 1:
        model = nn.DataParallel(model).cuda()
    else:
        model = nn.DataParallel(model, device_ids=range(len(args.gpu))).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    features, _ = extract_features(model, test_loader)
    # Convert tensors to numpy before serialising with msgpack.
    for k in features.keys():
        features[k] = features[k].numpy()
    lz.msgpack_dump(features, work_path + '/reid.person/fea.mp', allow_np=True)
示例#7
0
# -*- coding: future_fstrings -*-
from pathlib import Path
import lz
from lz import *
from torch.nn import CrossEntropyLoss
from tools.vat import VATLoss
from torchvision import transforms as trans

# Device setup: distributed runs manage devices per process and use one
# local device; otherwise all four GPUs are claimed explicitly.
dist = False
num_devs = 4
if dist:
    num_devs = 1
else:
    pass
    lz.init_dev((0, 1, 2, 3))
    # lz.init_dev(lz.get_dev(num_devs))

# Global training configuration.
conf = edict()
# Bugfix: `ndevs` was never defined (NameError at import time); the
# intended name is `num_devs` — workers scale with the device count.
conf.num_workers = num_devs * 4
conf.num_devs = num_devs
conf.no_eval = False  # when True, skip evaluation entirely
conf.start_eval = False
conf.loss = 'arcface'  # adacos softmax arcface arcfaceneg cosface

# Placeholders populated later at runtime.
conf.writer = None
conf.local_rank = None
conf.num_clss = None
conf.dop = None  # top_imp
conf.id2range_dop = None  # sub_imp
conf.explored = None
示例#8
0
        initialize_pretrained_model(model, num_classes, settings)
    return model


def se_resnext101_32x4d(num_classes=1000, pretrained='imagenet'):
    """Build an SE-ResNeXt-101 (32x4d) network.

    Uses bottleneck blocks with layer counts [3, 4, 23, 3], 32 groups and
    SE reduction 16.  When ``pretrained`` is not None, the weights for
    that dataset key are loaded from ``pretrained_settings``.
    """
    arch_kwargs = dict(
        groups=32,
        reduction=16,
        dropout_p=None,
        inplanes=64,
        input_3x3=False,
        downsample_kernel_size=1,
        downsample_padding=0,
        num_classes=num_classes,
    )
    net = SENet(SEResNeXtBottleneck, [3, 4, 23, 3], **arch_kwargs)
    if pretrained is None:
        return net
    cfg = pretrained_settings['se_resnext101_32x4d'][pretrained]
    initialize_pretrained_model(net, num_classes, cfg)
    return net


if __name__ == '__main__':
    # Smoke test: build SE-ResNet-101 with a 512-d head on GPU 3 and run
    # a single forward pass on a random batch of 8 face crops.
    lz.init_dev((3, ))
    # model = se_resnext101_32x4d(512)
    model = se_resnet101(512)
    model = model.cuda()
    print('next ')
    import torch

    input = torch.autograd.Variable(torch.randn(8, 3, 112, 112)).cuda()
    output = model(input)
def main_allimg(args):
    """Extract face embeddings for every frame listed under ``args.input``.

    Reads video names from ``filelist.txt``, expands each to its frame
    paths via ``vdonm2imgs.pk``, batches the frames through either an
    MXNet checkpoint or the PyTorch ``FaceInfer`` model (selected by the
    module-level ``use_mxnet`` flag), and streams the float16 features
    into a resizable HDF5 dataset ``feas`` in ``args.output``.

    Side effects: mutates the module globals ``image_shape``, ``net``,
    ``ctx_num``, ``env`` and ``glargs``.
    """
    global image_shape
    global net
    global ctx_num, env, glargs
    print(args)
    glargs = args
    # Frames live in an LMDB next to the file list; opened read-only.
    env = lmdb.open(
        args.input + '/imgs_lmdb',
        readonly=True,
        # max_readers=1,  lock=False,
        # readahead=False, meminit=False
    )
    # Build the MXNet context list from CUDA_VISIBLE_DEVICES (cpu fallback).
    ctx = []
    cvd = os.environ['CUDA_VISIBLE_DEVICES'].strip()
    if len(cvd) > 0:
        for i in xrange(len(cvd.split(','))):
            ctx.append(mx.gpu(i))
    if len(ctx) == 0:
        ctx = [mx.cpu()]
        print('use cpu')
    else:
        print('gpu num:', len(ctx))
    ctx_num = len(ctx)
    image_shape = [int(x) for x in args.image_size.split(',')]
    if use_mxnet:
        # ``args.model`` is "<prefix>,<epoch>"; keep only the fc1
        # embedding output of the checkpointed symbol.
        vec = args.model.split(',')
        assert len(vec) > 1
        prefix = vec[0]
        epoch = int(vec[1])
        print('loading', prefix, epoch)
        net = edict()
        net.ctx = ctx
        net.sym, net.arg_params, net.aux_params = mx.model.load_checkpoint(
            prefix, epoch)
        all_layers = net.sym.get_internals()
        net.sym = all_layers['fc1_output']
        net.model = mx.mod.Module(symbol=net.sym,
                                  context=net.ctx,
                                  label_names=None)
        net.model.bind(data_shapes=[('data',
                                     (args.batch_size, 3, image_shape[1],
                                      image_shape[2]))])
        net.model.set_params(net.arg_params, net.aux_params)
    else:
        # PyTorch backend: configure and load FaceInfer in eval mode.
        from config import conf
        lz.init_dev(use_devs)
        conf.need_log = False
        conf.fp16 = True  # maybe faster ?
        conf.ipabn = False
        conf.cvt_ipabn = False
        conf.use_chkpnt = False
        conf.net_mode = 'ir_se'
        conf.net_depth = 100
        conf.input_size = 128
        conf.embedding_size = 512
        from Learner import FaceInfer

        net = FaceInfer(
            conf,
            gpuid=range(len(use_devs)),
        )
        net.load_state(
            resume_path=args.model,
            latest=True,
        )
        net.model.eval()

    filelist = os.path.join(args.input, 'filelist.txt')
    lines = open(filelist, 'r').readlines()
    buffer_images = []
    row_idx = 0
    import h5py
    # Stream features to disk in resizable chunks so the full matrix
    # never has to fit in memory.
    f = h5py.File(args.output, 'w')
    chunksize = 80 * 10 ** 3
    dst = f.create_dataset("feas", (chunksize, 512),
                           maxshape=(None, emb_size),
                           dtype='f2')
    ind_dst = 0
    vdonm2imgs = lz.msgpack_load(args.input + '/../vdonm2imgs.pk')
    for line in lines:
        if row_idx % 1000 == 0:
            logging.info(
                f"processing {(row_idx, len(lines), row_idx / len(lines),)}")
        row_idx += 1
        videoname = line.strip().split()[0]
        images = vdonm2imgs[videoname]
        assert len(images) > 0
        for image_path in images:
            buffer_images.append(image_path)
        # Drain full batches from the buffer; grow the dataset on demand.
        while len(buffer_images) >= args.batch_size:
            embedding = get_feature(buffer_images[0:args.batch_size])
            buffer_images = buffer_images[args.batch_size:]
            if ind_dst + args.batch_size > dst.shape[0]:
                dst.resize((dst.shape[0] + chunksize, emb_size), )
            dst[ind_dst:ind_dst +
                args.batch_size, :] = embedding.astype('float16')
            ind_dst += args.batch_size
    if len(buffer_images) != 0:
        # Flush the final partial batch.  Bugfix: the destination slice
        # must match the actual number of remaining rows — the old code
        # wrote a full args.batch_size slice for a smaller embedding
        # array and never advanced ind_dst.
        embedding = get_feature(buffer_images)
        n_rest = embedding.shape[0]
        if ind_dst + n_rest > dst.shape[0]:
            dst.resize((dst.shape[0] + chunksize, emb_size), )
        dst[ind_dst:ind_dst + n_rest, :] = embedding.astype('float16')
        ind_dst += n_rest
    # NOTE(review): beyond ``ind_dst`` rows the dataset is chunk-padded
    # with zeros; trim with dst.resize((ind_dst, emb_size)) if downstream
    # readers care about the exact row count.
    f.flush()
    f.close()
import sklearn
from sklearn.preprocessing import normalize
import mxnet as mx
from mxnet import ndarray as nd
import lz
import lmdb, six
from PIL import Image
from config import conf

# Module-level globals shared by the extraction helpers.
use_devs = (
    0,
    1,
    2,
    3,
)
lz.init_dev(use_devs)
image_shape = (3, 112, 112)  # (C, H, W) expected by the network
net = None  # model handle, lazily built in main_allimg
data_size = 203848
emb_size = conf.embedding_size  # embedding dimensionality
use_flip = True  # whether flipped frames are also processed — TODO confirm usage
ctx_num = 1
xrange = range  # py2-style alias kept for the mxnet-era code paths
env = None  # lmdb environment, opened in main_allimg
glargs = None  # parsed CLI args, set in main_allimg
use_mxnet = False  # select mxnet checkpoint vs pytorch FaceInfer backend


def do_flip(data):
    """Horizontally flip every leading-axis slice of *data* in place.

    Each ``data[k]`` slice is replaced with ``np.fliplr`` of itself,
    i.e. its second axis is reversed.  Mutates *data*; returns None.
    """
    for k, plane in enumerate(data):
        data[k, :, :] = np.fliplr(plane)
示例#11
0
    query_loader = DataLoader(
        DirPreprocessor('/home/xinglu/work/body.query/', test_transformer),
        batch_size=128,
        num_workers=12,
        shuffle=False, pin_memory=False)

    gallery_loader = DataLoader(
        DirPreprocessor('/home/xinglu/work/body.test/', test_transformer),
        batch_size=128,
        num_workers=12,
        shuffle=False, pin_memory=False)

    return query_loader, gallery_loader

lz.init_dev(args.gpu)
print('config is {}'.format(vars(args)))
# Seed numpy/torch for reproducibility when a seed is given.
if args.seed is not None:
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
cudnn.benchmark = True

# Create data loaders
# Identity-balanced batching needs num_instances to divide batch_size.
assert args.num_instances > 1, "num_instances should be greater than 1"
assert args.batch_size % args.num_instances == 0, \
    'num_instances should divide batch_size'

# Create model
model = models.create(args.arch,
                      dropout=args.dropout,
                      pretrained=args.pretrained,
示例#12
0
import sys

sys.path.insert(0, '/data1/xinglu/prj/InsightFace_Pytorch')
from lz import *
import lz
from torchvision import transforms as trans
import redis
import argparse

# CLI: select which model snapshot directory to evaluate.
parser = argparse.ArgumentParser()
parser.add_argument('--modelp', default='mbv3.small', type=str)
args = parser.parse_args()
os.chdir(lz.root_path)
lz.init_dev()
use_mxnet = False  # evaluate the pytorch model, not an mxnet checkpoint
bs = 512  # batch size for embedding extraction


def evaluate_ori(model, path, name, nrof_folds=10, tta=True):
    from utils import ccrop_batch, hflip_batch
    from models.model import l2_norm
    from verifacation import evaluate
    idx = 0
    from data.data_pipe import get_val_pair
    carray, issame = get_val_pair(path, name)
    carray = carray[:, ::-1, :, :]  # BGR 2 RGB!
    if use_mxnet:
        carray *= 0.5
        carray += 0.5
        carray *= 255.
    embeddings = np.zeros([len(carray), 512])