def test_json_against_nncf_config_schema(config_test_struct):
    """Check JSON configs against the NNCF schema.

    Configs marked as valid must load without error; invalid ones must
    raise ``jsonschema.ValidationError`` during loading.
    """
    config_path, should_pass = config_test_struct
    if not should_pass:
        with pytest.raises(jsonschema.ValidationError):
            Config.from_json(str(config_path))
    else:
        Config.from_json(str(config_path))
# Example 2
def main(argv):
    """Entry point: parse CLI arguments, build the config and launch a worker."""
    parser = get_argument_parser()
    parsed = parser.parse_args(args=argv)

    config = Config.from_json(parsed.config)
    config.update_from_args(parsed, parser)
    if config.dist_url == "env://":
        config.update_from_env()

    configure_paths(config)
    # nncf root lives two directories above this file
    source_root = Path(__file__).absolute().parents[2]
    create_code_snapshot(source_root,
                         osp.join(config.log_dir, "snapshot.tar.gz"))

    if config.seed is not None:
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    config.execution_mode = get_execution_mode(config)

    # Binarization uses a dedicated worker implementation.
    if is_binarization(config):
        from examples.classification.binarization_worker import main_worker_binarization
        start_worker(main_worker_binarization, config)
    else:
        start_worker(main_worker, config)
def start_evaluation(args):
    """Launches the evaluation process.

    Builds the validation dataset and loader, optionally wraps the landmarks
    model with a compression algorithm (when ``args.compr_config`` is set),
    loads the snapshot and logs RMSE / failure-rate metrics.
    """
    if args.dataset == 'vgg':
        dataset = VGGFace2(args.val,
                           args.v_list,
                           args.v_land,
                           landmarks_training=True)
    elif args.dataset == 'celeb':
        dataset = CelebA(args.val, args.v_land, test=True)
    else:
        dataset = NDG(args.val, args.v_land)

    if dataset.have_landmarks:
        log.info('Use alignment for the train data')
        dataset.transform = t.Compose(
            [Rescale((48, 48)), ToTensor(switch_rb=True)])
    else:
        # Fix: report why we are bailing out instead of exiting silently
        # (mirrors the error handling of the sibling train() function).
        log.info('Error: dataset has no landmarks data')
        exit()

    val_loader = DataLoader(dataset,
                            batch_size=args.val_batch_size,
                            num_workers=4,
                            shuffle=False,
                            pin_memory=True)

    model = models_landmarks['landnet']()

    assert args.snapshot is not None
    if args.compr_config:
        config = Config.from_json(args.compr_config)
        compression_algo = create_compression_algorithm(model, config)
        model = compression_algo.model

    log.info('Testing snapshot ' + args.snapshot + ' ...')
    model = load_model_state(model,
                             args.snapshot,
                             args.device,
                             eval_state=True)
    model.eval()
    cudnn.benchmark = True
    model = torch.nn.DataParallel(
        model,
        device_ids=[args.device],
    )

    log.info('Face landmarks model:')
    log.info(model)

    avg_err, per_point_avg_err, failures_rate = evaluate(val_loader, model)
    log.info('Avg RMSE error: {}'.format(avg_err))
    log.info('Per landmark RMSE error: {}'.format(per_point_avg_err))
    log.info('Failure rate: {}'.format(failures_rate))
    # NOTE(review): the guard checks "sparsity_level" but the reported value
    # is 'sparsity_rate_for_sparsified_modules' — confirm this is intended
    # (the same pattern appears in the FR evaluation script).
    if args.compr_config and "sparsity_level" in compression_algo.statistics():
        log.info("Sparsity level: {0:.2f}".format(
            compression_algo.statistics()
            ['sparsity_rate_for_sparsified_modules']))
def main(argv):
    """CLI entry point: build the config, snapshot the sources and spawn the worker."""
    parser = get_argument_parser()
    cli_args = parser.parse_args(args=argv)

    config = Config.from_json(cli_args.config)
    config.update_from_args(cli_args, parser)
    configure_paths(config)

    # nncf root lives two directories above this file
    source_root = Path(__file__).absolute().parents[2]
    create_code_snapshot(source_root, osp.join(config.log_dir, "snapshot.tar.gz"))

    config.execution_mode = get_execution_mode(config)

    # A single dataset_dir shorthand populates all four dataset paths.
    if config.dataset_dir is not None:
        config.train_imgs = config.train_anno = config.test_imgs = config.test_anno = config.dataset_dir
    start_worker(main_worker, config)
def main(argv):
    """CLI entry point: prepare the logging directory and launch the worker."""
    parser = get_common_argument_parser()
    arguments = parser.parse_args(args=argv)
    config = Config.from_json(arguments.config)
    config.update_from_args(arguments, parser)
    if config.dist_url == "env://":
        config.update_from_env()

    if config.mode.lower() == 'test':
        # Test runs do not need a persistent log directory.
        config.log_dir = "/tmp/"
    else:
        if not osp.exists(config.log_dir):
            os.makedirs(config.log_dir)
        config.log_dir = str(config.log_dir)
        configure_paths(config)
        print("Save directory:", config.log_dir)

    config.execution_mode = get_execution_mode(config)
    start_worker(main_worker, config)
def train(args):
    """Launches training of landmark regression model.

    Builds the dataset with augmentation, the loader and summary writer, and
    optionally resumes from a snapshot (wrapping the model with a compression
    algorithm when ``args.compr_config`` is given).
    """
    input_size = models_landmarks['landnet']().get_input_res()
    if args.dataset == 'vgg':
        drops_schedule = [1, 6, 9, 13]
        dataset = VGGFace2(args.train, args.t_list, args.t_land, landmarks_training=True)
    elif args.dataset == 'celeba':
        drops_schedule = [10, 20]
        dataset = CelebA(args.train, args.t_land)
    else:
        drops_schedule = [90, 140, 200]
        dataset = NDG(args.train, args.t_land)

    if dataset.have_landmarks:
        log.info('Use alignment for the train data')
        dataset.transform = transforms.Compose([landmarks_augmentation.Rescale((56, 56)),
                                                landmarks_augmentation.Blur(k=3, p=.2),
                                                landmarks_augmentation.HorizontalFlip(p=.5),
                                                landmarks_augmentation.RandomRotate(50),
                                                landmarks_augmentation.RandomScale(.8, .9, p=.4),
                                                landmarks_augmentation.RandomCrop(48),
                                                landmarks_augmentation.ToTensor(switch_rb=True)])
    else:
        log.info('Error: training dataset has no landmarks data')
        exit()

    train_loader = DataLoader(dataset, batch_size=args.train_batch_size, num_workers=4, shuffle=True)
    writer = SummaryWriter('./logs_landm/{:%Y_%m_%d_%H_%M}_'.format(datetime.datetime.now()) + args.snap_prefix)
    model = models_landmarks['landnet']()

    # Keep a handle on the method before the model may be wrapped below.
    set_dropout_fn = model.set_dropout_ratio

    compression_algo = None
    if args.snap_to_resume is not None:
        # Fix: the compression guard was missing, producing an IndentationError
        # and unconditionally building the compression algorithm; restore the
        # `if args.compr_config:` check used by the analogous FR train().
        if args.compr_config:
            config = Config.from_json(args.compr_config)
            compression_algo = create_compression_algorithm(model, config)
            model = compression_algo.model

        log.info('Resuming snapshot ' + args.snap_to_resume + ' ...')
        model = load_model_state(model, args.snap_to_resume, args.device, eval_state=False)
        model = torch.nn.DataParallel(model, device_ids=[args.device])
# Example 7
def main():
    """Validate an IR model through the Inference Engine and compare against PyTorch.

    NOTE: relies on a module-level ``args`` namespace populated elsewhere.
    """
    model_bin, model_xml = get_ir_paths(args.model, args.bin)

    config = Config.from_json(args.config)

    input_infos_list = create_input_infos(config)
    image_size = input_infos_list[0].shape[-1]
    size = int(image_size / 0.875)

    print('IE version: {}'.format(get_version()))

    # NOTE: importing torch after loading IE to plugin to avoid issue with built-in MKLDNN of PyTorch
    plugin = IEPlugin(device='CPU', plugin_dirs=args.cpu_plugin_dir)
    plugin.add_cpu_extension(os.path.join(args.cpu_plugin_dir, "libcpu_extension.so"))
    net = IENetwork(model=model_xml, weights=model_bin)
    exec_net = getExecNet(plugin, net)
    from torch.utils.data import DataLoader
    import torchvision.datasets as datasets
    import torchvision.transforms as transforms

    preprocessing = transforms.Compose([
        transforms.Resize(size),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    val_loader = DataLoader(datasets.ImageFolder(args.data, preprocessing),
                            batch_size=1, shuffle=False, num_workers=4, pin_memory=True)

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    config['log_dir'] = args.output_dir

    infer_fn = partial(infer_ie_model, net=net)
    validate_general(val_loader, exec_net, infer_fn)

    validate_torch_model(os.path.join(args.output_dir, "PTH"), config=config, num_layers=args.num_layers,
                         dump=args.dump, val_loader=val_loader, cuda=args.cuda)
def test_model_can_be_loaded_with_resume(_params, tmp_path):
    """Check that a checkpoint restores into a freshly built compressed model."""
    params = _params

    config = Config.from_json(str(params['nncf_config_path']))
    config.execution_mode = params['execution_mode']

    config.current_gpu = 0
    config.log_dir = str(tmp_path)
    config.device = get_device(config)
    distributed_modes = (ExecutionMode.DISTRIBUTED, ExecutionMode.MULTIPROCESSING_DISTRIBUTED)
    config.distributed = config.execution_mode in distributed_modes
    if config.distributed:
        config.dist_url = "tcp://127.0.0.1:9898"
        config.dist_backend = "nccl"
        config.rank = 0
        config.world_size = 1
        configure_distributed(config)

    model = load_model(config['model'],
                       pretrained=False,
                       num_classes=config.get('num_classes', 1000),
                       model_params=config.get('model_params'))

    patch_torch_operators()
    compression_algo, model = create_compressed_model(model, config)
    model, _ = prepare_model_for_execution(model, config)

    if config.distributed:
        compression_algo.distributed()

    reset_context('orig')
    reset_context('quantized_graphs')
    checkpoint = torch.load(params['checkpoint_path'], map_location='cpu')
    load_state(model, checkpoint['state_dict'], is_resume=True)
def main():
    """Evaluate a face recognition model with PyTorch or the Inference Engine."""
    parser = argparse.ArgumentParser(description='Evaluation script for Face Recognition in PyTorch')
    parser.add_argument('--devices', type=int, nargs='+', default=[0], help='CUDA devices to use.')
    parser.add_argument('--embed_size', type=int, default=128, help='Size of the face embedding.')
    parser.add_argument('--val_data_root', dest='val', required=True, type=str, help='Path to validation data.')
    parser.add_argument('--val_list', dest='v_list', required=True, type=str, help='Path to train data image list.')
    parser.add_argument('--val_landmarks', dest='v_land', default='', required=False, type=str,
                        help='Path to landmarks for the test images.')
    parser.add_argument('--val_batch_size', type=int, default=8, help='Validation batch size.')
    parser.add_argument('--snap', type=str, required=False, help='Snapshot to evaluate.')
    parser.add_argument('--roc_fname', type=str, default='', help='ROC file.')
    parser.add_argument('--dump_embeddings', action='store_true', help='Dump embeddings to summary writer.')
    parser.add_argument('--dist', choices=['l2', 'cos'], type=str, default='cos', help='Distance.')
    parser.add_argument('--flipped_emb', action='store_true', help='Flipped embedding concatenation trick.')
    parser.add_argument('--show_failed', action='store_true', help='Show misclassified pairs.')
    parser.add_argument('--model', choices=models_backbones.keys(), type=str, default='rmnet', help='Model type.')
    parser.add_argument('--engine', choices=['pt', 'ie'], type=str, default='pt', help='Framework to use for eval.')

    # IE-related options
    parser.add_argument('--fr_model', type=str, required=False)
    parser.add_argument('--lm_model', type=str, required=False)
    parser.add_argument('-pp', '--plugin_dir', type=str, default=None, help='Path to a plugin folder')
    parser.add_argument('-c', '--compr_config', help='Path to a file with compression parameters', required=False)
    cli_args = parser.parse_args()

    if cli_args.engine == 'pt':
        # --- PyTorch evaluation path ---
        assert cli_args.snap is not None, 'To evaluate PyTorch snapshot, please, specify --snap option.'

        if cli_args.compr_config:
            patch_torch_operators()

        with torch.cuda.device(cli_args.devices[0]):
            data, embeddings_fun = load_test_dataset(cli_args)
            model = models_backbones[cli_args.model](embedding_size=cli_args.embed_size, feature=True)

            if cli_args.compr_config:
                config = Config.from_json(cli_args.compr_config)
                compression_algo = create_compression_algorithm(model, config)
                model = compression_algo.model

            model = load_model_state(model, cli_args.snap, cli_args.devices[0])
            evaluate(cli_args, data, model, embeddings_fun, cli_args.val_batch_size,
                     cli_args.dump_embeddings, cli_args.roc_fname, cli_args.snap, True,
                     cli_args.show_failed)

            if cli_args.compr_config and "sparsity_level" in compression_algo.statistics():
                log.info("Sparsity level: {0:.2f}".format(
                    compression_algo.statistics()['sparsity_rate_for_sparsified_modules']))
    else:
        # --- Inference Engine evaluation path ---
        from utils.ie_tools import load_ie_model

        assert cli_args.fr_model is not None, 'To evaluate IE model, please, specify --fr_model option.'
        fr_model = load_ie_model(cli_args.fr_model, 'CPU', cli_args.plugin_dir)
        lm_model = None
        if cli_args.lm_model:
            lm_model = load_ie_model(cli_args.lm_model, 'CPU', cli_args.plugin_dir)
        input_size = tuple(fr_model.get_input_shape()[2:])

        lfw = LFW(cli_args.val, cli_args.v_list, cli_args.v_land)
        if lfw.use_landmarks and not lm_model:
            log.info('Using landmarks for the LFW images.')
            lfw.transform = t.Compose([ResizeNumpy(input_size)])
        else:
            lfw.transform = t.Compose([ResizeNumpy(220), CenterCropNumpy(input_size)])
            lfw.use_landmarks = False

        evaluate(cli_args, lfw, fr_model, partial(compute_embeddings_lfw_ie, lm_model=lm_model),
                 val_batch_size=1, dump_embeddings=False, roc_fname='', snap_name='',
                 verbose=True, show_failed=False)
# Example 10
def train(args):
    """Performs training of a face recognition network.

    Builds the training dataset (with optional class-weighted sampling) and
    the LFW validation set, constructs the backbone model — optionally wrapped
    by a compression algorithm when ``args.compr_config`` is set — then runs
    the epoch loop with periodic snapshotting, LFW evaluation and TensorBoard
    logging.  When ``args.to_onnx`` is given, the model is exported to ONNX
    and the function returns without training.
    """
    input_size = models_backbones[args.model]().get_input_res()
    # Select the training dataset implementation by name.
    if args.train_dataset == 'vgg':
        assert args.t_list
        dataset = VGGFace2(args.train, args.t_list, args.t_land)
    elif args.train_dataset == 'imdbface':
        dataset = IMDBFace(args.train, args.t_list)
    elif args.train_dataset == 'trp':
        dataset = TrillionPairs(args.train, args.t_list)
    else:
        dataset = MSCeleb1M(args.train, args.t_list)

    # A stronger augmentation pipeline is used when landmark alignment is available.
    if dataset.have_landmarks:
        log.info('Use alignment for the train data')
        dataset.transform = t.Compose([
            augm.HorizontalFlipNumpy(p=.5),
            augm.CutOutWithPrior(p=0.05, max_area=0.1),
            augm.RandomRotationNumpy(10, p=.95),
            augm.ResizeNumpy(input_size),
            augm.BlurNumpy(k=5, p=.2),
            augm.NumpyToTensor(switch_rb=True)
        ])
    else:
        dataset.transform = t.Compose([
            augm.ResizeNumpy(input_size),
            augm.HorizontalFlipNumpy(),
            augm.RandomRotationNumpy(10),
            augm.NumpyToTensor(switch_rb=True)
        ])

    if args.weighted:
        # Weighted sampling driven by per-sample weights from the dataset.
        train_weights = dataset.get_weights()
        train_weights = torch.DoubleTensor(train_weights)
        sampler = torch.utils.data.sampler.WeightedRandomSampler(
            train_weights, len(train_weights))
        train_loader = torch.utils.data.DataLoader(
            dataset,
            batch_size=args.train_batch_size,
            sampler=sampler,
            num_workers=3,
            pin_memory=False)
    else:
        train_loader = DataLoader(dataset,
                                  batch_size=args.train_batch_size,
                                  num_workers=4,
                                  shuffle=True)

    # LFW serves as the validation set; align via landmarks when provided.
    lfw = LFW(args.val, args.v_list, args.v_land)
    if lfw.use_landmarks:
        log.info('Use alignment for the test data')
        lfw.transform = t.Compose(
            [augm.ResizeNumpy(input_size),
             augm.NumpyToTensor(switch_rb=True)])
    else:
        lfw.transform = t.Compose([
            augm.ResizeNumpy((160, 160)),
            augm.CenterCropNumpy(input_size),
            augm.NumpyToTensor(switch_rb=True)
        ])

    log_path = './logs/{:%Y_%m_%d_%H_%M}_{}'.format(datetime.datetime.now(),
                                                    args.snap_prefix)
    writer = SummaryWriter(log_path)

    if not osp.exists(args.snap_folder):
        os.mkdir(args.snap_folder)

    model = models_backbones[args.model](embedding_size=args.embed_size,
                                         num_classes=dataset.get_num_classes(),
                                         feature=False)

    # Keep a handle on the raw model's method before the model may be wrapped
    # by the compression algorithm / DataParallel below.
    set_dropout_fn = model.set_dropout_ratio

    compression_algo = None
    if args.snap_to_resume is not None:
        if args.compr_config:
            config = Config.from_json(args.compr_config)
            compression_algo = create_compression_algorithm(model, config)
            model = compression_algo.model

        log.info('Resuming snapshot ' + args.snap_to_resume + ' ...')
        model = load_model_state(model,
                                 args.snap_to_resume,
                                 args.devices[0],
                                 eval_state=False)
        model = torch.nn.DataParallel(model, device_ids=args.devices)
    else:
        model = torch.nn.DataParallel(model,
                                      device_ids=args.devices,
                                      output_device=args.devices[0])
        model.cuda()
        model.train()
        cudnn.benchmark = True

    if args.to_onnx is not None:
        # Export-only mode: dump the model to ONNX and return without training.
        if args.compr_config:
            compression_algo.export_model(args.to_onnx)
        else:
            model = model.eval().cpu()
            input_shape = tuple([1, 3] + list(input_size))
            with torch.no_grad():
                torch.onnx.export(model.module,
                                  torch.randn(input_shape),
                                  args.to_onnx,
                                  verbose=True)

        print("Saved to", args.to_onnx)
        return

    log.info('Face Recognition model:')
    log.info(model)

    if args.mining_type == 'focal':
        softmax_criterion = AMSoftmaxLoss(gamma=args.gamma,
                                          m=args.m,
                                          margin_type=args.margin_type,
                                          s=args.s)
    else:
        softmax_criterion = AMSoftmaxLoss(t=args.t,
                                          m=0.35,
                                          margin_type=args.margin_type,
                                          s=args.s)
    aux_losses = MetricLosses(dataset.get_num_classes(), args.embed_size,
                              writer)
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    # Compression runs use a denser learning-rate-drop schedule.
    if args.compr_config:
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [0, 2, 4, 6, 8])
    else:
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [3, 6, 9, 13])

    log.info('Epoch length: %d' % len(train_loader))
    for epoch_num in range(args.epoch_total_num):
        log.info('Epoch: %d' % epoch_num)
        scheduler.step()

        # Dropout is switched off late in training, or immediately when compressing.
        if epoch_num > 6 or args.compr_config:
            set_dropout_fn(0.)

        classification_correct = 0
        classification_total = 0

        for i, data in enumerate(train_loader, 0):
            iteration = epoch_num * len(train_loader) + i

            # Periodically save a snapshot and evaluate on LFW.
            if iteration % args.val_step == 0:
                snapshot_name = osp.join(
                    args.snap_folder,
                    args.snap_prefix + '_{0}.pt'.format(iteration))
                if iteration > 0:
                    log.info('Saving Snapshot: ' + snapshot_name)
                    save_model_cpu(model, optimizer, snapshot_name, epoch_num)

                log.info('Evaluating Snapshot: ' + snapshot_name)
                model.eval()
                same_acc, diff_acc, all_acc, auc = evaluate(
                    args,
                    lfw,
                    model,
                    compute_embeddings_lfw,
                    args.val_batch_size,
                    verbose=False)

                model.train()

                log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(
                    same_acc, diff_acc))
                log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))
                log.info('Validation AUC: {0:.4f}'.format(auc))
                writer.add_scalar('Epoch', epoch_num, iteration)
                writer.add_scalar('Accuracy/Val_same_accuracy', same_acc,
                                  iteration)
                writer.add_scalar('Accuracy/Val_diff_accuracy', diff_acc,
                                  iteration)
                writer.add_scalar('Accuracy/Val_accuracy', all_acc, iteration)
                writer.add_scalar('Accuracy/AUC', auc, iteration)

            data, label = data['img'], data['label'].cuda()
            features, sm_outputs = model(data)

            optimizer.zero_grad()
            aux_losses.init_iteration()
            aux_loss, aux_log = aux_losses(features, label, epoch_num,
                                           iteration)
            loss_sm = softmax_criterion(sm_outputs, label)
            # The compression penalty joins the total loss only when enabled.
            compr_loss = compression_algo.loss() if args.compr_config else 0
            loss = loss_sm + aux_loss + compr_loss
            loss.backward()
            aux_losses.end_iteration()
            optimizer.step()

            _, predicted = torch.max(sm_outputs.data, 1)
            classification_total += int(label.size(0))
            classification_correct += int(torch.sum(predicted.eq(label)))
            train_acc = float(classification_correct) / classification_total

            if i % 10 == 0:
                log.info('Iteration %d, Softmax loss: %.4f, Total loss: %.4f' %
                         (iteration, loss_sm, loss) + aux_log)
                log.info('Learning rate: %f' % scheduler.get_lr()[0])
                writer.add_scalar('Loss/train_loss', loss, iteration)
                writer.add_scalar('Loss/softmax_loss', loss_sm, iteration)
                writer.add_scalar('Learning_rate',
                                  scheduler.get_lr()[0], iteration)
                writer.add_scalar('Accuracy/classification', train_acc,
                                  iteration)
                if args.compr_config and "sparsity_level" in compression_algo.statistics(
                ):
                    log.info('Sparsity_level: %.4f' %
                             compression_algo.statistics()["sparsity_level"])
                    writer.add_scalar(
                        'Sparsity_level',
                        compression_algo.statistics()["sparsity_level"],
                        iteration)

            if args.compr_config:
                compression_algo.scheduler.step()

        if args.compr_config:
            compression_algo.scheduler.epoch_step()