def test_resnet_resnet101(self):
        """Compare ResNet-101 outputs/gradients between eager mode and default mode.

        Runs the same forward pass and ``paddle.grad`` computation once inside
        ``_test_eager_guard()`` (eager mode) and once outside, then asserts the
        two runs produce identical outputs and gradients.

        NOTE(review): the two runs construct separately initialized models, so
        exact ``np.array_equal`` matches presumably rely on a fixed global seed
        set elsewhere in the test fixture — confirm.
        """
        with _test_eager_guard():
            model = resnet101(pretrained=False)
            egr_data = paddle.to_tensor(self.data)
            # Track gradients w.r.t. the input tensor.
            egr_data.stop_gradient = False
            egr_out = model(egr_data)
            egr_preds = paddle.argmax(egr_out, axis=1)
            # One-hot mask selecting the predicted class of each sample.
            # (The inner to_tensor is redundant — egr_preds is already a
            # tensor — but harmless.)
            egr_label_onehot = paddle.nn.functional.one_hot(
                paddle.to_tensor(egr_preds), num_classes=egr_out.shape[1])
            # Scalar target per sample: the logit of the predicted class.
            egr_target = paddle.sum(egr_out * egr_label_onehot, axis=1)

            # d(target)/d(out) must have the same shape as the output.
            egr_g = paddle.grad(outputs=egr_target, inputs=egr_out)[0]
            egr_g_numpy = egr_g.numpy()
            self.assertEqual(list(egr_g_numpy.shape), list(egr_out.shape))

        # Identical computation outside the eager guard (default mode).
        model = resnet101(pretrained=False)
        data = paddle.to_tensor(self.data)
        data.stop_gradient = False
        out = model(data)
        preds = paddle.argmax(out, axis=1)
        label_onehot = paddle.nn.functional.one_hot(paddle.to_tensor(preds),
                                                    num_classes=out.shape[1])
        target = paddle.sum(out * label_onehot, axis=1)

        g = paddle.grad(outputs=target, inputs=out)[0]
        g_numpy = g.numpy()
        self.assertEqual(list(g_numpy.shape), list(out.shape))

        # Both modes must agree exactly.
        self.assertTrue(np.array_equal(egr_out, out))
        self.assertTrue(np.array_equal(egr_g_numpy, g_numpy))
# Example #2
def build_model():
    """Build a Deoldify colorization model on a ResNet-101 backbone."""
    # Keep the convolutional trunk only: drop the last two children
    # (the pooling and classifier layers).
    trunk = list(resnet101().children())[:-2]
    encoder = nn.Sequential(*trunk)

    return Deoldify(
        encoder,
        3,
        blur=True,
        y_range=(-3, 3),
        norm_type='Spectral',
        self_attention=True,
        nf_factor=2,
    )
# Example #3
    def test_algo(self):
        """Smoke-test ConsensusInterpreter over several vision models."""
        from paddle.vision.models import resnet34, resnet50, resnet101, mobilenet_v2

        # Four models are enough to exercise the consensus logic; using more
        # models simply gives a more impressive consensus result.
        list_models = {
            'resnet34': resnet34(pretrained=False),
            'resnet50': resnet50(pretrained=False),
            'resnet101': resnet101(pretrained=False),
            'mobilenet_v2': mobilenet_v2(pretrained=False),
        }
        consensus = it.ConsensusInterpreter(
            it.SmoothGradInterpreter, list_models.values(), device='gpu:0')

        # A random uint8 array stands in for a real input image.
        random_image = np.random.randint(
            0, 255, size=(1, 64, 64, 3), dtype=np.uint8)
        exp = consensus.interpret(random_image, n_samples=5)
def run(args, train_set, test_set):
    """Train ResNet-101 on ``train_set``, evaluating on ``test_set`` each epoch.

    Uses momentum SGD with a single-milestone step LR schedule; weight decay
    is applied only to parameters that are neither norm nor bias parameters.
    Checkpoints are saved every ``args.ckpt`` epochs and once at the end.

    NOTE(review): relies on ``evaluate`` and the Paddle imports being defined
    elsewhere in this module — confirm they are in scope.
    """
    model = resnet101(pretrained=True, num_classes=len(train_set.classes))

    train_loader = DataLoader(train_set,
                              batch_size=args.batch_size,
                              shuffle=True,
                              drop_last=True,
                              num_workers=8)
    test_loader = DataLoader(test_set,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=16)

    # Exempt normalization and bias parameters from weight decay (matched by
    # parameter-name substring).
    no_decay_params = []
    decay_params = []
    for n, v in model.named_parameters():
        if 'norm' in n or 'bias' in n:
            no_decay_params.append(v)
        else:
            decay_params.append(v)

    list_params = [{
        'params': decay_params,
        'weight_decay': args.wd
    }, {
        'params': no_decay_params,
        'weight_decay': 0.0
    }]

    # Drop the learning rate by 10x after roughly two thirds of the epochs.
    scheduler = MultiStepDecay(learning_rate=args.lr,
                               milestones=[int(args.epochs * 0.67)],
                               gamma=0.1)
    optimizer = Momentum(scheduler, parameters=list_params)

    logging.info("Training Started...")
    logging.info(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))

    for epoch in range(1, args.epochs + 1, 1):
        model.train()
        bar = tqdm(train_loader)
        bar.set_description("Training")
        losses = 0
        for idx, batch in enumerate(bar):
            imgs, targets = batch

            logits = model(imgs)

            loss = F.cross_entropy(logits, targets)
            loss.backward()
            optimizer.step()
            optimizer.clear_grad()
            # NOTE(review): scaling by batch_size assumes full batches; with
            # drop_last=True the per-epoch average below is only approximate
            # relative to len(train_loader.dataset).
            losses += loss.item() * train_loader.batch_size
            acc = (logits.argmax(1) == targets).cast('float32').mean()

            if idx % 100 == 0:
                logging.info(
                    f" EPOCH| {epoch}, BATCH| {idx}/{len(bar)}, LOSS| {loss.item(): .4f}, ACC| {acc.item(): .4f}"
                )

        # Log the final batch of the epoch, then advance the LR schedule.
        logging.info(
            f" EPOCH| {epoch}, BATCH| {idx}/{len(bar)}, LOSS| {loss.item(): .4f}, ACC| {acc.item(): .4f}"
        )
        scheduler.step()

        logging.info(
            f"EPOCH | {epoch} TOTAL LOSS | {losses / len(train_loader.dataset): .4f}"
        )

        print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        print(
            f"EPOCH | {epoch} TOTAL LOSS | {losses / len(train_loader.dataset): .4f}"
        )

        # writer.add_scalar('Loss/train', losses / len(train_loader.dataset), epoch)

        # Per-epoch evaluation on the held-out set.
        loss_test, acc_test = evaluate(model, test_loader)
        logging.info(f"TEST ACC| {acc_test}, LOSS| {loss_test}")
        print(f"TEST ACC| {acc_test}, LOSS| {loss_test}")

        # Periodic checkpoint every ``args.ckpt`` epochs.
        if epoch % args.ckpt == 0:
            paddle.save(model.state_dict(),
                        f'./work_dirs/result_{args.name}/ckpt-{epoch}.pd')

    # Final evaluation and checkpoint after training completes.
    loss_test, acc_test = evaluate(model, test_loader)
    logging.info(f"TEST ACC| {acc_test}, LOSS| {loss_test}")
    print(f"TEST ACC| {acc_test}, LOSS| {loss_test}")

    paddle.save(model.state_dict(),
                f'./work_dirs/result_{args.name}/ckpt-final.pd')
 def setUp(self):
     # Fresh, randomly initialized ResNet-101 for each test;
     # pretrained=False avoids downloading weights.
     self.model = resnet101(pretrained=False)
# Example #6
def main(args):
    """Explain a set of images with one interpreter and score the explanations.

    Workflow:
      1. Collect image paths from ``args.data_list`` (glob pattern, or a
         single image path when no ``*`` is present).
      2. Build the model named by ``args.model``, optionally loading custom
         weights from ``args.model_weights``.
      3. Instantiate the interpreter named by ``args.it`` and compute one
         explanation per image.
      4. Score each explanation with deletion/insertion metrics, checkpointing
         results incrementally to ``./work_dirs/<exp_id>.npz`` so an
         interrupted run can be resumed.
    """
    # ---- data ----
    if '*' in args.data_list:
        # Glob pattern: expand it, shuffle deterministically (fixed seed for
        # reproducibility), and keep the first ``num_images`` files.
        data_list = args.data_list.replace('\\', '')
        files = glob(data_list)
        np.random.seed(0)
        files = np.random.permutation(files)
        list_image_paths = files[:args.num_images]
    else:
        # FIX: this branch previously left ``list_image_paths`` unassigned,
        # raising NameError below. A non-glob value is treated as a single
        # image path.
        list_image_paths = [args.data_list]

    print(args.data_list)
    print(len(list_image_paths))

    # ---- model ----
    model_init_args = {'pretrained': True, 'num_classes': args.num_classes}
    if args.model_weights is not None:
        # Custom weights are loaded below; skip the pretrained download.
        model_init_args['pretrained'] = False

    if args.model.lower() == 'resnet50':
        if 'lrp' == args.it:
            # LRP needs a model instrumented with relevance-propagation hooks.
            from tutorials.assets.lrp_model import resnet50_lrp
            paddle_model = resnet50_lrp(**model_init_args)
        else:
            paddle_model = resnet50(**model_init_args)
    elif args.model.lower() == 'resnet101':
        paddle_model = resnet101(**model_init_args)
    else:
        # Unknown model names fall back to ResNet-50.
        paddle_model = resnet50(**model_init_args)

    ## load weights if given
    if args.model_weights is not None:
        state_dict = paddle.load(args.model_weights)
        paddle_model.set_dict(state_dict)
        print("Load weights from", args.model_weights)

    # ---- interpreter instance ----
    to_test_list = {
        'lime': it.LIMECVInterpreter,
        'gradcam': it.GradCAMInterpreter,
        'intgrad': it.IntGradCVInterpreter,
        'smoothgrad': it.SmoothGradInterpreter,
        'gradshap': it.GradShapCVInterpreter,
        'scorecam': it.ScoreCAMInterpreter,
        'glime': it.GLIMECVInterpreter,
        'lrp': it.LRPCVInterpreter
    }
    interpreter = to_test_list[args.it](paddle_model, device=args.device)
    # interpreter configs
    it_configs = args.it_configs
    # evaluation configs
    eval_configs = args.eval_configs

    # Image resize config.
    # Deprecated setting: {"resize_to": 256, "crop_to": 224}.
    if args.img_resize_config is None:
        img_resize_config = {"resize_to": 224, "crop_to": 224}
    else:
        img_resize_config = args.img_resize_config

    if 'glime' == args.it:
        interpreter.set_global_weights(args.global_weights)

    # Optional per-image cap on generated samples, derived from the number of
    # segments stored in previously computed LIME results.
    num_limit_adapter = {}
    if args.eval_num_limit_adapter is not None:
        lime_results = dict(
            np.load(args.eval_num_limit_adapter, allow_pickle=True))
        for img_path in lime_results:
            b = lime_results[img_path].item()
            num_limit_adapter[img_path] = len(np.unique(b['segmentation']))

    # ---- evaluator instances ----
    del_ins_evaluator = it.DeletionInsertion(paddle_model, device=args.device)
    # Constructed but currently unused: LeRF/perturbation evaluation is
    # disabled. Kept so behavior (including construction side effects) is
    # unchanged.
    pert_evaluator = it.Perturbation(paddle_model,
                                     device=args.device,
                                     compute_MoRF=False)

    # ---- compute explanations and evaluate ----
    del_scores = []
    ins_scores = []

    # Results are checkpointed to a single file; compute its path once.
    results_path = f'./work_dirs/{get_exp_id(args)}.npz'
    eval_results = {}
    if os.path.exists(results_path):
        # Resume from a previous, possibly interrupted run.
        eval_results = dict(np.load(results_path, allow_pickle=True))

    i = 1
    for img_path in tqdm(list_image_paths, leave=True, position=0):
        if args.it == 'lime' or args.it == 'glime':
            # LIME-family explanations are expensive; reuse a cached one.
            if img_path in eval_results:
                exp = eval_results[img_path].item()
            else:
                exp = interpreter.interpret(img_path,
                                            **it_configs,
                                            **img_resize_config,
                                            visual=False)
                if hasattr(interpreter, 'lime_results'):
                    exp = interpreter.lime_results
        else:
            exp = interpreter.interpret(img_path,
                                        **it_configs,
                                        **img_resize_config,
                                        visual=False)

        if img_path in num_limit_adapter:
            eval_configs['limit_number_generated_samples'] = num_limit_adapter[
                img_path]
            print(img_path, 'update eval_configs:', eval_configs)

        results = del_ins_evaluator.evaluate(img_path, exp, **eval_configs,
                                             **img_resize_config)
        del_scores.append(results['del_probas'].mean())
        ins_scores.append(results['ins_probas'].mean())

        # Checkpoint after every image so the run can be resumed.
        if args.it == 'lime' or args.it == 'glime':
            exp['del_probas'] = results['del_probas']
            exp['ins_probas'] = results['ins_probas']
            eval_results[img_path] = copy.deepcopy(exp)
        else:
            eval_results[img_path] = {
                'del_probas': results['del_probas'],
                'ins_probas': results['ins_probas']
            }
        np.savez(results_path, **eval_results)

        if i % 20 == 0:
            # Periodic progress report with running averages.
            print("Del score:\t", sum(del_scores) / len(del_scores))
            print("Ins score:\t", sum(ins_scores) / len(ins_scores))
            logging.info(f"{i}")
            logging.info(
                f"Del score:\t {sum(del_scores) / len(del_scores): .5f}")
            logging.info(
                f"Ins score:\t {sum(ins_scores) / len(ins_scores): .5f}")
        i += 1

    if not del_scores:
        # FIX: guard against an empty image list, which previously raised
        # ZeroDivisionError in the final report below.
        logging.info("No images were processed; nothing to report.")
        return

    print("Del score:\t", sum(del_scores) / len(del_scores))
    print("Ins score:\t", sum(ins_scores) / len(ins_scores))

    logging.info(f"Del score:\t {sum(del_scores) / len(del_scores): .5f}")
    logging.info(f"Ins score:\t {sum(ins_scores) / len(ins_scores): .5f}")

    logging.info(
        f"{sum(del_scores) / len(del_scores): .5f} \t {sum(ins_scores) / len(ins_scores): .5f}"
    )

    return