Example #1
def resnet50_with_cam(
        num_classes,
        cam_layer,
        # note: this default is evaluated once, at import time
        state_dict=image_net_state_dict(),
):
    return resnet50(num_classes=num_classes,
                    state_dict=state_dict,
                    resnet_class=BGCamResnet50,
                    cam_layer=cam_layer)
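
A minimal usage sketch, assuming the definitions above (resnet50, BGCamResnet50, image_net_state_dict) are importable from the surrounding module; the cam_layer value mirrors the CLI default shown in Example #4:

# Hypothetical call site: build a single-output, CAM-enabled ResNet-50
# initialized from ImageNet weights (the factory's default state_dict).
model = resnet50_with_cam(num_classes=1, cam_layer='layer4.2')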
Example #2
        wandb.log(
            common.merge(log(is_train=True, saver=train_saver),
                         log(is_train=False, saver=validation_saver)))


if __name__ == '__main__':
    parser = default_parser(run_name='BinaryLabel Resnet50',
                            tags=['binary', 'train', 'baseline'])
    parser.add_argument('--label',
                        type=str,
                        choices=common.LABELS,
                        required=False,
                        default='pigment_network',
                        help='Label for train')

    args = parser.parse_args()

    model = resnet50(num_classes=1, state_dict=image_net_state_dict())

    common.init_wandb(config=args, name=args.name, tags=args.tags)
    args = vars(args)

    args['only_labels'] = [args['label']]

    exp = BinaryLabelExp(model=model, output_labels=['model_output'], **args)

    wandb.config.update({'seed': exp.seed}, allow_val_change=True)
    exp.run()

    log_summary_common()
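
common.merge is not shown in this section; a plausible stand-in for the dictionary merge the wandb.log call above requires (an assumption, not the project's actual implementation):

# Hypothetical common.merge: combine the per-phase metric dicts into a
# single payload so train and validation metrics land in one wandb.log.
def merge(*dicts):
    merged = {}
    for d in dicts:
        merged.update(d)  # later dicts win on key collisions
    return merged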
Example #3
def main(cfg: RunConfig):
    torch.set_num_threads(4)
    images_dir = resource_path(config.prop(f'images{cfg.images_size}'),
                               strict=True)

    args, name, tags = get_wandb_starup(cfg)
    print('Got args', args)
    print('Got name', name)
    print('Got tags', tags)

    device = resolve_device(cfg.device)
    print(f'Got device {device}')

    cl_criterion = resolve_criterion(cfg.label, cfg.cl_criterion, device)
    a_criterion = resolve_criterion(cfg.label, cfg.attention_criterion, device)

    ds_index = DsIndex(images_dir=images_dir, masks_dir=config.prop('masks'))
    full_ds = FullDataset(ds_index=ds_index,
                          image_size=cfg.images_size,
                          labels=[cfg.label])
    with open(resource_path(config.prop('sample_indices'))) as f:
        samples = json.load(f)

    if cfg.train_on_test:
        samples[PHASE_TRAIN] = samples[PHASE_TEST]
        for lbl in LABELS:
            samples['balance'][lbl][PHASE_TRAIN] = samples['balance'][lbl][
                PHASE_TEST]

    if cfg.balanced:
        samples[PHASE_TRAIN] = samples['balance'][cfg.label][PHASE_TRAIN]
        print('Using balance samples')
        for p in PHASES:
            print(len(samples[p]), p, 'samples',
                  'balanced' if p == PHASE_TRAIN else '')

    if cfg.neg_percent != 100:
        samples[PHASE_TRAIN] = samples[f'neg_{cfg.neg_percent}'][
            cfg.label][PHASE_TRAIN]

    datasets = {}
    for p in [PHASE_TRAIN, PHASE_TEST, PHASE_VALIDATION]:
        datasets[p] = BinaryDataset(full_ds,
                                    sample_ids=samples[p],
                                    label=cfg.label)

    model = resnet50_with_cam(num_classes=1,
                              state_dict=image_net_state_dict(),
                              cam_layer=cfg.cam_layer)
    optimizer = Adam(model.parameters(), lr=cfg.lr)
    scheduler = None if cfg.no_scheduler else CosineAnnealingLR(
        optimizer, T_max=5, eta_min=0.005)

    suspector = ImageSuspector(max_samples_by_label=5)

    wandb_help.create_run(args, name, tags=tags, offline=False)
    run_experiment(
        datasets,
        cl_criterion,
        a_criterion,
        model.to(device),
        optimizer,
        scheduler,
        device,
        suspector,
        cfg,
    )

    suspector.log_to_wandb(full_ds)
    wandb_help.log_summary_best()

    for ds in datasets.values():
        ds.clean()
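
run_experiment's body is not shown here, but the CLI help in Example #4 documents the loss as classification_loss + lambda * attention_loss. A minimal sketch of that per-batch combination, with all names illustrative:

# Hypothetical loss step combining the two criteria resolved above;
# loss_lambda corresponds to the --lambda CLI flag.
def combined_loss(cl_criterion, a_criterion, logits, labels,
                  cam_maps, masks, loss_lambda=1.0):
    cl_loss = cl_criterion(logits, labels)  # classification term
    a_loss = a_criterion(cam_maps, masks)   # attention (CAM vs. mask) term
    return cl_loss + loss_lambda * a_loss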
Example #4
def main():
    """
    Input arguments

    usage: binary_gradcam_label_train.py [-h] [--images-dir IMAGES_DIR] [--use_cpu] [--name NAME] [--image-resize IMAGE_RESIZE] [--tags TAGS [TAGS ...]] [--epochs EPOCHS]
                                         [--lr LR] [--batch_size BATCH_SIZE] [--seed SEED] [--no-warmup] [--no-scheduler]
                                         [--label {negative_network,streaks,milia_like_cyst,globules,pigment_network}] [--cam_layer CAM_LAYER] [--lambda LOSS_LAMBDA]
                                         [--no-attention-loss] [--when-present] [--ae-loss-bce]

    optional arguments:
      -h, --help            show this help message and exit
      --images-dir IMAGES_DIR
                            Location of train images, default value present in config.py (default: /Users/ifkbhit/itmo/diploma/resources/isic_train/)
      --use_cpu             Use cpu only (default: False)
      --name NAME           WanDB run name (default: Resnet50)
      --image-resize IMAGE_RESIZE
                            Image resize (default: 256)
      --tags TAGS [TAGS ...]
                            Additional tags to run (default: ['binary', 'train'])
      --epochs EPOCHS       Num epochs (default: 20)
      --lr LR               Learning rate (default: 0.001)
      --batch_size BATCH_SIZE
                            Batch size (default: 10)
      --seed SEED           Int or 'none', or 'rand'. 'none' - do nothing with seed, 'rand' - generate random seed (default: rand)
      --no-warmup           Disable cache warmup (default: False)
      --no-scheduler        Don't use scheduler (default: False)
      --label {negative_network,streaks,milia_like_cyst,globules,pigment_network}
                            Label for train (default: pigment_network)
      --cam_layer CAM_LAYER
                            Layer for gradcam extractions (default: layer4.2)
      --lambda LOSS_LAMBDA  Coefficient for loss: classification_loss + lambda * attention_loss (default: 1)
      --no-attention-loss   Run without attention loss calculation (default: False)
      --when-present        Calculate attention loss only if sample has attribute (default: False)
      --ae-loss-bce         Use bce as attention loss (default: False)
    """
    parser = default_parser(run_name='Resnet50', tags=['binary', 'train'])
    parser.add_argument('--label',
                        type=str,
                        choices=common.LABELS,
                        required=False,
                        default='pigment_network',
                        help='Label for train')

    parser.add_argument('--cam_layer',
                        type=str,
                        required=False,
                        default='layer4.2',
                        help='Layer for gradcam extractions')
    parser.add_argument(
        '--lambda',
        type=int,
        required=False,
        default=1,
        dest='loss_lambda',
        help=
        'Coefficient for loss: classification_loss + lambda * attention_loss')

    parser.add_argument('--no-attention-loss',
                        action='store_true',
                        required=False,
                        help='Run without attention loss calculation')

    parser.add_argument(
        '--when-present',
        action='store_true',
        required=False,
        help='Calculate attention loss only if sample has attribute')

    parser.add_argument('--ae-loss-bce',
                        required=False,
                        action='store_true',
                        help='Use bce as attention loss')

    args = parser.parse_args()

    if args.no_attention_loss:
        run_type = "baseline"
    else:
        run_type = "gradcam"

    run_name = f'[{args.label}][{run_type}] {args.name}; lambda={args.loss_lambda}; lr={args.lr}'

    bgcam_resnet = resnet50_with_cam(num_classes=1,
                                     state_dict=image_net_state_dict(),
                                     cam_layer=args.cam_layer)

    tags = build_tags(args, run_type)

    common.init_wandb(config=args, name=run_name, tags=tags)
    d_args = vars(args)

    # training is performed on a single attribute
    d_args['only_labels'] = [args.label]

    exp = BinaryGradCamLabelExp(model=bgcam_resnet,
                                output_labels=['model_output', 'ac'],
                                **d_args)

    # use BCE instead of MSE
    if args.ae_loss_bce:
        exp.criterion_e = nn.BCEWithLogitsLoss()

    # skip the attention loss, i.e. run the baseline
    exp.calculate_attention_error_loss = not args.no_attention_loss

    # weights for the classification BCEWithLogitsLoss
    pos_weights = torch.tensor([BCE_WEIGHTS[args.label]])
    exp.pos_weights = pos_weights
    print("Got pos_weights =", pos_weights)

    # skip the attention loss when the photo has no dermoscopic attribute (empty mask)
    exp.when_present = args.when_present

    # dataset sample indices for which the visualization should be saved
    exp.suspect_attention = {
        # for train phase
        True: [13, 16, 17, 19, 20,   # with label
               11, 14, 23, 24, 26],  # without label
        # for validation phase
        False: [49, 51, 55, 56, 63,    # with label
                40, 45, 92, 99, 110],  # without label
    }

    # lambda coefficient for the attention loss
    exp.loss_lambda = args.loss_lambda

    wandb.config.update({'seed': exp.seed}, allow_val_change=True)
    # start training
    exp.run()
    print('Logging summary')
    # log results to the wandb summary section
    log_summary_common()
    print('Logging images')

    # log the visualizations
    to_pil = transforms.ToPILImage()
    for sample_key, epochs_maps in exp.suspects.items():
        images = []
        for epoch in sorted(epochs_maps.keys()):
            if epoch == -1:
                images.append(
                    wandb.Image(to_pil(epochs_maps[epoch]),
                                caption='Orig_mask'))
                continue

            data = epochs_maps[epoch]
            img_t = data['map'].unsqueeze(0)
            img_t = F.interpolate(img_t, (exp.image_resize, exp.image_resize),
                                  mode='bilinear',
                                  align_corners=True)
            img_t = img_t[0]
            capt = f'epoch_{epoch + 1}_{"correct" if data["correct"] else "incorrect"}'
            images.append(wandb.Image(to_pil(img_t), caption=capt))
        wandb.log({sample_key: images})
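
For context, a minimal sketch of how a pos_weights tensor such as the one built above plugs into PyTorch's BCEWithLogitsLoss; the weight value here is illustrative, not taken from BCE_WEIGHTS:

import torch
import torch.nn as nn

# pos_weight > 1 up-weights positive samples in the binary loss,
# compensating for class imbalance in the dermoscopic attributes.
pos_weights = torch.tensor([5.0])  # illustrative value
criterion = nn.BCEWithLogitsLoss(pos_weight=pos_weights)
logits = torch.randn(4, 1)
targets = torch.tensor([[1.0], [0.0], [1.0], [0.0]])
print(criterion(logits, targets))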