Example #1
    log_progress(2700, 0.8, 0.64)
    log_progress(2800, 0.9, 0.81)
    log_progress(2900, 0.95, 0.95*0.95)
    log_progress(3000, 1.0, 1.0)
    '''
    print('Printing the network structure')
    summary(model, (3, 448, 448))

if __name__ == '__main__':
    i_debug = 10
    
    args = parse_args()
    print(args, flush=True)
    Config = LoadConfig(args, 'train')
    Config.cls_2 = args.cls_2
    Config.cls_2xmul = args.cls_mul
    assert Config.cls_2 ^ Config.cls_2xmul
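    # exactly one of the two classifier modes may be enabled (XOR), never both or neither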

    transformers = load_data_transformers(args.resize_resolution, args.crop_resolution, args.swap_num)

    # initial dataloader
    train_set = dataset(Config = Config,\
                        anno = Config.train_anno,\
                        common_aug = transformers["common_aug"],\
                        swap = transformers["swap"],\
                        totensor = transformers["train_totensor"],\
                        train = True)

    trainval_set = dataset(Config = Config,\
                        anno = Config.train_anno,\
                        common_aug = transformers["None"],\
Example #2
    # args.resize_resolution=147
    # args.crop_resolution=129
    # # args.anno="/NAS/shenjintong/Tools/mmdnn/pytorch2caffe/inference_set.csv"
    # args.result_path="/NAS/shenjintong/Tools/mmdnn/pytorch2caffe/"
    # args.feature=True
    print(args)
    print(args.anno)
    # # todo: debug
    # args.anno = "/NAS/shenjintong/Dataset/ItargeCar/class_originbox/test_info.csv"
    # args.resume= "/NAS/shenjintong/DCL/net_model/DCL_512_448_41123_ItargeCar/model_best.pth"
    # args.CAM=True
    # args.opencv_save=True


    Config = LoadConfig(args, args.version)
    Config.cls_2xmul = True
    Config.cls_2 = False
    Config.no_loc = args.no_loc
    # tensorboard SummaryWriter setup
    Config.size=(args.crop_resolution,args.crop_resolution)
    if args.log_dir:
        sw_log = args.log_dir
        sw = SummaryWriter(log_dir=sw_log)

    transformers = load_data_transformers(args.resize_resolution, args.crop_resolution, args.swap_num)

    # args.version only picks which annotation file is read automatically; drop the
    # version switch and accept an annotation file path directly
    if args.anno:
        dataset_pd = pd.read_csv(args.anno)
    else:
        dataset_pd = Config.val_anno if args.version == 'val' else Config.test_anno
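The snippet stops right after the annotation table is selected. A minimal sketch of a plausible continuation, modeled on the eval-set construction in Example #4 (the keyword arguments and transformer keys are assumptions carried over from that snippet, not from this one):

    # sketch only: build an eval set from dataset_pd, mirroring Example #4's val_set
    data_set = dataset(Config=Config,
                       anno=dataset_pd,                  # the table selected above
                       common_aug=transformers["None"],  # no augmentation at eval time
                       swap=transformers["None"],
                       totensor=transformers["test_totensor"],
                       test=True)
    dataloader = torch.utils.data.DataLoader(data_set,
                                             batch_size=args.val_batch,
                                             shuffle=False,
                                             num_workers=args.val_num_workers,
                                             pin_memory=True)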
Example #3
    # Get params
    # target_example = 0  # Snake
    # (original_image, prep_img, target_class, file_name_to_export, pretrained_model) =\
    #     get_example_params(target_example)
    # # Grad cam
    # grad_cam = GradCam(pretrained_model)
    # # Generate cam mask
    # cam = grad_cam.generate_cam(prep_img, target_class)
    # # Save mask
    # save_class_activation_images(original_image, cam, file_name_to_export)
    # print('Grad cam completed')

    args = parse_args()
    Config = LoadConfig(args, args.version)
    Config.cls_2 = True
    Config.cls_2xmul = False

    models = [
        'wide_resnet50_2', 'resnet50', 'resnext50_32x4d', 'se_resnext101_32x4d'
    ]
    weights = {
        'resnet50':
        'net_model/resnet50/weights_65_109_0.7044_0.8736.pth',
        'resnext50_32x4d':
        'net_model/resnext50_32x4d/weights_59_1549_0.7046_0.8772.pth',
        'se_resnext101_32x4d':
        'net_model/se_resnext101_32x4d/weights_18_4583_0.7152_0.8783.pth',
        'wide_resnet50_2':
        'net_model/wide_resnet50_2/weights_58_4051_0.7255_0.8865.pth'
    }
    imgs = glob('good_case_2/*.jpg')
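The snippet ends after collecting the test images. A minimal sketch of how the lookup tables above might be consumed, assuming Config.backbone is how MainModel selects its architecture, that the checkpoints were saved from nn.DataParallel (hence the 'module.' prefix strip, as in Example #4), and that the first element of the model's output list holds the class logits; the 448x448 transform is borrowed from Example #4:

    # sketch only: load each backbone's checkpoint and score the collected images
    to_tensor = transforms.Compose([
        transforms.Resize((448, 448)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    for net in models:
        Config.backbone = net                 # assumption: backbone is chosen via Config
        model = MainModel(Config)
        state = torch.load(weights[net], map_location='cpu')
        state = {k[7:] if k.startswith('module.') else k: v for k, v in state.items()}
        model.load_state_dict(state, strict=False)
        model = model.cuda().eval()
        with torch.no_grad():
            for img_path in imgs:
                img = Image.open(img_path).convert('RGB')
                x = to_tensor(img).unsqueeze(0).cuda()
                outputs = model(x)            # assumption: outputs[0] holds class logits
                print(net, img_path, int(outputs[0].argmax(dim=1)))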
Example #4
File: cam_app.py Project: yt7589/dcl
    def startup(self, args):
        i_debug = 18
        if 1 == i_debug:
            # functionality reserved for the Wuxi institute bidding project
            app = WxsApp()
            app.startup(args)
            return
        print('Model heat-map visualization app v0.1.0')
        os.environ['CUDA_VISIBLE_DEVICES'] = '2'
        args = self.parse_args()
        # arg_dict = vars(args)
        args.train_num_workers = 0
        args.val_num_workers = 0
        print(args, flush=True)
        Config = LoadConfig(args, 'train')
        Config.cls_2 = args.cls_2
        Config.cls_2xmul = args.cls_mul
        assert Config.cls_2 ^ Config.cls_2xmul
        transformers = load_data_transformers(args.resize_resolution,
                                              args.crop_resolution,
                                              args.swap_num)
        # initial dataloader
        train_set = dataset(Config = Config,\
                            anno = Config.train_anno,\
                            common_aug = transformers["common_aug"],\
                            swap = transformers["swap"],\
                            swap_size=args.swap_num, \
                            totensor = transformers["train_totensor"],\
                            train = True)
        trainval_set = dataset(Config = Config,\
                            anno = Config.val_anno,\
                            common_aug = transformers["None"],\
                            swap = transformers["None"],\
                            swap_size=args.swap_num, \
                            totensor = transformers["val_totensor"],\
                            train = False,
                            train_val = True)
        val_set = dataset(Config = Config,\
                          anno = Config.val_anno,\
                          common_aug = transformers["None"],\
                          swap = transformers["None"],\
                          swap_size=args.swap_num, \
                          totensor = transformers["test_totensor"],\
                          test=True)
        dataloader = {}
        dataloader['train'] = torch.utils.data.DataLoader(train_set,\
                                                    batch_size=args.train_batch,\
                                                    shuffle=True,\
                                                    num_workers=args.train_num_workers,\
                                                    collate_fn=collate_fn4train if not Config.use_backbone else collate_fn4backbone,
                                                    drop_last=True if Config.use_backbone else False,
                                                    pin_memory=True)
        setattr(dataloader['train'], 'total_item_len', len(train_set))
        dataloader['trainval'] = torch.utils.data.DataLoader(trainval_set,\
                                                    batch_size=args.val_batch,\
                                                    shuffle=False,\
                                                    num_workers=args.val_num_workers,\
                                                    collate_fn=collate_fn4val if not Config.use_backbone else collate_fn4backbone,
                                                    drop_last=True if Config.use_backbone else False,
                                                    pin_memory=True)
        setattr(dataloader['trainval'], 'total_item_len', len(trainval_set))
        setattr(dataloader['trainval'], 'num_cls', Config.num_brands)
        dataloader['val'] = torch.utils.data.DataLoader(val_set,\
                                                    batch_size=args.val_batch,\
                                                    shuffle=False,\
                                                    num_workers=args.val_num_workers,\
                                                    collate_fn=collate_fn4test if not Config.use_backbone else collate_fn4backbone,
                                                    drop_last=True if Config.use_backbone else False,
                                                    pin_memory=True)
        setattr(dataloader['val'], 'total_item_len', len(val_set))
        setattr(dataloader['val'], 'num_cls', Config.num_brands)
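        # Note: the collate_fn4train/val/test functions pack the swap-augmented views
        # together with the originals for DCL, while collate_fn4backbone is the plain
        # path used when Config.use_backbone trains only the backbone classifier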
        cudnn.benchmark = True
        print('Choose model and train set', flush=True)
        model = MainModel(Config)

        # load model
        if (args.resume is None) and (not args.auto_resume):
            print('train from imagenet pretrained models ...', flush=True)
        else:
            if not args.resume is None:
                resume = args.resume
                print('load from pretrained checkpoint %s ...' % resume,
                      flush=True)
            elif args.auto_resume:
                resume = self.auto_load_resume(Config.save_dir)
                print('load from %s ...' % resume, flush=True)
            else:
                raise Exception("no checkpoints to load")

            model_dict = model.state_dict()
            pretrained_dict = torch.load(resume)
            print('train.py Ln193 resume={0};'.format(resume))
            # strip the 'module.' prefix that nn.DataParallel prepends to saved keys
            pretrained_dict = {
                k[7:]: v
                for k, v in pretrained_dict.items() if k[7:] in model_dict
            }
            model_dict.update(pretrained_dict)
            model.load_state_dict(model_dict)
        print('Set cache dir', flush=True)
        time = datetime.datetime.now()
        filename = '%s_%d%d%d_%s' % (args.cam, time.month, time.day, time.hour,
                                     Config.dataset)
        save_dir = os.path.join(Config.save_dir, filename)
        print('save_dir: {0} + {1};'.format(Config.save_dir, filename))
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        model.cuda()
        cam_main_model = model
        cam_model = model.model
        model = nn.DataParallel(model)
        # optimizer prepare
        if Config.use_backbone:
            ignored_params = list(map(id, model.module.classifier.parameters())) \
                        + list(map(id, model.module.brand_clfr.parameters()))
        else:
            ignored_params1 = list(
                map(id, model.module.classifier.parameters()))
            ignored_params1x = list(
                map(id, model.module.brand_clfr.parameters()))
            ignored_params2 = list(
                map(id, model.module.classifier_swap.parameters()))
            ignored_params3 = list(map(id, model.module.Convmask.parameters()))
            ignored_params = ignored_params1 + ignored_params1x + ignored_params2 + ignored_params3
        print('the num of new layers:', len(ignored_params), flush=True)
        base_params = filter(lambda p: id(p) not in ignored_params,
                             model.module.parameters())
        lr_ratio = args.cls_lr_ratio
        base_lr = args.base_lr
        momentum = 0.9
        if Config.use_backbone:
            optimizer = optim.SGD(
                [{
                    'params': base_params
                }, {
                    'params': model.module.classifier.parameters(),
                    'lr': base_lr
                }, {
                    'params': model.module.brand_clfr.parameters(),
                    'lr': base_lr
                }],
                lr=base_lr,
                momentum=momentum)
        else:
            optimizer = optim.SGD([
                {
                    'params': base_params
                },
                {
                    'params': model.module.classifier.parameters(),
                    'lr': lr_ratio * base_lr
                },
                {
                    'params': model.module.brand_clfr.parameters(),
                    'lr': lr_ratio * base_lr
                },
                {
                    'params': model.module.classifier_swap.parameters(),
                    'lr': lr_ratio * base_lr
                },
                {
                    'params': model.module.Convmask.parameters(),
                    'lr': lr_ratio * base_lr
                },
            ],
                                  lr=base_lr,
                                  momentum=momentum)
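        # in the DCL branch above, the freshly initialized heads (classifier, brand_clfr,
        # classifier_swap, Convmask) train at lr_ratio * base_lr while the pretrained
        # backbone parameters stay at base_lr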

        exp_lr_scheduler = lr_scheduler.StepLR(optimizer,
                                               step_size=args.decay_step,
                                               gamma=0.1)
        # *******************
        # *******************
        print('model: {0};'.format(cam_model))
        print('avgpool: {0};'.format(cam_main_model.avgpool))
        headers = {
            'avgpool': cam_main_model.avgpool,
            'classifier': cam_main_model.brand_clfr
        }
        grad_cam = GradCam(model=cam_model, feature_module=cam_model[7], \
                       target_layer_names=["2"], headers=headers, use_cuda=True)
        # read the input image
        img = None
        img_file = '/media/ps/0A9AD66165F33762/yantao/dcl/support/ds_files/wxs_ds/head/car/d00/d00/d00/d00/d96/SC7168CH5_冀B591C5_02_120000100604_120000702916290242.jpg'
        with open(img_file, 'rb') as f:
            with Image.open(f) as img:
                img = img.convert('RGB')

        crop_reso = 224
        to_tensor = transforms.Compose([
            transforms.Resize((crop_reso, crop_reso)),
            # ImageNetPolicy(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])
        img_obj = to_tensor(img)
        input = img_obj.reshape(1, 3, 224, 224)
        input = input.cuda()  # Tensor.cuda() returns a copy; reassign so the GPU tensor is used
        input.requires_grad_(True)
        print('input: {0};'.format(input.shape))
        # If None, returns the map for the highest scoring category.
        # Otherwise, targets the requested index.
        target_index = None
        mask = grad_cam(input, target_index)
        #
        self.show_cam_on_image(img_file, mask)
        #
        gb_model = GuidedBackpropReLUModel(model=cam_main_model, use_cuda=True)
        gb = gb_model(input, index=target_index)
        gb = gb.transpose((1, 2, 0))
        cam_mask = cv2.merge([mask, mask, mask])
        cam_gb = self.deprocess_image(cam_mask * gb)
        gb = self.deprocess_image(gb)
        cv2.imwrite('gb.jpg', gb)
        cv2.imwrite('cam_gb.jpg', cam_gb)

        print('^_^ The End! 002 ^_^')