Example #1
0
def load_data_transformers(resize_reso=512, crop_reso=448, swap_num=(7, 7)):
    """Build the named torchvision transform pipelines for DCL-style training.

    Args:
        resize_reso: side length images are resized to before cropping.
        crop_reso: side length of the random/center crop fed to the network.
        swap_num: (rows, cols) grid used by the Randomswap jigsaw transform.
            The default is a tuple (the original used a mutable list default).

    Returns:
        dict mapping pipeline name ('swap', 'common_aug', 'train_totensor',
        'val_totensor', 'test_totensor', 'None') to a transforms.Compose,
        or None for the 'None' key.
    """
    # ImageNet normalization, built once and shared by every *_totensor
    # pipeline (the original repeated the literal and left an unused binding).
    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
    data_transforms = {
        'swap': transforms.Compose([
            transforms.Randomswap((swap_num[0], swap_num[1])),
        ]),
        'common_aug': transforms.Compose([
            transforms.Resize((resize_reso, resize_reso)),
            transforms.RandomRotation(degrees=15),
            transforms.RandomCrop((crop_reso, crop_reso)),
            transforms.RandomHorizontalFlip(),
        ]),
        'train_totensor': transforms.Compose([
            transforms.Resize((crop_reso, crop_reso)),
            transforms.ToTensor(),
            normalize,
        ]),
        'val_totensor': transforms.Compose([
            transforms.Resize((crop_reso, crop_reso)),
            transforms.ToTensor(),
            normalize,
        ]),
        'test_totensor': transforms.Compose([
            transforms.Resize((resize_reso, resize_reso)),
            transforms.CenterCrop((crop_reso, crop_reso)),
            transforms.ToTensor(),
            normalize,
        ]),
        'None': None,
    }
    return data_transforms
Example #2
0
def classifier_batch(input_imgs, net):
    """Classify a batch of image files with a trained model.

    :param input_imgs: iterable of image file paths, one per sample.
    :param net: well-trained model; its first output is taken as class logits.
    :returns: list of (predicted class index, confidence score) tuples,
              one per input image, in input order.
    """
    # NOTE(review): grayscale-style mean/std replicated across all three
    # channels, and cv2.imread yields BGR order — presumably intentional
    # for this model; confirm against training preprocessing.
    transform = transforms.Compose([
        transforms.Resize(size=(224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.485, 0.485], std=[0.225, 0.225, 0.225])
    ])

    batch = []
    for input_path in input_imgs:
        # Read with OpenCV, resize, then hand to torchvision via PIL.
        img = cv2.imread(input_path)
        img = cv2.resize(img, (224, 224))
        tensor = transform(Image.fromarray(img))
        batch.append(torch.unsqueeze(tensor, 0))

    X = torch.cat(batch, dim=0)
    X = X.to(torch.device("cuda"))

    outputs = net(X)
    y_pred = outputs[0]  # logits head only

    softmax = torch.nn.Softmax(dim=0)
    results = []
    for y in y_pred:
        _, pred_idx = y.topk(1, 0, True, True)
        confidence_score = max(softmax(y)).item()
        results.append((pred_idx.item(), confidence_score))

    return results
Example #3
0
 def _val_image_transform(self):
     """Validation preprocessing: resize, center-crop, tensorize, normalize."""
     steps = [
         transforms.Resize(256),
         transforms.CenterCrop(224),
         transforms.ToTensor(),
         transforms.Normalize(mean=[0.485, 0.456, 0.406],
                              std=[0.229, 0.224, 0.225]),
     ]
     return transforms.Compose(steps)
Example #4
0
def img_transforms(img, label, crop_size):
    """Randomly crop (img, label) together to crop_size, tensorize and
    normalize the image, and convert the label image to an index tensor."""
    img, label = random_crop(img, label, crop_size)
    normalize = tfs.Compose([
        tfs.ToTensor(),
        tfs.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    img = normalize(img)
    label = torch.from_numpy(image2label(label))
    return img, label
Example #5
0
    def __init__(self,
                 model_root_pre_path='',
                 dataset='FER2013',
                 tr_using_crop=False,
                 *args,
                 **kwargs):
        """Load the plain and facial-landmark ACCNN models and build the
        test-time transform (ten-crop pipeline when tr_using_crop is set)."""
        self.model_root_pre_path = model_root_pre_path

        # Both models share every constructor argument except using_fl.
        shared_kwargs = dict(pre_trained=True,
                             dataset=dataset,
                             root_pre_path=model_root_pre_path,
                             fold=5,
                             virtualize=True)
        self.model = ACCNN(7, using_fl=False, **shared_kwargs).to(DEVICE)
        self.model_fl = ACCNN(7, using_fl=True, **shared_kwargs).to(DEVICE)

        self.tr_using_crop = tr_using_crop
        if self.tr_using_crop:
            # Ten-crop evaluation: enlarge by 20%, take ten crops,
            # tensorize and normalize each crop, stack into one batch.
            crop_img_size = int(self.model.input_size * 1.2)
            self.transform_test = transforms.Compose([
                transforms.Resize(crop_img_size),
                transforms.TenCrop(self.model.input_size),
                transforms.Lambda(lambda crops: torch.stack([
                    transforms.Normalize(IMG_MEAN, IMG_STD)
                    (transforms.ToTensor()(crop)) for crop in crops
                ])),
            ])
        else:
            # Single-image evaluation path.
            self.transform_test = transforms.Compose([
                transforms.Resize(int(self.model.input_size)),
                transforms.ToTensor(),
                transforms.Normalize(IMG_MEAN, IMG_STD),
            ])
Example #6
0
 def _train_image_transform(self):
     """Training preprocessing: resize, random crop/flips/rotation and light
     brightness jitter, then tensorize and normalize."""
     augmentation = [
         transforms.Resize(256),
         transforms.RandomCrop(224),
         transforms.RandomHorizontalFlip(),
         transforms.RandomVerticalFlip(),
         transforms.RandomRotation(0.2),
         transforms.ColorJitter(0.1, 0, 0, 0),
         transforms.ToTensor(),
         transforms.Normalize(mean=[0.485, 0.456, 0.406],
                              std=[0.229, 0.224, 0.225]),
     ]
     return transforms.Compose(augmentation)
Example #7
0
def load_transforms(args, random_chance=1):
    """Build the MRI loading/augmentation pipeline.

    When random_chance == 0 (e.g. during testing) the random crop offset is
    disabled entirely; otherwise the crop may be offset by up to 5 voxels.
    """
    # Random-offset factor: zero offset for deterministic (test) runs.
    s = 0 if random_chance == 0 else 1

    pipeline = tfs.ComposeMRI([
        tfs.LoadNifti(),
        tfs.RandomScaling(scale_range=[.95, 1.05]),
        tfs.RandomRotation(angle_interval=[-10, 10],
                           rotation_axis=None),
        tfs.ApplyAffine(so=2, chance=random_chance),
        tfs.Gamma(gamma_range=[.9, 1.1], chance=random_chance),
        tfs.ReturnImageData(),
        tfs.Crop(dims=args['img_dim'], offset=[0, -0, 0], rand_offset=5 * s),
        tfs.ReduceSlices(2, 2),
        tfs.PrcCap(),
        tfs.UnitInterval(),
        tfs.ToTensor(),
    ])

    return pipeline
Example #8
0
def load_data_transformers(resize_reso=512, crop_reso=448, swap_num=(7, 7)):
    """Build the named transform pipelines for DCL-style training, including
    food-specific and center-cropped variants.

    Args:
        resize_reso: side length images are resized to before cropping.
        crop_reso: side length of the crop fed to the network.
        swap_num: (rows, cols) grid used by the Randomswap jigsaw transform.
            The default is a tuple (the original used a mutable list default).

    Returns:
        dict mapping pipeline name to a transforms.Compose (or None for the
        'None' key).
    """
    # Pre-crop size for the 'Centered_*' pipelines.
    center_resize = 600
    # ImageNet normalization, built once and reused (the original bound it
    # to `Normalize` but never used it, repeating the literal instead).
    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
    data_transforms = {
        'swap':
        transforms.Compose([
            transforms.Resize((resize_reso, resize_reso)),
            transforms.RandomRotation(degrees=15),
            transforms.RandomCrop((crop_reso, crop_reso)),
            transforms.RandomHorizontalFlip(),
            transforms.Randomswap((swap_num[0], swap_num[1])),
        ]),
        'food_swap':
        transforms.Compose([
            transforms.Resize((resize_reso, resize_reso)),
            transforms.RandomRotation(degrees=90),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.RandomResizedCrop(size=crop_reso, scale=(0.75, 1)),
            transforms.Randomswap((swap_num[0], swap_num[1])),
        ]),
        'food_unswap':
        transforms.Compose([
            transforms.Resize((resize_reso, resize_reso)),
            transforms.RandomRotation(degrees=90),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.RandomResizedCrop(size=crop_reso, scale=(0.75, 1)),
        ]),
        'unswap':
        transforms.Compose([
            transforms.Resize((resize_reso, resize_reso)),
            transforms.RandomRotation(degrees=15),
            transforms.RandomCrop((crop_reso, crop_reso)),
            transforms.RandomHorizontalFlip(),
        ]),
        'train_totensor':
        transforms.Compose([
            transforms.Resize((crop_reso, crop_reso)),
            transforms.ToTensor(),
            normalize,
        ]),
        'val_totensor':
        transforms.Compose([
            transforms.Resize((crop_reso, crop_reso)),
            transforms.ToTensor(),
            normalize,
        ]),
        'test_totensor':
        transforms.Compose([
            transforms.Resize((resize_reso, resize_reso)),
            transforms.CenterCrop((crop_reso, crop_reso)),
            transforms.ToTensor(),
            normalize,
        ]),
        'None':
        None,
        'Centered_swap':
        transforms.Compose([
            transforms.CenterCrop((center_resize, center_resize)),
            transforms.Resize((resize_reso, resize_reso)),
            transforms.RandomRotation(degrees=15),
            transforms.RandomCrop((crop_reso, crop_reso)),
            transforms.RandomHorizontalFlip(),
            transforms.Randomswap((swap_num[0], swap_num[1])),
        ]),
        'Centered_unswap':
        transforms.Compose([
            transforms.CenterCrop((center_resize, center_resize)),
            transforms.Resize((resize_reso, resize_reso)),
            transforms.RandomRotation(degrees=15),
            transforms.RandomCrop((crop_reso, crop_reso)),
            transforms.RandomHorizontalFlip(),
        ]),
        'Tencrop':
        transforms.Compose([
            transforms.Resize((resize_reso, resize_reso)),
            transforms.TenCrop((crop_reso, crop_reso)),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
        ])
    }

    return data_transforms
Example #9
0
             pre_trained=True,
             dataset=dataset,
             fold=fold,
             virtualize=True,
             using_fl=fl).to(DEVICE)
 print("------------%s Model Already be Prepared------------" % net_name)
 input_img_size = net.input_size
 IMG_MEAN = [0.449]
 IMG_STD = [0.226]
 transform_train = transforms.Compose([
     transforms.Resize(
         input_img_size
     ),  # 缩放将图片的最小边缩放为 input_img_size,因此如果输入是非正方形的,那么输出也不是正方形的
     transforms.RandomHorizontalFlip(),
     transforms.RandomRotation(30),
     transforms.ToTensor(),
     transforms.Normalize(IMG_MEAN, IMG_STD),
 ])
 transform_test = transforms.Compose([
     transforms.Resize(
         input_img_size
     ),  # 缩放将图片的最小边缩放为 input_img_size,因此如果输入是非正方形的,那么输出也不是正方形的
     transforms.ToTensor(),
     transforms.Normalize(IMG_MEAN, IMG_STD),
 ])
 print("------------Preparing Data...----------------")
 if dataset == "JAFFE":
     test_data = JAFFE(is_train=False,
                       transform=transform_test,
                       target_type=target_type,
                       using_fl=fl)
Example #10
0
import torch
from model import fcn
from data.voc import VocSegDataset, img_transforms, COLORMAP, inverse_normalization, CLASSES
from PIL import Image
import transforms.transforms as tfs
import numpy as np
import cv2
from data import voc

# Inference demo: run a trained FCN-ResNet34 on one VOC2012 image.
num_classes = len(CLASSES)
model = fcn.FcnResNet(num_classes)
model.load_state_dict(torch.load('FCN_resnet34.pkl'))

# NOTE(review): input_shape is never used below; the crop size is repeated
# inline at the random_crop call instead.
input_shape = (320, 480)
# Preprocessing: tensorize + ImageNet normalization (cropping handles sizing).
img_tfs = tfs.Compose([
    tfs.ToTensor(),
    tfs.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# NOTE(review): hard-coded local Windows paths — adjust for your dataset root.
img = Image.open(r'E:\DataSet\VOC2012\JPEGImages\2007_000033.jpg')
# img = Image.open(r'H:/20180706204556.png').convert('RGB')
label_img = Image.open(
    r'E:\DataSet\VOC2012\SegmentationClass\2007_000033.png').convert('RGB')
# Crop image and label together so they stay spatially aligned.
img, label_img = voc.random_crop(img, label_img, (320, 480))
img.show()
label_img.show()

# Add a batch dimension and run the network.
img = img_tfs(img)
img = img.view(1, img.shape[0], img.shape[1], img.shape[2])
result = model(img)
print(result.shape)
Example #11
0
    def startup(self, args):
        """Entry point for the model heat-map (Grad-CAM) application.

        Builds the DCL data loaders, restores a checkpoint into MainModel,
        prepares (but does not run) an SGD optimizer and LR scheduler, then
        renders Grad-CAM and guided-backprop visualizations for one
        hard-coded image, writing 'gb.jpg' and 'cam_gb.jpg'.
        """
        i_debug = 18
        if 1 == i_debug:
            # Feature reserved for the Wuxi institute tender (debug-only path).
            app = WxsApp()
            app.startup(args)
            return
        print('模型热力图绘制应用 v0.1.0')
        os.environ['CUDA_VISIBLE_DEVICES'] = '2'
        args = self.parse_args()
        # arg_dict = vars(args)
        # Force single-process data loading for this visualization run.
        args.train_num_workers = 0
        args.val_num_workers = 0
        print(args, flush=True)
        Config = LoadConfig(args, 'train')
        Config.cls_2 = args.cls_2
        Config.cls_2xmul = args.cls_mul
        # Exactly one of the two classification modes must be active.
        assert Config.cls_2 ^ Config.cls_2xmul
        transformers = load_data_transformers(args.resize_resolution,
                                              args.crop_resolution,
                                              args.swap_num)
        # initial dataloader setup
        train_set = dataset(Config = Config,\
                            anno = Config.train_anno,\
                            common_aug = transformers["common_aug"],\
                            swap = transformers["swap"],\
                            swap_size=args.swap_num, \
                            totensor = transformers["train_totensor"],\
                            train = True)
        trainval_set = dataset(Config = Config,\
                            anno = Config.val_anno,\
                            common_aug = transformers["None"],\
                            swap = transformers["None"],\
                            swap_size=args.swap_num, \
                            totensor = transformers["val_totensor"],\
                            train = False,
                            train_val = True)
        val_set = dataset(Config = Config,\
                          anno = Config.val_anno,\
                          common_aug = transformers["None"],\
                          swap = transformers["None"],\
                            swap_size=args.swap_num, \
                          totensor = transformers["test_totensor"],\
                          test=True)
        dataloader = {}
        dataloader['train'] = torch.utils.data.DataLoader(train_set,\
                                                    batch_size=args.train_batch,\
                                                    shuffle=True,\
                                                    num_workers=args.train_num_workers,\
                                                    collate_fn=collate_fn4train if not Config.use_backbone else collate_fn4backbone,
                                                    drop_last=True if Config.use_backbone else False,
                                                    pin_memory=True)
        setattr(dataloader['train'], 'total_item_len', len(train_set))
        dataloader['trainval'] = torch.utils.data.DataLoader(trainval_set,\
                                                    batch_size=args.val_batch,\
                                                    shuffle=False,\
                                                    num_workers=args.val_num_workers,\
                                                    collate_fn=collate_fn4val if not Config.use_backbone else collate_fn4backbone,
                                                    drop_last=True if Config.use_backbone else False,
                                                    pin_memory=True)
        setattr(dataloader['trainval'], 'total_item_len', len(trainval_set))
        setattr(dataloader['trainval'], 'num_cls', Config.num_brands)
        dataloader['val'] = torch.utils.data.DataLoader(val_set,\
                                                    batch_size=args.val_batch,\
                                                    shuffle=False,\
                                                    num_workers=args.val_num_workers,\
                                                    collate_fn=collate_fn4test if not Config.use_backbone else collate_fn4backbone,
                                                    drop_last=True if Config.use_backbone else False,
                                                    pin_memory=True)
        setattr(dataloader['val'], 'total_item_len', len(val_set))
        setattr(dataloader['val'], 'num_cls', Config.num_brands)
        cudnn.benchmark = True
        print('Choose model and train set', flush=True)
        print('Choose model and train set', flush=True)
        model = MainModel(Config)

        # load model: either start from ImageNet weights or restore a
        # checkpoint (explicit --resume path, or auto-discovered).
        if (args.resume is None) and (not args.auto_resume):
            print('train from imagenet pretrained models ...', flush=True)
        else:
            if not args.resume is None:
                resume = args.resume
                print('load from pretrained checkpoint %s ...' % resume,
                      flush=True)
            elif args.auto_resume:
                resume = self.auto_load_resume(Config.save_dir)
                print('load from %s ...' % resume, flush=True)
            else:
                raise Exception("no checkpoints to load")

            model_dict = model.state_dict()
            pretrained_dict = torch.load(resume)
            print('train.py Ln193 resume={0};'.format(resume))
            # Strip the 'module.' DataParallel prefix (k[7:]) and keep only
            # keys that exist in the current model.
            pretrained_dict = {
                k[7:]: v
                for k, v in pretrained_dict.items() if k[7:] in model_dict
            }
            model_dict.update(pretrained_dict)
            model.load_state_dict(model_dict)
        print('Set cache dir', flush=True)
        # NOTE(review): `time` shadows any imported time module in this scope.
        time = datetime.datetime.now()
        filename = '%s_%d%d%d_%s' % (args.cam, time.month, time.day, time.hour,
                                     Config.dataset)
        save_dir = os.path.join(Config.save_dir, filename)
        print('save_dir: {0} + {1};'.format(Config.save_dir, filename))
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        model.cuda()
        # Keep un-wrapped references for Grad-CAM before DataParallel wrapping.
        cam_main_model = model
        cam_model = model.model
        model = nn.DataParallel(model)
        # optimizer prepare: collect ids of the freshly-initialized head
        # layers so the backbone can get a different learning rate.
        if Config.use_backbone:
            ignored_params = list(map(id, model.module.classifier.parameters())) \
                        + list(map(id, model.module.brand_clfr.parameters()))
        else:
            ignored_params1 = list(
                map(id, model.module.classifier.parameters()))
            ignored_params1x = list(
                map(id, model.module.brand_clfr.parameters()))
            ignored_params2 = list(
                map(id, model.module.classifier_swap.parameters()))
            ignored_params3 = list(map(id, model.module.Convmask.parameters()))
            ignored_params = ignored_params1 + ignored_params1x + ignored_params2 + ignored_params3
        print('the num of new layers:', len(ignored_params), flush=True)
        base_params = filter(lambda p: id(p) not in ignored_params,
                             model.module.parameters())
        lr_ratio = args.cls_lr_ratio
        base_lr = args.base_lr
        momentum = 0.9
        if Config.use_backbone:
            optimizer = optim.SGD(
                [{
                    'params': base_params
                }, {
                    'params': model.module.classifier.parameters(),
                    'lr': base_lr
                }, {
                    'params': model.module.brand_clfr.parameters(),
                    'lr': base_lr
                }],
                lr=base_lr,
                momentum=momentum)
        else:
            # Head layers train at lr_ratio * base_lr; backbone at base_lr.
            optimizer = optim.SGD([
                {
                    'params': base_params
                },
                {
                    'params': model.module.classifier.parameters(),
                    'lr': lr_ratio * base_lr
                },
                {
                    'params': model.module.brand_clfr.parameters(),
                    'lr': lr_ratio * base_lr
                },
                {
                    'params': model.module.classifier_swap.parameters(),
                    'lr': lr_ratio * base_lr
                },
                {
                    'params': model.module.Convmask.parameters(),
                    'lr': lr_ratio * base_lr
                },
            ],
                                  lr=base_lr,
                                  momentum=momentum)

        exp_lr_scheduler = lr_scheduler.StepLR(optimizer,
                                               step_size=args.decay_step,
                                               gamma=0.1)
        # *******************
        # Grad-CAM setup on the un-wrapped model.
        # *******************
        print('model: {0};'.format(cam_model))
        print('avgpoo: {0};'.format(cam_main_model.avgpool))
        headers = {
            'avgpool': cam_main_model.avgpool,
            'classifier': cam_main_model.brand_clfr
        }
        grad_cam = GradCam(model=cam_model, feature_module=cam_model[7], \
                       target_layer_names=["2"], headers=headers, use_cuda=True)
        # Load the input image data.
        img = None
        img_file = '/media/ps/0A9AD66165F33762/yantao/dcl/support/ds_files/wxs_ds/head/car/d00/d00/d00/d00/d96/SC7168CH5_冀B591C5_02_120000100604_120000702916290242.jpg'
        with open(img_file, 'rb') as f:
            with Image.open(f) as img:
                img = img.convert('RGB')

        crop_reso = 224
        to_tensor = transforms.Compose([
            transforms.Resize((crop_reso, crop_reso)),
            # ImageNetPolicy(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])
        img_obj = to_tensor(img)
        # NOTE(review): `input` shadows the builtin, and `input.cuda()` is
        # not reassigned, so its return value is discarded — confirm the
        # downstream code moves the tensor to GPU itself.
        input = img_obj.reshape(1, 3, 224, 224)
        input.cuda()
        input.requires_grad_(True)
        print('input: {0};'.format(input.shape))
        # If None, returns the map for the highest scoring category.
        # Otherwise, targets the requested index.
        target_index = None
        mask = grad_cam(input, target_index)
        #
        self.show_cam_on_image(img_file, mask)
        # Guided backprop, combined with the CAM mask, written to disk.
        gb_model = GuidedBackpropReLUModel(model=cam_main_model, use_cuda=True)
        gb = gb_model(input, index=target_index)
        gb = gb.transpose((1, 2, 0))
        cam_mask = cv2.merge([mask, mask, mask])
        cam_gb = self.deprocess_image(cam_mask * gb)
        gb = self.deprocess_image(gb)
        cv2.imwrite('gb.jpg', gb)
        cv2.imwrite('cam_gb.jpg', cam_gb)

        print('^_^ The End! 002 ^_^')
Example #12
0
    IMG_MEAN = [0.449]
    IMG_STD = [0.226]
    # for RGB images
    # IMG_MEAN = [0.485, 0.456, 0.406]
    # IMG_STD = [0.229, 0.224, 0.225]

    crop_img_size = int(net.input_size * 1.2)
    input_img_size = net.input_size
    transform_using_crop = opt.tr_using_crop
    if transform_using_crop:
        transform_train = transforms.Compose([
            transforms.Resize(crop_img_size),
            transforms.TenCrop(input_img_size),
            transforms.Lambda(lambda crops: torch.stack([
                transforms.Normalize(IMG_MEAN, IMG_STD)
                (transforms.ToTensor()(transforms.RandomHorizontalFlip()(
                    transforms.RandomRotation(30)(crop)))) for crop in crops
            ])),
        ])
        transform_test = transforms.Compose([
            transforms.Resize(crop_img_size),
            transforms.TenCrop(input_img_size),
            transforms.Lambda(lambda crops: torch.stack([
                transforms.Normalize(IMG_MEAN, IMG_STD)
                (transforms.ToTensor()(crop)) for crop in crops
            ])),
        ])
    else:
        transform_train = transforms.Compose([
            transforms.Resize(
                input_img_size
            ),  # 缩放将图片的最小边缩放为 input_img_size,因此如果输入是非正方形的,那么输出也不是正方形的