Example #1
 def __call__(self, results):
     results['img'] = corrupt(results['img'].astype(np.uint8),
                              corruption_name=self.corruption,
                              severity=self.severity)
     results['template_img'] = corrupt(results['template_img'].astype(
         np.uint8),
                                       corruption_name=self.corruption,
                                       severity=self.severity)
     return results
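A note on the pattern: the __call__ snippets in these examples are methods of a data-pipeline transform that keeps the corruption settings on self (several of them match mmdetection's Corrupt transform). A minimal sketch of such an enclosing class, assuming only that the imagecorruptions package is installed; the class name and defaults are illustrative:

import numpy as np
from imagecorruptions import corrupt


class CorruptTransform:
    """Illustrative pipeline transform: apply one fixed corruption to results['img']."""

    def __init__(self, corruption, severity=1):
        self.corruption = corruption  # a name from get_corruption_names()
        self.severity = severity      # integer severity in [1, 5]

    def __call__(self, results):
        results['img'] = corrupt(results['img'].astype(np.uint8),
                                 corruption_name=self.corruption,
                                 severity=self.severity)
        return results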
Example #2
 def __call__(self, results):
     if corrupt is None:
         raise RuntimeError('imagecorruptions is not installed')
     results['img'] = corrupt(results['img'].astype(np.uint8),
                              corruption_name=self.corruption,
                              severity=self.severity)
     return results
Example #3
    def prepare_test_img(self, idx):
        img_info = self.img_infos[idx]
        # load image
        img = mmcv.imread(osp.join(self.img_prefix, img_info['filename']))
        # corrupt image
        if self.corruption is not None:
            img = corrupt(img,
                          severity=self.corruption_severity,
                          corruption_name=self.corruption)

        def prepare_single(img, scale, flip, proposal=None):
            _img, img_shape, pad_shape, scale_factor = self.img_transform(
                img, scale, flip, keep_ratio=True)
            _img = to_tensor(_img)
            _img_meta = dict(ori_shape=(img.shape[0], img.shape[1], 3),
                             img_shape=img_shape,
                             pad_shape=pad_shape,
                             scale_factor=scale_factor,
                             flip=flip)
            return _img, _img_meta

        imgs = []
        img_metas = []
        for scale in self.img_scales:
            _img, _img_meta = prepare_single(img, scale, False, None)
            imgs.append(_img)
            img_metas.append(DC(_img_meta, cpu_only=True))
        data = dict(img=imgs, img_meta=img_metas)
        return data
Example #4
 def __call__(self, results):
     results["img"] = corrupt(
         results["img"].astype(np.uint8),
         corruption_name=self.corruption,
         severity=self.severity,
     )
     return results
Example #5
def process_image(file_path: Path, max_severity: int, output_dir: Path) -> None:
    image = load_rgb(file_path)
    for corruption in get_corruption_names():
        for severity in range(max_severity):
            corrupted = corrupt(image, corruption_name=corruption, severity=severity + 1)
            corrupted = bgr2rgb(corrupted)
            cv2.imwrite(
                str(output_dir.joinpath(f"{file_path.stem}_{corruption}_{severity + 1}{file_path.suffix}")), corrupted
            )
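A usage sketch for process_image above. The load_rgb and bgr2rgb helpers belong to the example's own codebase; here they are assumed to be thin OpenCV wrappers (BGR<->RGB is the same channel swap in both directions), and the file and directory names are hypothetical:

from pathlib import Path
import cv2


def load_rgb(path: Path):
    # assumed helper: read with OpenCV, then swap BGR -> RGB since corrupt() expects RGB
    return cv2.cvtColor(cv2.imread(str(path)), cv2.COLOR_BGR2RGB)


def bgr2rgb(image):
    # assumed helper: swap channels back so cv2.imwrite (which expects BGR) saves correct colors
    return cv2.cvtColor(image, cv2.COLOR_RGB2BGR)


output_dir = Path('corrupted')
output_dir.mkdir(exist_ok=True)
process_image(Path('test_image.jpg'), max_severity=5, output_dir=output_dir)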
Example #6
 def __call__(self, results):
     if corrupt is None:
         raise RuntimeError('imagecorruptions is not installed')
     if 'img_fields' in results:
         assert results['img_fields'] == ['img'], \
             'Only single img_fields is allowed'
     results['img'] = corrupt(results['img'].astype(np.uint8),
                              corruption_name=self.corruption,
                              severity=self.severity)
     return results
Example #7
def corrupt_all(img):
    # apply every corruption at severities 1-3 and collect the results
    copyimg = img.copy()
    imglist = []
    for corruption in get_corruption_names():
        for severity in range(3):
            corrupted = corrupt(copyimg,
                                corruption_name=corruption,
                                severity=severity + 1)
            imglist.append(corrupted)
    return imglist
Example #8
def apply_random_corruption(img, test=False):
    severity = np.random.choice(6)
    if severity == 0:
        return img
    else:
        img = np.array(img).astype(np.uint8)
        if test:
            corruption = np.random.choice(ic.get_corruption_names())
        else:
            corruption = np.random.choice(ic.get_corruption_names('validation'))
        corrupted = ic.corrupt(img, severity=severity, corruption_name=corruption)
        corrupted = Image.fromarray(corrupted)
        return corrupted
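Example #8 relies on get_corruption_names taking a subset argument. A quick sketch of the groups as documented for the imagecorruptions package (worth verifying against the installed version):

import imagecorruptions as ic

print(ic.get_corruption_names())              # the 15 'common' ImageNet-C corruptions
print(ic.get_corruption_names('validation'))  # the held-out validation corruptions
print(ic.get_corruption_names('all'))         # all 19 corruptions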
Example #9
 def prepare_test_img(self, idx):
     img_info = self.img_infos[idx]
     img = mmcv.imread(osp.join(self.img_prefix, img_info['filename']))
     rmse = np.zeros((5, 19))
     for c in range(19):
         for s in np.arange(1, 6):
             corrupted_img = corrupt(img, severity=s, corruption_number=c)
             try:
                 diff = img.astype(np.float64) - corrupted_img.astype(np.float64)
                 rmse[s - 1][c] = np.sqrt(np.mean(diff ** 2))
             except Exception as e:
                 print(
                     "Error occurred in file %s with index %d for corruption %d severity %d"
                     % (img_info['filename'], idx, c, s))
                 print(e)
                 exit()
     return rmse
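Note the float cast before the subtraction above: with uint8 arrays, img - corrupted_img wraps around modulo 256 instead of going negative, which silently distorts the RMSE. A short demonstration:

import numpy as np

a = np.array([10], dtype=np.uint8)
b = np.array([12], dtype=np.uint8)
print(a - b)                                        # [254] -- wrapped around, not -2
print(a.astype(np.float64) - b.astype(np.float64))  # [-2.]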
Example #10
    def __call__(self, results):
        """Call function to corrupt image.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with images corrupted.
        """

        if corrupt is None:
            raise RuntimeError('imagecorruptions is not installed')
        if 'img_fields' in results:
            assert results['img_fields'] == ['img'], \
                'Only single img_fields is allowed'
        results['img'] = corrupt(results['img'].astype(np.uint8),
                                 corruption_name=self.corruption,
                                 severity=self.severity)
        return results
Example #11
def load_image(self, index):
    # loads 1 image from dataset, returns img, original hw, resized hw
    img = self.imgs[index]
    if img is None:  # not cached
        path = self.img_files[index]
        img = cv2.imread(path)  # BGR
        assert img is not None, 'Image Not Found ' + path
        if self.corruption_num is not None:
            img = corrupt(img,
                          severity=self.severity,
                          corruption_number=self.corruption_num)
        h0, w0 = img.shape[:2]  # orig hw
        r = self.img_size / max(h0, w0)  # resize image to img_size
        if r != 1:  # always resize down, only resize up if training with augmentation
            interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
            img = cv2.resize(img, (int(w0 * r), int(h0 * r)),
                             interpolation=interp)
        return img, (h0, w0), img.shape[:2]  # img, hw_original, hw_resized
    else:
        return self.imgs[index], self.img_hw0[index], self.img_hw[
            index]  # img, hw_original, hw_resized
Example #12
    def prepare_train_img(self, idx):
        img_info = self.img_infos[idx]
        # load image
        img = mmcv.imread(osp.join(self.img_prefix, img_info['filename']))
        # corruption
        if self.corruption is not None:
            img = corrupt(img,
                          severity=self.corruption_severity,
                          corruption_name=self.corruption)
        # load proposals if necessary
        if self.proposals is not None:
            proposals = self.proposals[idx][:self.num_max_proposals]
            # TODO: Handle empty proposals properly. Currently images with
            # no proposals are just ignored, but they can be used for
            # training in principle.
            if len(proposals) == 0:
                return None
            if not (proposals.shape[1] == 4 or proposals.shape[1] == 5):
                raise AssertionError(
                    'proposals should have shapes (n, 4) or (n, 5), '
                    'but found {}'.format(proposals.shape))
            if proposals.shape[1] == 5:
                scores = proposals[:, 4, None]
                proposals = proposals[:, :4]
            else:
                scores = None

        ann = self.get_ann_info(idx)
        gt_bboxes = ann['bboxes']
        gt_labels = ann['labels']
        if self.with_crowd:
            gt_bboxes_ignore = ann['bboxes_ignore']

        # skip the image if there is no valid gt bbox
        if len(gt_bboxes) == 0 and self.skip_img_without_anno:
            warnings.warn('Skip the image "%s" that has no valid gt bbox' %
                          osp.join(self.img_prefix, img_info['filename']))
            return None

        # extra augmentation
        if self.extra_aug is not None:
            img, gt_bboxes, gt_labels = self.extra_aug(img, gt_bboxes,
                                                       gt_labels)

        # apply transforms
        flip = True if np.random.rand() < self.flip_ratio else False
        # randomly sample a scale
        img_scale = random_scale(self.img_scales, self.multiscale_mode)
        img, img_shape, pad_shape, scale_factor = self.img_transform(
            img, img_scale, flip, keep_ratio=self.resize_keep_ratio)
        img = img.copy()
        if self.with_seg:
            gt_seg = mmcv.imread(osp.join(
                self.seg_prefix, img_info['filename'].replace('jpg', 'png')),
                                 flag='unchanged')
            gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip)
            gt_seg = mmcv.imrescale(gt_seg,
                                    self.seg_scale_factor,
                                    interpolation='nearest')
            gt_seg = gt_seg[None, ...]
        if self.proposals is not None:
            proposals = self.bbox_transform(proposals, img_shape, scale_factor,
                                            flip)
            proposals = np.hstack([proposals, scores
                                   ]) if scores is not None else proposals
        gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor,
                                        flip)
        if self.with_crowd:
            gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape,
                                                   scale_factor, flip)
        if self.with_mask:
            gt_masks = self.mask_transform(ann['masks'], pad_shape,
                                           scale_factor, flip)

        ori_shape = (img_info['height'], img_info['width'], 3)
        img_meta = dict(ori_shape=ori_shape,
                        img_shape=img_shape,
                        pad_shape=pad_shape,
                        scale_factor=scale_factor,
                        flip=flip)

        data = dict(img=DC(to_tensor(img), stack=True),
                    img_meta=DC(img_meta, cpu_only=True),
                    gt_bboxes=DC(to_tensor(gt_bboxes)))
        if self.proposals is not None:
            data['proposals'] = DC(to_tensor(proposals))
        if self.with_label:
            data['gt_labels'] = DC(to_tensor(gt_labels))
        if self.with_crowd:
            data['gt_bboxes_ignore'] = DC(to_tensor(gt_bboxes_ignore))
        if self.with_mask:
            data['gt_masks'] = DC(gt_masks, cpu_only=True)
        if self.with_seg:
            data['gt_semantic_seg'] = DC(to_tensor(gt_seg), stack=True)
        return data
Example #13
 def __call__(self, results):
     results['img'] = corrupt(results['img'].astype(np.uint8),
                              corruption_name=self.corruption,
                              severity=self.severity)
     # cv_showimg(**results)
     return results
Example #14
    def prepare_train_img(self, idx):
        """
        get image according to the idx.
        and convert the mask to polar mask.
        :param idx:
        :return:data,
        a dict contains img, img_meta,gt_bboxes,gt_labels,gt_masks,gt_bboxes_ignore,_gt_labels,_gt_bboxes,_gt_masks
        """
        # img_info comes from the COCO annotation file, initialized when the class is constructed.
        img_info = self.img_infos[idx]

        # image shape is (h, w, 3); this differs from torch's channel-first layout.
        img = mmcv.imread(osp.join(self.img_prefix, img_info['filename']))

        learning = False
        if learning:
            img_id = self.img_infos[idx]['id']
            ann_ids = self.coco.getAnnIds(imgIds=[img_id])
            ann_info = self.coco.loadAnns(ann_ids)
            plt.imshow(img)
            plt.axis('off')
            self.coco.showAnns(ann_info)
            plt.show()

        # corruption: augment the image by degrading its quality.
        if self.corruption is not None:
            img = corrupt(img,
                          severity=self.corruption_severity,
                          corruption_name=self.corruption)
        # load proposals if necessary
        if self.proposals is not None:
            proposals = self.proposals[idx][:self.num_max_proposals]
            # TODO: Handle empty proposals properly. Currently images with
            # no proposals are just ignored, but they can be used for
            # training in principle.
            if len(proposals) == 0:
                return None
            if not (proposals.shape[1] == 4 or proposals.shape[1] == 5):
                raise AssertionError(
                    'proposals should have shapes (n, 4) or (n, 5), '
                    'but found {}'.format(proposals.shape))
            if proposals.shape[1] == 5:
                scores = proposals[:, 4, None]
                proposals = proposals[:, :4]
            else:
                scores = None

        # ann has been converted for detection use; bboxes are (x1, y1, x2, y2)
        ann = self.get_ann_info(idx)

        gt_bboxes = ann['bboxes']
        gt_labels = ann['labels']
        if self.with_crowd:
            gt_bboxes_ignore = ann['bboxes_ignore']

        # skip the image if there is no valid gt bbox
        if len(gt_bboxes) == 0 and self.skip_img_without_anno:
            warnings.warn('Skip the image "%s" that has no valid gt bbox' %
                          osp.join(self.img_prefix, img_info['filename']))
            return None

        # apply transforms
        flip = True if np.random.rand() < self.flip_ratio else False
        # randomly sample a scale
        img_scale = random_scale(self.img_scales, self.multiscale_mode)
        img, img_shape, pad_shape, scale_factor = self.img_transform(
            img, img_scale, flip, keep_ratio=self.resize_keep_ratio)
        # img shape is now (3, h, w) and the image has been normalized, so it cannot be displayed directly.

        img = img.copy()
        if self.with_seg:
            gt_seg = mmcv.imread(osp.join(
                self.seg_prefix, img_info['filename'].replace('jpg', 'png')),
                                 flag='unchanged')
            gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip)
            gt_seg = mmcv.imrescale(gt_seg,
                                    self.seg_scale_factor,
                                    interpolation='nearest')
            gt_seg = gt_seg[None, ...]
        if self.proposals is not None:
            proposals = self.bbox_transform(proposals, img_shape, scale_factor,
                                            flip)
            proposals = np.hstack([proposals, scores
                                   ]) if scores is not None else proposals
        gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor,
                                        flip)
        if self.with_crowd:
            gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape,
                                                   scale_factor, flip)
        if self.with_mask:
            gt_masks = self.mask_transform(ann['masks'], pad_shape,
                                           scale_factor, flip)

        ori_shape = (img_info['height'], img_info['width'], 3)
        img_meta = dict(ori_shape=ori_shape,
                        img_shape=img_shape,
                        pad_shape=pad_shape,
                        scale_factor=scale_factor,
                        flip=flip)

        data = dict(img=DC(to_tensor(img), stack=True),
                    img_meta=DC(img_meta, cpu_only=True),
                    gt_bboxes=DC(to_tensor(gt_bboxes)))

        if self.with_label:
            data['gt_labels'] = DC(to_tensor(gt_labels))
        if self.with_crowd:
            data['gt_bboxes_ignore'] = DC(to_tensor(gt_bboxes_ignore))
        if self.with_mask:
            data['gt_masks'] = DC(gt_masks, cpu_only=True)

        #--------------------offline ray label generation-----------------------------

        self.center_sample = True
        self.use_mask_center = True
        self.radius = 1.5
        self.strides = [8, 16, 32, 64, 128]
        self.regress_ranges = ((-1, 64), (64, 128), (128, 256), (256, 512),
                               (512, INF))
        featmap_sizes = self.get_featmap_size(pad_shape)
        self.featmap_sizes = featmap_sizes
        num_levels = len(self.strides)
        all_level_points = self.get_points(
            featmap_sizes)  # receptive field map [feature map, img_field]
        self.num_points_per_level = [i.size()[0] for i in all_level_points]
        # number of points at each level of the feature pyramid
        expanded_regress_ranges = [
            all_level_points[i].new_tensor(
                self.regress_ranges[i])[None].expand_as(all_level_points[i])
            for i in range(num_levels)
        ]
        concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
        concat_points = torch.cat(
            all_level_points,
            0)  # convert all feature points to a vector, size = 20460
        gt_masks = gt_masks[:len(gt_bboxes)]

        gt_bboxes = torch.Tensor(gt_bboxes)
        gt_labels = torch.Tensor(gt_labels)

        _labels, _bbox_targets, _mask_targets = self.polar_target_single(
            gt_bboxes, gt_masks, gt_labels, concat_points,
            concat_regress_ranges)

        data['_gt_labels'] = DC(_labels)
        data['_gt_bboxes'] = DC(_bbox_targets)
        data['_gt_masks'] = DC(_mask_targets)
        #--------------------offline ray label generation-----------------------------

        return data
Example #15
from imagecorruptions import corrupt
from PIL import Image
import os
import numpy as np
from tqdm import tqdm

f = open('datasets/VOCdevkit/VOC2012/ImageSets/Segmentation/val.txt', 'r')

img_paths = list()

for line in f:
    line = line.rstrip('\n')
    img_path = os.path.join('datasets/VOCdevkit/VOC2012/JPEGImages',
                            line + '.jpg')
    img_paths.append(img_path)

f.close()

for img_path in tqdm(img_paths):
    image = Image.open(img_path)
    image = np.asarray(image)

    corrupted_image = corrupt(image, severity=1, corruption_name='fog')
    # corrupted_image = corrupt(image, severity=1, corruption_name='snow')

    image = Image.fromarray(corrupted_image)
    image.save(
        os.path.join('datasets/Foggy_VOC/JPEGImages',
                     os.path.basename(img_path)))
Example #16
    def prepare_train_img(self, idx):
        img_info = self.img_infos[idx]
        img = mmcv.imread(osp.join(self.img_prefix, img_info['filename']))
        # corruption
        if self.corruption is not None:
            img = corrupt(
                img,
                severity=self.corruption_severity,
                corruption_name=self.corruption)
        # load proposals if necessary
        if self.proposals is not None:
            proposals = self.proposals[idx][:self.num_max_proposals]
            # TODO: Handle empty proposals properly. Currently images with
            # no proposals are just ignored, but they can be used for
            # training in principle.
            if len(proposals) == 0:
                return None
            if not (proposals.shape[1] == 4 or proposals.shape[1] == 5):
                raise AssertionError(
                    'proposals should have shapes (n, 4) or (n, 5), '
                    'but found {}'.format(proposals.shape))
            if proposals.shape[1] == 5:
                scores = proposals[:, 4, None]
                proposals = proposals[:, :4]
            else:
                scores = None

        ann = self.get_ann_info(idx)

        gt_bboxes = ann['bboxes']
        gt_labels = ann['labels']
        if self.with_crowd:
            gt_bboxes_ignore = ann['bboxes_ignore']

        # skip the image if there is no valid gt bbox
        if len(gt_bboxes) == 0 and self.skip_img_without_anno:
            warnings.warn('Skip the image "%s" that has no valid gt bbox' %
                          osp.join(self.img_prefix, img_info['filename']))
            return None

        # apply transforms
        flip = True if np.random.rand() < self.flip_ratio else False
        # randomly sample a scale
        img_scale = random_scale(self.img_scales, self.multiscale_mode)
        img, img_shape, pad_shape, scale_factor = self.img_transform(
            img, img_scale, flip, keep_ratio=self.resize_keep_ratio)

        img = img.copy()
        if self.with_seg:
            gt_seg = mmcv.imread(
                osp.join(self.seg_prefix,
                         img_info['filename'].replace('jpg', 'png')),
                flag='unchanged')
            gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip)
            gt_seg = mmcv.imrescale(
                gt_seg, self.seg_scale_factor, interpolation='nearest')
            gt_seg = gt_seg[None, ...]
        if self.proposals is not None:
            proposals = self.bbox_transform(proposals, img_shape, scale_factor,
                                            flip)
            proposals = np.hstack([proposals, scores
                                   ]) if scores is not None else proposals
        gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor,
                                        flip)
        if self.with_crowd:
            gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape,
                                                   scale_factor, flip)
        if self.with_mask:
            gt_masks = self.mask_transform(ann['masks'], pad_shape,
                                           scale_factor, flip)

        ori_shape = (img_info['height'], img_info['width'], 3)
        img_meta = dict(
            ori_shape=ori_shape,
            img_shape=img_shape,
            pad_shape=pad_shape,
            scale_factor=scale_factor,
            flip=flip)

        data = dict(
            img=DC(to_tensor(img), stack=True),
            img_meta=DC(img_meta, cpu_only=True),
            gt_bboxes=DC(to_tensor(gt_bboxes)))

        if self.with_label:
            data['gt_labels'] = DC(to_tensor(gt_labels))
        if self.with_crowd:
            data['gt_bboxes_ignore'] = DC(to_tensor(gt_bboxes_ignore))
        if self.with_mask:
            data['gt_masks'] = DC(gt_masks, cpu_only=True)


        #--------------------offline ray label generation-----------------------------
        self.center_sample = True
        self.use_mask_center = False
        self.radius = 1.5
        self.strides = [8, 16, 32, 64, 128]
        self.regress_ranges = ((-1, 64), (64, 128), (128, 256), (256, 512),
                               (512, INF))
        featmap_sizes = self.get_featmap_size(pad_shape)
        self.featmap_sizes = featmap_sizes
        num_levels = len(self.strides)
        all_level_points = self.get_points(featmap_sizes)
        self.num_points_per_level = [i.size()[0] for i in all_level_points]

        expanded_regress_ranges = [
            all_level_points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
                all_level_points[i]) for i in range(num_levels)
        ]
        concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
        concat_points = torch.cat(all_level_points, 0)
        gt_masks = gt_masks[:len(gt_bboxes)]

        gt_bboxes = torch.Tensor(gt_bboxes)
        gt_labels = torch.Tensor(gt_labels)

        _labels, _bbox_targets, _mask_targets = self.fcos_target_single(
            gt_bboxes, gt_masks, gt_labels, concat_points, concat_regress_ranges)

        data['_gt_labels'] = DC(_labels)
        data['_gt_bboxes'] = DC(_bbox_targets)
        data['_gt_masks'] = DC(_mask_targets)
        #--------------------offline ray label generation-----------------------------


        return data
Example #17
def load_corruption_dataset(image):
    # via number: corruption_number indexes the same list of corruptions
    # returned by get_corruption_names(); severity runs from 1 to 5
    for i in range(15):
        for severity in range(5):
            corrupted = corrupt(image, corruption_number=i, severity=severity + 1)
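As the comment says, corruption_number selects an entry from the same underlying list of corruptions that get_corruption_names() names, so for the first 15 entries the two keyword arguments are interchangeable. A small sketch of the equivalence; the synthetic image is purely illustrative:

import numpy as np
from imagecorruptions import corrupt, get_corruption_names

image = np.zeros((224, 224, 3), dtype=np.uint8)  # hypothetical test image
name = get_corruption_names()[3]
a = corrupt(image, corruption_number=3, severity=2)
b = corrupt(image, corruption_name=name, severity=2)
# a and b should apply the same corruption, assuming corruption_number
# indexes the default name list in order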
Example #18
from imagecorruptions import corrupt, get_corruption_names
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import time

image = np.asarray(Image.open('test_image.jpg'))
#image = np.ones((427, 640, 3), dtype=np.uint8)

# corrupted_image = corrupt(img, corruption_name='gaussian_blur', severity=1)

for corruption in get_corruption_names('blur'):
    tic = time.time()
    for severity in range(5):
        corrupted = corrupt(image,
                            corruption_name=corruption,
                            severity=severity + 1)
        plt.imshow(corrupted)
        plt.show()
    print(corruption, time.time() - tic)
Example #19
    def prepare_test_img(self, idx):
        """Prepare an image for testing (multi-scale and flipping)"""
        img_info = self.img_infos[idx]
        img = mmcv.imread(osp.join(self.img_prefix[:-11],
                                   img_info['filename']))
        # corruption
        if self.corruption is not None:
            img = corrupt(img,
                          severity=self.corruption_severity,
                          corruption_name=self.corruption)
        # load proposals if necessary
        if self.proposals is not None:
            proposal = self.proposals[idx][:self.num_max_proposals]
            if not (proposal.shape[1] == 4 or proposal.shape[1] == 5):
                raise AssertionError(
                    'proposals should have shapes (n, 4) or (n, 5), '
                    'but found {}'.format(proposal.shape))
        else:
            proposal = None

        # get img_refer from first frame
        first_frame_idx = img_info["first_frame"]
        refer_info = self.img_infos[first_frame_idx]
        refer_ann = self.get_ann_info(first_frame_idx)
        img_refer = mmcv.imread(
            osp.join(self.img_prefix[:-11], refer_info['filename']))
        # crop the bbox
        img_refer = torch.squeeze(
            torch.Tensor(mmcv.imcrop(img_refer, refer_ann["bboxes"])))
        # resize to refer_scale
        img_refer = torch.Tensor(
            mmcv.imresize(np.float32(img_refer),
                          self.refer_scale,
                          return_scale=False)).permute(2, 0, 1)

        def prepare_single(img, scale, flip, proposal=None):
            _img, img_shape, pad_shape, scale_factor = self.img_transform(
                img, scale, flip, keep_ratio=self.resize_keep_ratio)
            _img = to_tensor(_img)
            _img_meta = dict(ori_shape=(img_info['height'], img_info['width'],
                                        3),
                             img_shape=img_shape,
                             pad_shape=pad_shape,
                             scale_factor=scale_factor,
                             flip=flip)
            if proposal is not None:
                if proposal.shape[1] == 5:
                    score = proposal[:, 4, None]
                    proposal = proposal[:, :4]
                else:
                    score = None
                _proposal = self.bbox_transform(proposal, img_shape,
                                                scale_factor, flip)
                _proposal = np.hstack([_proposal, score
                                       ]) if score is not None else _proposal
                _proposal = to_tensor(_proposal)
            else:
                _proposal = None
            return _img, _img_meta, _proposal

        imgs = []
        img_metas = []
        img_refers = []
        proposals = []
        for scale in self.img_scales:
            _img, _img_meta, _proposal = prepare_single(
                img, scale, False, proposal)
            imgs.append(_img)
            img_metas.append(DC(_img_meta, cpu_only=True))
            img_refers.append(DC(to_tensor(img_refer), stack=True))
            proposals.append(_proposal)
            if self.flip_ratio > 0:
                _img, _img_meta, _proposal = prepare_single(
                    img, scale, True, proposal)
                imgs.append(_img)
                img_metas.append(DC(_img_meta, cpu_only=True))
                img_refers.append(DC(to_tensor(img_refer), stack=True))
                proposals.append(_proposal)
        data = dict(img=imgs, img_meta=img_metas, img_refer=img_refers)
        if self.proposals is not None:
            data['proposals'] = proposals
        return data
Example #20
def main():
    best_acc1 = 0
    args = parser.parse_args()
    assert args.batch_size % args.effective_bs == 0, "Effective batch size must be a divisor of batch_size"
    
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
        
    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()
        
    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
        
    if args.train:
        writer = SummaryWriter()
        optimizer = torch.optim.Adam(model.parameters(), args.lr)
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
        
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                #best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)            
            model.load_state_dict(checkpoint['state_dict'])
            if args.train:
                try:
                    optimizer.load_state_dict(checkpoint['optimizer'])
                    print("=> loaded optimizer state from checkpoint")
                except Exception:
                    print("=> optimizer state not found in checkpoint")
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
            
    cudnn.benchmark = True
    
    # Data loading code
    traindir = os.path.join(args.data, 'train')
    norm_params = {'mean':[0.485, 0.456, 0.406],
                  'std':[0.229, 0.224, 0.225]}
    normalize = transforms.Normalize(mean=norm_params['mean'],
                                     std=norm_params['std'])
    test_loader = get_test_loader(args, normalize)
    if args.evaluate == 'corrupted':
        corrupted_test_loader = get_test_loader(args, normalize, lambda img: apply_random_corruption(img, test=True))
    
    
    if args.train:
        if args.augment_train_data:
            # as augmodel will be applied before normalization, omit the normalize transform here
            train_dataset = datasets.ImageFolder(
                traindir,
                transforms.Compose([
                    transforms.RandomResizedCrop(224),
                    transforms.ToTensor(),
            ]))
            if args.augmentations:
                ops = []
                for aug in args.augmentations:
                    ops.append(augmentations.__dict__[aug])
            else:
                ops = augmentations.standard_augmentations
            # initialize augmodel
            print('Using augmentations ' + str(ops))
            augmodel = AugModel(norm_params=norm_params, augmentations=ops, augmentation_mean=args.augmentation_mean, augmentation_std=args.augmentation_std, min_magnitude=args.min_magnitude, max_magnitude=args.max_magnitude)
            if args.resume and 'augmodel_state_dict' in checkpoint.keys():
                augmodel.load_state_dict(checkpoint['augmodel_state_dict'])
            if 'AdaptiveStyleTransfer' in args.augmentations:
                augmodel.augmentations[1].initStyles(args.style_subset, seed=args.seed)
            if 'StyleTransfer' in args.augmentations and args.style_subset is not None:
                op = augmodel.augmentations[1]
                assert str(op) == 'StyleTransfer'
                pbn = op._PainterByNumbers
                assert 0 < args.style_subset < len(pbn)
                if args.seed:
                    rng_state = torch.get_rng_state() # save the pseudo-random state
                    torch.manual_seed(args.seed) # set the seed for deterministic dataset splits
                pbn_split, _ = torch.utils.data.dataset.random_split(pbn, [args.style_subset, len(pbn) - args.style_subset])
                if args.seed:
                    torch.set_rng_state(rng_state) # reset the state for non-deterministic behaviour below
                op._PainterByNumbers = pbn_split
                op.resetStyleLoader(args.effective_bs)
            if args.gpu is not None:
                augmodel = augmodel.cuda(args.gpu)
                augmodel.augmentations[1].enc_to()
                augmodel.augmentations[1].dec_to()
        else:
            train_dataset = datasets.ImageFolder(
                traindir,
                transforms.Compose([
                    transforms.RandomResizedCrop(224),
                    transforms.ToTensor(),
                    normalize
            ]))
            augmodel = None

        if args.ho:
            ho_criterion = nn.CrossEntropyLoss().cuda(args.gpu)
            ho_optimizer = torch.optim.Adam([p for p in augmodel.parameters() if p.requires_grad], args.ho_lr)
            if args.resume and 'ho_optimizer' in checkpoint.keys():
                try:
                    ho_optimizer.load_state_dict(checkpoint['ho_optimizer'])
                    print("=> loaded optimizer state from checkpoint")
                except Exception:
                    print("=> optimizer state not found in checkpoint")
                
            # train/val split
            train_size = int(len(train_dataset) * args.train_size)
            if args.seed:
                rng_state = torch.get_rng_state() # save the pseudo-random state
                torch.manual_seed(args.seed) # set the seed for deterministic dataset splits
            train_split, val_split = torch.utils.data.dataset.random_split(train_dataset, [train_size, len(train_dataset) - train_size])
            if args.seed:
                torch.set_rng_state(rng_state) # reset the state for non-deterministic behaviour below
            if args.validation_objective == 'clean':
                val_transform = transforms.Compose([
                        transforms.Resize(256),
                        transforms.CenterCrop(224),
                        transforms.ToTensor(),
                        normalize,
                    ])
            elif args.validation_objective == 'corrupted':
                val_transform = transforms.Compose([
                        transforms.Resize(256),
                        transforms.CenterCrop(224),
                        transforms.Lambda(apply_random_corruption),
                        transforms.ToTensor(),
                        normalize,
                    ])
            # as the underlying dataset of both splits is the same, this is the only way of having separate transforms for train and val split
            val_dataset = datasets.ImageFolder(traindir, transform=val_transform)
            val_split.dataset = val_dataset
            
            train_loader = torch.utils.data.DataLoader(
                train_split, batch_size=args.batch_size, shuffle=True,
                num_workers=args.workers, pin_memory=True, drop_last=True)

            val_loader = InfiniteDataLoader(
                val_split, batch_size=args.batch_size, shuffle=True, 
                num_workers=args.workers, pin_memory=True, drop_last=True)   
        else:
            if args.path_to_stylized and not args.augment_train_data:
                stylized_imagenet = datasets.ImageFolder(root=traindir, loader=stylized_loader, transform=transforms.Compose([transforms.ToTensor(), normalize]))
                train_dataset = torch.utils.data.ConcatDataset([train_dataset, stylized_imagenet])
                
            train_loader = torch.utils.data.DataLoader(
                train_dataset, batch_size=args.batch_size, shuffle=True,
                num_workers=args.workers, pin_memory=True, drop_last=True)
            val_loader = None
            ho_criterion = None
            ho_optimizer = None
        
        # training
        for epoch in range(args.start_epoch, args.epochs):
            if args.decrease_temperature is not None and (epoch - args.start_epoch) % args.decrease_temperature == 0 and epoch != args.start_epoch:
                augmodel.augmentations[1].temperature /= 2
            if args.increasing_alpha is not None and (epoch - args.start_epoch) % args.increasing_alpha == 0:
                op = augmodel.augmentations[1]
                assert str(op) == 'StyleTransfer'
                current_alpha = op.mu_mag
                
                ckpt = {
                    'epoch': epoch,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer' : optimizer.state_dict(),
                }
                if args.ho:
                    ckpt['augmodel_state_dict'] = augmodel.state_dict()
                    ckpt['ho_optimizer'] = ho_optimizer.state_dict()
                save_checkpoint(ckpt, is_best=False, filename='checkpoint_alpha_%1.3f.pth.tar'%(current_alpha.item()))
                
                updated_alpha = current_alpha + 0.1
                op.mu_mag = updated_alpha
                print("=> alpha=%1.2f"%(op.mu_mag.item()))
            train(train_loader, val_loader, model, augmodel, criterion, ho_criterion, optimizer, ho_optimizer, epoch, args, writer)
            is_best = False
            # evaluate on validation set
            if epoch % args.print_freq == 0:
                acc1 = validate(test_loader, model, criterion, args)
                writer.add_scalar('Metrics/test_acc', acc1, epoch)
                if args.evaluate == 'corrupted':
                    mpc = validate(corrupted_test_loader, model, criterion, args)
                    writer.add_scalar('Metrics/test_mpc', mpc, epoch)
                
                # remember best acc@1 and save checkpoint
                is_best = acc1 > best_acc1
                best_acc1 = max(acc1, best_acc1)
            
            ckpt = {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer' : optimizer.state_dict(),
            }
            
            if args.ho:
                ckpt['augmodel_state_dict'] = augmodel.state_dict()
                ckpt['ho_optimizer'] = ho_optimizer.state_dict()

            save_checkpoint(ckpt, is_best)
            
    if args.evaluate == 'clean':
        validate(test_loader, model, criterion, args)
    elif args.evaluate == 'corrupted':
        corruptions = ic.get_corruption_names('all')
        severities = [0,1,2,3,4,5]
        accuracies = {}
        for corruption in corruptions:
            accuracies[corruption] = {}
            for severity in severities:
                if severity == 0:
                    print('Testing clean')
                    acc = validate(test_loader, model, criterion, args)
                    accuracies[corruption][severity] = torch.squeeze(acc.cpu()).item()
                else:
                    print('Testing %s:%d'%(corruption, severity))
                    corrupted_loader = get_test_loader(args, normalize, lambda x: Image.fromarray(ic.corrupt(np.array(x, dtype=np.uint8), corruption_name=corruption, severity=severity)))
                    acc = validate(corrupted_loader, model, criterion, args)
                    accuracies[corruption][severity] = torch.squeeze(acc.cpu()).item()
        if args.train:
            e = args.epochs
        elif args.resume:
            e = args.start_epoch
        pickle.dump(accuracies, open("robustness_epoch_{}.pkl".format(e), "wb"))
Example #21
def main():
    best_acc1 = 0
    args = parser.parse_args()
    assert args.batch_size % args.effective_bs == 0, "Effective batch size must be a divisor of batch_size"

    if len(args.gpu) < 2:
        print("Two GPUs are needed for training. Exiting...")
        exit()
    else:
        print("Use GPU: {} for training".format(args.gpu[:2]))
        gpu0 = args.gpu[0]
        gpu1 = args.gpu[1]

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    model = model.cuda(gpu0)

    if args.train:
        writer = SummaryWriter()
        optimizer = torch.optim.SGD(model.parameters(), args.lr)
    criterion = nn.CrossEntropyLoss().cuda(gpu0)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            # Map model to be loaded to specified single gpu.
            loc = 'cuda:{}'.format(gpu0)
            checkpoint = torch.load(args.resume, map_location=loc)

            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            best_acc1 = best_acc1.to(gpu0)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer = torch.optim.SGD(model.parameters(),
                                        args.lr,
                                        momentum=args.momentum,
                                        weight_decay=args.weight_decay)
            if args.train:
                try:
                    optimizer.load_state_dict(checkpoint['optimizer'])
                    print("=> loaded optimizer state from checkpoint")
                except Exception:
                    print("=> optimizer state not found in checkpoint")
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    norm_params = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}
    normalize = transforms.Normalize(mean=norm_params['mean'],
                                     std=norm_params['std'])
    NORM = kornia.color.Normalize(mean=torch.tensor(norm_params['mean'],
                                                    dtype=torch.float32),
                                  std=torch.tensor(norm_params['std'],
                                                   dtype=torch.float32))
    test_loader = get_test_loader(args, normalize)
    if args.evaluate == 'corrupted':
        corrupted_test_loader = get_test_loader(
            args, normalize,
            lambda img: apply_random_corruption(img, test=True))

    if args.train:
        # as augmodel will be applied before normalization,
        AST = AdaptiveStyleTransfer(temperature=torch.tensor(0.1),
                                    mean=0.,
                                    logits=torch.zeros(args.style_subset,
                                                       dtype=torch.float32,
                                                       requires_grad=True))
        AST.cuda(gpu1)
        AST.initStyles(args.style_subset, seed=args.seed)

        def styletransfer(img):
            if np.random.uniform() < 0.5:
                img = AST(img)
            return NORM(img)

        train_dataset = datasets.ImageFolder(
            traindir,
            transforms.Compose([
                transforms.RandomResizedCrop(224),
                transforms.ToTensor(),
            ]))

        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   num_workers=args.workers,
                                                   pin_memory=True,
                                                   drop_last=True)

        # training
        for epoch in range(args.start_epoch, args.epochs):
            if args.increasing_alpha is not None and (
                    epoch - args.start_epoch) % args.increasing_alpha == 0:
                current_alpha = AST.mu_mag

                ckpt = {
                    'epoch': epoch,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                }
                save_checkpoint(ckpt,
                                is_best=False,
                                filename='checkpoint_alpha_%1.3f.pth.tar' %
                                (current_alpha.item()))

                updated_alpha = current_alpha + 0.1
                AST.mu_mag = updated_alpha
                print('=> Alpha={}'.format(AST.mu_mag.item()))
            train(train_loader, model, styletransfer, criterion, optimizer,
                  epoch, args, writer)
            is_best = False
            # evaluate on validation set
            if epoch % args.print_freq == 0:
                acc1 = validate(test_loader, model, criterion, args)
                writer.add_scalar('Metrics/test_acc', acc1, epoch)
                if args.evaluate == 'corrupted':
                    mpc = validate(corrupted_test_loader, model, criterion,
                                   args)
                    writer.add_scalar('Metrics/test_mpc', mpc, epoch)

                # remember best acc@1 and save checkpoint
                is_best = acc1 > best_acc1
                best_acc1 = max(acc1, best_acc1)

            ckpt = {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer': optimizer.state_dict(),
            }

            save_checkpoint(ckpt, is_best)
            writer.add_scalar('AST/alpha',
                              AST.mu_mag.cpu().detach().numpy(), epoch)

    if args.evaluate == 'clean':
        validate(test_loader, model, criterion, args)
    elif args.evaluate == 'corrupted':
        corruptions = ic.get_corruption_names('all')
        severities = [0, 1, 2, 3, 4, 5]
        accuracies = {}
        for corruption in corruptions:
            accuracies[corruption] = {}
            for severity in severities:
                if severity == 0:
                    print('Testing clean')
                    acc = validate(test_loader, model, criterion, args)
                    accuracies[corruption][severity] = torch.squeeze(
                        acc.cpu()).item()
                else:
                    print('Testing %s:%d' % (corruption, severity))
                    corrupted_loader = get_test_loader(
                        args, normalize, lambda x: Image.fromarray(
                            ic.corrupt(np.array(x, dtype=np.uint8),
                                       corruption_name=corruption,
                                       severity=severity)))
                    acc = validate(corrupted_loader, model, criterion, args)
                    accuracies[corruption][severity] = torch.squeeze(
                        acc.cpu()).item()
        if args.train:
            e = args.epochs
        elif args.resume:
            e = args.start_epoch
        pickle.dump(accuracies, open("robustness_epoch_{}.pkl".format(e),
                                     "wb"))
Example #22
    def prepare_test_img(self, idx):
        """Prepare an image for testing (multi-scale and flipping)"""
        img_info = self.img_infos[idx]
        img = mmcv.imread(osp.join(self.img_prefix, img_info['filename']))
        # corruption
        if self.corruption is not None:
            img = corrupt(img,
                          severity=self.corruption_severity,
                          corruption_name=self.corruption)
        # load proposals if necessary
        if self.proposals is not None:
            proposal = self.proposals[idx][:self.num_max_proposals]
            if not (proposal.shape[1] == 4 or proposal.shape[1] == 5):
                raise AssertionError(
                    'proposals should have shapes (n, 4) or (n, 5), '
                    'but found {}'.format(proposal.shape))
        else:
            proposal = None

        def prepare_single(img, scale, flip, proposal=None):
            _img, img_shape, pad_shape, scale_factor = self.img_transform(
                img, scale, flip, keep_ratio=self.resize_keep_ratio)
            _img = to_tensor(_img)
            _img_meta = dict(ori_shape=(img_info['height'], img_info['width'],
                                        3),
                             img_shape=img_shape,
                             pad_shape=pad_shape,
                             scale_factor=scale_factor,
                             flip=flip)
            if proposal is not None:
                if proposal.shape[1] == 5:
                    score = proposal[:, 4, None]
                    proposal = proposal[:, :4]
                else:
                    score = None
                _proposal = self.bbox_transform(proposal, img_shape,
                                                scale_factor, flip)
                _proposal = np.hstack([_proposal, score
                                       ]) if score is not None else _proposal
                _proposal = to_tensor(_proposal)
            else:
                _proposal = None
            return _img, _img_meta, _proposal

        imgs = []
        img_metas = []
        proposals = []
        for scale in self.img_scales:
            _img, _img_meta, _proposal = prepare_single(
                img, scale, False, proposal)
            imgs.append(_img)
            img_metas.append(DC(_img_meta, cpu_only=True))
            proposals.append(_proposal)
            if self.flip_ratio > 0:
                _img, _img_meta, _proposal = prepare_single(
                    img, scale, True, proposal)
                imgs.append(_img)
                img_metas.append(DC(_img_meta, cpu_only=True))
                proposals.append(_proposal)
        data = dict(img=imgs, img_meta=img_metas)
        if self.proposals is not None:
            data['proposals'] = proposals
        return data
Example #23
def run(
    #################################################################################################
    #                                          Parameters:                                          #
    #################################################################################################
    MODELS = None,
    PRINT_OUT = True,    # Print out results at end

    # imagenet-c:
    CORRUPT_IMG = False,
    COR_NUM = 7,    # 7 is snow, 8 is frost - but they both error out :/
    COR_SEVERITY = 5,

    # Adversarial Attacks:
    adversarial_attack = False,
    adversarial_type = 'hop_skip',  # 'fast' or 'projected' recommended

    # Bit-flipping corruptions:
    stuck_at_faults = 0,  # This many bits will have "stuck-at faults" in the weights, permanently stuck at either 1 or 0
    weights_BER = 0,  # Bit Error Rate for weights (applied each batch, assuming weights are reloaded for each batch)
    activation_BER = 0,  # Bit Error Rate for activations, i.e. 1e-9 = ~(1 in 1000000000) errors in the activations

    # Model parameters:
    num_batches = 1,  # Number of loops performed, each with a new batch of images
    batch_size = 4,  # Number of images processed in a batch (in parallel)
    val_image_dir = 'val/',  # The directory where validation images are stored
    voting_heuristic = 'sum all',  # Determines the algorithm used to predict between multiple models

    cuda = torch.cuda.is_available()
):
    if MODELS is None:
        MODELS = ['resnext101_32x8d', 'densenet161', 'inception_v3']  # For an ensemble, put >1 network here
    reset_bit_flip_counters()   # Do this at start in case calling run() multiple times

    #################################################################################################
    #                                           Runtime:                                            #
    #################################################################################################
    # Instantiate the model(s)
    networks = []
    for i, m in enumerate(MODELS):
        net = get_model(m)
        if cuda:
            net = net.cuda()
        net.name = str(i) + '_' + net.__class__.__name__  # Give the net a unique name (used by bit_flipping.py)
        net.eval()  # Put in evaluation mode (already pretrained)
        if stuck_at_faults != 0:
            net = flip_n_bits_in_weights(stuck_at_faults, net)  # Introduce stuck-ats
        if activation_BER != 0:  # If nonzero chance of activation bit flips
            net = add_activation_bit_flips(net, activation_BER)  # Add layers to flip activation bits
        networks.append(net)

    if CORRUPT_IMG:
        print('Corrupting with COR_NUM: ' + str(COR_NUM) + ' and COR_SEVERITY: ' + str(COR_SEVERITY))

    # Run each batch
    total_correct = 0
    for batch_num in range(num_batches):
        # Load images and prepare them in a batch
        image_paths = random.sample(os.listdir(val_image_dir), batch_size)
        gt_labels = torch.tensor([get_label(image) for image in image_paths])  # Ground-truth label for each image

        batch_t = torch.empty((batch_size, 3, 224, 224))  # Shape of [N, C, H, W]
        for i in range(batch_size):
            img = Image.open(val_image_dir + '/' + image_paths[i]).convert("RGB")
            img = toSizeCenter(img)
            if CORRUPT_IMG:
                pic_np = np.array(img)  # numpy arr for corruption
                pic_np = corrupt(pic_np, severity=COR_SEVERITY, corruption_number=COR_NUM)  # See Readme for Calls
                img = Image.fromarray(np.uint8(pic_np))  # Back to PIL
            img_t = toTensor(img)
            # img_t = fast_gradient_method(networks[0], img_t, eps=0.25, norm=np.inf, sanity_checks=True)
            batch_t[i, :, :, :] = img_t
        if cuda:
            batch_t = batch_t.cuda()
        if adversarial_attack:
            if adversarial_type == 'fast':
                batch_t = fast_gradient_method(networks[0], batch_t, eps=0.25, norm=np.inf, sanity_checks=True)   # ~25%
            elif adversarial_type == 'projected':
                batch_t = projected_gradient_descent(networks[0], batch_t, 0.25, 0.01, 40, np.inf)                  # 0.25, 0.01, 40: ~6.25
            elif adversarial_type == 'hop_skip':
                batch_t = hop_skip_jump_attack(networks[0], batch_t, np.inf, verbose=True)

            else:
                exit("Unrecognized adversarial attack type: " + str(adversarial_type))

        # Run each network and store output in 'out'
        out = torch.empty(
            (len(MODELS), batch_size, 1000))  # Shape [M, N, 1000] where M = num models, and N = batch size
        for i, net in enumerate(networks):
            if weights_BER != 0:  # If nonzero chance of weight bit flips
                net = flip_stochastic_bits_in_weights(weights_BER, net)
            out[i, :, :] = net(batch_t)

        predictions = vote(out, voting_heuristic)  # Returns predictions, with shape [N] (one prediction per image)
        num_correct = torch.sum(predictions == gt_labels).item()  # Item() pulls the integer out of the tensor

        total_correct += num_correct
        print("Batch %d:  %d / %d" % (batch_num, num_correct, batch_size))

    #################################################################################################
    #                                         Print Results:                                        #
    #################################################################################################

    percentage_correct = (total_correct / (batch_size * num_batches)) * 100
    if PRINT_OUT:
        print("Percentage Correct: %.2f%%" % percentage_correct)
        for i, net in enumerate(networks):
            print(MODELS[i] + ':')
            print("\t Total bit flips in weights:", get_flips_in_weights(net), "or %.0f per minute of inference"
                  % (get_flips_in_weights(net) / (num_batches / (32 * 60))))  # 32 batches/second (32 fps) * 60 seconds
            print("\t Total bit flips in activations:", get_flips_in_activations(net), "or %.0f per minute of inference"
                  % (get_flips_in_activations(net) / (num_batches / (32 * 60))))  # 32 batches/second (32 fps) * 60 seconds
            print("\t", stuck_at_faults, "out of", (get_num_params(net) * 32),
                  " weight bits permanently corrupted, or %.8f%%"
                  % ((stuck_at_faults / (get_num_params(net) * 32)) * 100))

    return [percentage_correct, get_num_weight_flips(), get_num_activation_flips()]
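The vote() heuristic used above is defined elsewhere in the project. As an illustration only, a minimal sketch of what soft and majority voting over the [M, N, 1000] output tensor could look like (the 'soft' and 'majority' names here are assumptions, not the project's actual heuristics):

import torch

def vote(out: torch.Tensor, heuristic: str = 'majority') -> torch.Tensor:
    # out has shape [M, N, 1000]: M models, N images, 1000 class scores
    if heuristic == 'soft':
        # Average raw scores across models, then take the per-image argmax
        return out.mean(dim=0).argmax(dim=1)  # shape [N]
    elif heuristic == 'majority':
        # Each model votes with its own argmax; the most frequent class wins
        votes = out.argmax(dim=2)              # shape [M, N]
        return votes.mode(dim=0).values        # shape [N]
    else:
        raise ValueError('Unknown voting heuristic: ' + heuristic)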
Example #24
import imagecorruptions
import numpy as np

def corrupt(image: np.ndarray, corruption: str, severity: int):
    # Thin keyword-argument wrapper around imagecorruptions.corrupt
    return imagecorruptions.corrupt(image,
                                    corruption_name=corruption,
                                    severity=severity)
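A minimal usage sketch for this wrapper, assuming a placeholder image (get_corruption_names is part of the imagecorruptions package; valid severities run 1 to 5):

import numpy as np
from imagecorruptions import get_corruption_names

image = np.zeros((224, 224, 3), dtype=np.uint8)  # hypothetical placeholder image
for name in get_corruption_names():
    corrupted = corrupt(image, corruption=name, severity=3)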
Example #25
    def eval_child_model(self, model, data_loader, mode, robustness=False, corruptions=None, severities=None):
        """Evaluate the child model.

        Args:
          model: image model that will be evaluated.
          data_loader: dataset object to extract eval data from.
          mode: whether the model will be evaluated on 'val' or 'test' data.
          robustness: if True, also evaluate on corrupted images.
          corruptions: corruption names to apply; defaults to all names
            returned by imagecorruptions.get_corruption_names().
          severities: severities to apply, where 0 means no corruption;
            defaults to [0, 1, 2, 3, 4, 5].

        Returns:
          Accuracy of the model on the specified dataset; when robustness is
          True and mode is 'test', a dict mapping corruption name and
          severity to accuracy.
        """
        tf.logging.info('Evaluating child model in mode {}'.format(mode))
        while True:
            try:
                if mode == 'val':
                    loader = self.data_loader.dataloader_val
                elif mode == 'test':
                    loader = self.data_loader.dataloader_test
                else:
                    raise ValueError('Invalid eval mode: {}'.format(mode))
                tf.logging.info('model.batch_size is {}'.format(model.batch_size))
                if robustness:
                    if corruptions is None:
                        corruptions = ic.get_corruption_names()
                    if severities is None:
                        severities = [0, 1, 2, 3, 4, 5]
                    if mode == 'val':
                        # if mode is 'val', apply a random corruption on a random severity to each image
                        correct = 0
                        count = 0
                        for images, labels in loader:
                            images = np.transpose(images.numpy(), [0, 2, 3, 1])
                            labels = labels.numpy()
                            # produce one-hot target vector
                            labels = np.eye(model.num_classes)[labels]
                            # inverse normalization
                            means = data_loader.augmentation_transforms.MEANS[data_loader.hparams.dataset]
                            stds = data_loader.augmentation_transforms.STDS[data_loader.hparams.dataset]
                            images = ((images * stds) + means) * 255
                            # corrupt
                            images = images.astype(np.uint8)
                            for j in range(len(images)):
                                s = np.random.choice(severities, 1)[0]
                                if s == 0:
                                    continue
                                c = np.random.choice(corruptions, 1)[0]
                                images[j] = ic.corrupt(images[j], corruption_name=c, severity=s)
                            # re-apply the normalization inverted above
                            images = ((images / 255.) - means) / stds
                            preds = self.session.run(
                                model.predictions,
                                feed_dict={
                                    model.images: images,
                                    model.labels: labels,
                                })
                            correct += np.sum(
                                np.equal(np.argmax(labels, 1), np.argmax(preds, 1)))
                            count += len(preds)
                        assert count == len(loader.dataset)
                        tf.logging.info('correct: {}, total: {}'.format(correct, count))
                        return correct / count
                    else:
                        # if mode is 'test', test all corruptions and severities on each image
                        accuracies = {c: {s: 0 for s in severities} for c in corruptions}
                        for c in corruptions:
                            for s in severities:
                                if s == 0:
                                    if c == corruptions[0]:
                                        # iterate once over the clean dataset
                                        correct = 0
                                        count = 0
                                        progress_bar = tqdm.tqdm(loader)
                                        progress_bar.set_description('Clean')
                                        for images, labels in progress_bar:
                                            images = np.transpose(images.numpy(), [0, 2, 3, 1])
                                            labels = labels.numpy()
                                            # produce one-hot target vector
                                            labels = np.eye(model.num_classes)[labels]
                                            preds = self.session.run(
                                                model.predictions,
                                                feed_dict={
                                                    model.images: images,
                                                    model.labels: labels,
                                                })
                                            correct += np.sum(
                                                np.equal(np.argmax(labels, 1), np.argmax(preds, 1)))
                                            count += len(preds)
                                        assert count == len(loader.dataset)
                                        accuracies[c][s] = correct / count
                                    else:
                                        # clean performance has been evaluated before
                                        # and will just be copied here for convenience
                                        accuracies[c][s] = accuracies[corruptions[0]][s]
                                else:
                                    correct = 0
                                    count = 0

                                    progress_bar = tqdm.tqdm(loader)
                                    progress_bar.set_description('Corruption: {}, Severity: {}'.format(c, s))
                                    for images, labels in progress_bar:
                                        images = np.transpose(images.numpy(), [0, 2, 3, 1])
                                        labels = labels.numpy()
                                        # produce one-hot target vector
                                        labels = np.eye(model.num_classes)[labels]
                                        # inverse normalization
                                        means = data_loader.augmentation_transforms.MEANS[data_loader.hparams.dataset]
                                        stds = data_loader.augmentation_transforms.STDS[data_loader.hparams.dataset]
                                        images = ((images * stds) + means) * 255
                                        # corrupt
                                        images = images.astype(np.uint8)
                                        for j in range(len(images)):
                                            images[j] = ic.corrupt(images[j], corruption_name=c, severity=s)
                                        # re-apply the normalization inverted above
                                        images = ((images / 255.) - means) / stds

                                        preds = self.session.run(
                                            model.predictions,
                                            feed_dict={
                                                model.images: images,
                                                model.labels: labels,
                                            })
                                        correct += np.sum(
                                            np.equal(np.argmax(labels, 1), np.argmax(preds, 1)))
                                        count += len(preds)
                                    assert count == len(loader.dataset)
                                    accuracies[c][s] = correct / count
                    return accuracies

                else:
                    correct = 0
                    count = 0
                    for images, labels in loader:
                        images = np.transpose(images.numpy(), [0, 2, 3, 1])
                        labels = labels.numpy()
                        # produce one-hot target vector
                        labels = np.eye(model.num_classes)[labels]
                        preds = self.session.run(
                            model.predictions,
                            feed_dict={
                                model.images: images,
                                model.labels: labels,
                            })
                        correct += np.sum(
                            np.equal(np.argmax(labels, 1), np.argmax(preds, 1)))
                        count += len(preds)
                    assert count == len(loader.dataset)
                    tf.logging.info('correct: {}, total: {}'.format(correct, count))
                    accuracy = correct / count
                    tf.logging.info(
                        'Eval child model accuracy: {}'.format(accuracy))
                    # If epoch trained without raising the below errors, break
                    # from loop.
                    break
            except (tf.errors.AbortedError, tf.errors.UnavailableError) as e:
                tf.logging.info(
                    'Retryable error caught: {}.  Retrying.'.format(e))

        return accuracy
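As an illustrative follow-up (not part of the original snippet), the per-corruption dict returned in 'test' mode could be reduced to a single robustness score; the helper name below is hypothetical:

def mean_corruption_accuracy(accuracies: dict) -> float:
    # accuracies maps corruption name -> severity -> accuracy;
    # severity 0 holds the clean accuracy, so it is excluded from the mean
    scores = [acc
              for per_severity in accuracies.values()
              for s, acc in per_severity.items()
              if s != 0]
    return sum(scores) / len(scores)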