Code Example #1
def loading_data():
    mean_std = cfg.DATA.MEAN_STD
    train_simul_transform = own_transforms.Compose([
        own_transforms.Scale(int(cfg.TRAIN.IMG_SIZE[0] / 0.875)),
        own_transforms.RandomCrop(cfg.TRAIN.IMG_SIZE),
        own_transforms.RandomHorizontallyFlip()
    ])
    val_simul_transform = own_transforms.Compose([
        own_transforms.Scale(int(cfg.TRAIN.IMG_SIZE[0] / 0.875)),
        own_transforms.CenterCrop(cfg.TRAIN.IMG_SIZE)
    ])
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    target_transform = standard_transforms.Compose([
        own_transforms.MaskToTensor(),
        own_transforms.ChangeLabel(cfg.DATA.IGNORE_LABEL, cfg.DATA.NUM_CLASSES - 1)
    ])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    train_set = CityScapes('train', simul_transform=train_simul_transform, transform=img_transform,
                           target_transform=target_transform)
    train_loader = DataLoader(train_set, batch_size=cfg.TRAIN.BATCH_SIZE, num_workers=16, shuffle=True)
    val_set = CityScapes('val', simul_transform=val_simul_transform, transform=img_transform,
                         target_transform=target_transform)
    val_loader = DataLoader(val_set, batch_size=cfg.VAL.BATCH_SIZE, num_workers=16, shuffle=False)

    return train_loader, val_loader, restore_transform
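
Note: transforms.Scale was deprecated in torchvision in favor of transforms.Resize and has since been removed, so the examples on this page only run against old torchvision releases. Below is a minimal sketch of an equivalent image-side pipeline under current torchvision; IMG_SIZE is an assumed stand-in for cfg.TRAIN.IMG_SIZE[0]:

import torchvision.transforms as T

IMG_SIZE = 512  # assumption: stands in for cfg.TRAIN.IMG_SIZE[0]

train_transform = T.Compose([
    T.Resize(int(IMG_SIZE / 0.875)),  # Resize replaces the removed Scale; same semantics (int = shorter edge)
    T.RandomCrop(IMG_SIZE),
    T.RandomHorizontalFlip(),
    T.ToTensor(),
])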
Code Example #2
File: dataset.py Project: nnizhang/S2MA
    def __getitem__(self, item):
        fn = self.image_path[item].split('/')

        filename = fn[-1]
        image = Image.open(self.image_path[item]).convert('RGB')
        image_w, image_h = int(image.size[0]), int(image.size[1])
        depth = Image.open(self.depth_path[item]).convert('L')

        # data augmentation
        if self.mode == 'train':

            label = Image.open(self.label_path[item]).convert('L')
            # scale_size and img_size are module-level constants in the original file
            random_size = scale_size

            new_img = trans.Scale((random_size, random_size))(image)
            new_depth = trans.Scale((random_size, random_size))(depth)
            new_label = trans.Scale((random_size, random_size), interpolation=Image.NEAREST)(label)

            # random crop
            w, h = new_img.size
            if w != img_size and h != img_size:
                x1 = random.randint(0, w - img_size)
                y1 = random.randint(0, h - img_size)
                new_img = new_img.crop((x1, y1, x1 + img_size, y1 + img_size))
                new_depth = new_depth.crop((x1, y1, x1 + img_size, y1 + img_size))
                new_label = new_label.crop((x1, y1, x1 + img_size, y1 + img_size))

            # random flip
            if random.random() < 0.5:
                new_img = new_img.transpose(Image.FLIP_LEFT_RIGHT)
                new_depth = new_depth.transpose(Image.FLIP_LEFT_RIGHT)
                new_label = new_label.transpose(Image.FLIP_LEFT_RIGHT)

            new_img = self.transform(new_img)
            new_depth = self.depth_transform(new_depth)

            new_depth = new_depth.expand(3, img_size, img_size)
            label_256 = self.t_transform(new_label)
            if self.label_32_transform is not None and self.label_64_transform is not None \
                    and self.label_128_transform is not None:
                label_32 = self.label_32_transform(new_label)
                label_64 = self.label_64_transform(new_label)
                label_128 = self.label_128_transform(new_label)
                return new_img, new_depth, label_256, label_32, label_64, label_128, filename
        else:

            image = self.transform(image)
            depth = self.depth_transform(depth)
            depth = depth.expand(3, img_size, img_size)

            return image, depth, image_w, image_h, self.image_path[item]
Code Example #3
File: demo.py Project: zbxzc35/pytorch_CAM
def main():

    normalize = trans.Normalize(mean=[0.4001, 0.4401, 0.4687],
                                std=[0.229, 0.224, 0.225])
    transform = trans.Compose([
        trans.Scale((224, 224)),
        trans.ToTensor(),
        normalize,
    ])

    classes = {int(key): value for (key, value)
               in parse_json(configs.class_info_dir).items()}

    vgg_cam = models.vgg_cam()
    vgg_cam = vgg_cam.cuda()
    checkpoint = torch.load(configs.best_ckpt_dir)
    vgg_cam.load_state_dict(checkpoint['state_dict'])

    # hook the feature extractor
    features_blobs = []

    def hook_feature(module, input, output):
        features_blobs.append(output.data.cpu().numpy())

    finalconv_name = 'classifier'  # this is the last conv layer of the network
    vgg_cam._modules.get(finalconv_name).register_forward_hook(hook_feature)

    # get the softmax weight
    params = list(vgg_cam.parameters())
    weight_softmax = np.squeeze(params[-1].data.cpu().numpy())

    img_path = 'playing_guitar_023.jpg'
    save_fig_dir = 'cam_' + img_path
    img_pil = Image.open(img_path)
    img_tensor = transform(img_pil)
    img_variable = Variable(img_tensor.unsqueeze(0).cuda())
    transformed_img = img_variable.data.cpu().numpy()[0]
    transformed_img = untransform(transformed_img)
    outputs, _ = vgg_cam(img_variable)
    h_x = F.softmax(outputs).data.squeeze()
    probs, idx = h_x.sort(0, True)
    top_number = 5
    prob = probs.cpu().numpy()[:top_number]
    idx_ = idx.cpu().numpy()[:top_number]
    OUT_CAM = returnCAM(features_blobs[-1], weight_softmax, idx_, prob)
    plt.figure(1, figsize=(8, 6))
    ax = plt.subplot(231)
    ax.imshow(transformed_img[:, :, (2, 1, 0)])

    for b_index, (idx, prob_in, cam) in enumerate(zip(idx_, prob, OUT_CAM)):

        cl = str(classes[idx])
        height, width, _ = transformed_img.shape
        heatmap = cv2.applyColorMap(cv2.resize(cam, (width, height)), cv2.COLORMAP_JET)
        result = heatmap * 0.3 + transformed_img * 0.7
        ax = plt.subplot(2, 3, b_index + 2)
        ax.imshow(result.astype(np.uint8)[:, :, (2, 1, 0)])
        ax.set_title(('{}:{}').format(cl, ('%.3f' % prob_in)), fontsize=8)

    plt.savefig(save_fig_dir)
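
returnCAM is defined elsewhere in this project; the snippet below is a sketch of the typical implementation from the original CAM reference code, not necessarily the project's exact function. It weights the hooked conv feature maps by the classifier weights of each top class and rescales the result to an 8-bit heatmap (np and cv2 as already imported above):

def returnCAM(feature_conv, weight_softmax, class_idx, probs, size_upsample=(256, 256)):
    # feature_conv: (1, nc, h, w) activations captured by hook_feature;
    # probs is accepted to match the call site above but not used here.
    bz, nc, h, w = feature_conv.shape
    output_cam = []
    for idx in class_idx:
        cam = weight_softmax[idx].dot(feature_conv.reshape((nc, h * w)))
        cam = cam.reshape(h, w)
        cam = cam - np.min(cam)
        cam = np.uint8(255 * cam / np.max(cam))  # normalize to [0, 255]
        output_cam.append(cv2.resize(cam, size_upsample))
    return output_cam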
Code Example #4
File: test_cameras.py Project: craigmbooth/raytracer
    def test_render_scene(self):
        """Test we can render a pixel in a simple scene"""


        # Inner sphere size 0.5, centered on the origin
        s1 = shapes.Sphere()
        s1.set_transform(transforms.Scale(0.5,0.5,0.5))

        # Outer sphere centered on the origin, size 1.0
        s2 = shapes.Sphere()
        s2.material = materials.Material(
            color=colors.Color(0.8, 1.0, 0.6), diffuse=0.7, specular=0.2)

        l1 = lights.Light(
            position=points.Point(-10, 10, -10),
            intensity=colors.Color(1, 1, 1)
            )

        scene = scenes.Scene(
            objects = [s1, s2],
            lights = [l1]
        )

        cam = cameras.Camera(11, 11, math.pi/2)

        from_point = points.Point(0, 0, -5)
        to_point = points.Point(0, 0, 0)
        up = vectors.Vector(0, 1, 0)
        cam.transform = transforms.ViewTransform(from_point, to_point, up)

        image = cam.render(scene)
        self.assertEqual(image.get(5, 5),
                         colors.Color(0.3807, 0.4758, 0.2855))
Code Example #5
    def test_rotate_camera(self):
        """A view transformation matrix looking in the +ve z direction"""

        from_point = points.Point(0, 0, 0)
        to_point = points.Point(0, 0, 1)
        up = vectors.Vector(0, 1, 0)
        result = transforms.ViewTransform(from_point, to_point, up)
        self.assertEqual(result, transforms.Scale(-1, 1, -1))
Code Example #6
    def test_scaling_reflection(self):
        """Test we can reflect a point about an axis using the scaling matrix"""

        S = transforms.Scale(-1, 1, 1)
        p = points.Point(-4, 6, 8)

        p2 = S * p
        self.assertEqual(p2, points.Point(4, 6, 8))
Code Example #7
    def test_chained_transforms(self):
        """Test we can chain together transforms with the apply function"""
        point = points.Point(1, 0, 1)

        p2 = point.apply(transforms.RotateX(math.pi/2)) \
                  .apply(transforms.Scale(5, 5, 5)) \
                  .apply(transforms.Translate(10, 5, 7))

        self.assertEqual(p2, points.Point(15, 0, 7))
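
The expected point can be checked by hand with the standard 4x4 homogeneous matrices; a quick numpy sketch, independent of the project's transforms module:

import numpy as np

def rotate_x(t):
    c, s = np.cos(t), np.sin(t)
    return np.array([[1, 0, 0, 0],
                     [0, c, -s, 0],
                     [0, s, c, 0],
                     [0, 0, 0, 1]])

scale = np.diag([5.0, 5.0, 5.0, 1.0])
translate = np.eye(4)
translate[:3, 3] = [10, 5, 7]

p = np.array([1.0, 0.0, 1.0, 1.0])  # w = 1 marks a point
p2 = translate @ scale @ rotate_x(np.pi / 2) @ p
print(np.round(p2, 5))  # ~[15, 0, 7, 1], matching the assertion above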
Code Example #8
    def test_refractive_index_intersections(self):
        """Test we can calculate the refractive indices between intersections"""

        # Set up a scene with three glass spheres: one of size 2 centered on
        # the origin, and inside it two more, offset along z by different amounts
        A = shapes.Sphere(material=materials.Material(refractive_index=1.5,
                                                      transparency=1.0))
        B = shapes.Sphere(material=materials.Material(refractive_index=2.0,
                                                      transparency=1.0))
        C = shapes.Sphere(material=materials.Material(refractive_index=2.5,
                                                      transparency=1.0))

        A.set_transform(transforms.Scale(2, 2, 2))
        B.set_transform(transforms.Translate(0, 0, -0.25))
        C.set_transform(transforms.Translate(0, 0, 0.25))

        r = rays.Ray(points.Point(0, 0, -4), vectors.Vector(0, 0, 1))

        xs = intersections.Intersections(intersections.Intersection(A, 2),
                                         intersections.Intersection(B, 2.75),
                                         intersections.Intersection(C, 3.25),
                                         intersections.Intersection(B, 4.75),
                                         intersections.Intersection(C, 5.25),
                                         intersections.Intersection(A, 6))

        expected_results = [
            {"n1": 1.0, "n2": 1.5},
            {"n1": 1.5, "n2": 2.0},
            {"n1": 2.0, "n2": 2.5},
            {"n1": 2.5, "n2": 2.5},
            {"n1": 2.5, "n2": 1.5},
            {"n1": 1.5, "n2": 1.0},
        ]

        for index, expected in enumerate(expected_results):

            comps = xs.intersections[index].precompute(r, all_intersections=xs)
            self.assertDictEqual(expected, {"n1": comps.n1, "n2": comps.n2})
Code Example #9
    def test_pattern_transformation(self):
        """Test that pattern is affected by a pattern transform"""

        shape = shapes.Sphere()

        p = patterns.StripePattern(WHITE, BLACK)
        p.set_transform(transforms.Scale(2, 2, 2))

        self.assertEqual(p.pattern_at_shape(shape, points.Point(1.5, 0, 0)),
                         WHITE)
Code Example #10
    def test_pattern_object_transformation(self):
        """Test that pattern is affected by pattern and object transforms"""

        shape = shapes.Sphere()
        shape.set_transform(transforms.Scale(2, 2, 2))

        p = patterns.StripePattern(WHITE, BLACK)
        p.set_transform(transforms.Translate(0.5, 0, 0))

        self.assertEqual(p.pattern_at_shape(shape, points.Point(2.5, 0, 0)),
                         WHITE)
Code Example #11
def test_net(net):

    for test_dir_img in test_lists:

        test_loader = get_loader(test_dir_img,
                                 img_size,
                                 1,
                                 mode='test',
                                 num_thread=1)

        print('''
                   Starting testing:
                       dataset: {}
                       Testing size: {}
                   '''.format(
            test_dir_img.split('/')[-1], len(test_loader.dataset)))

        for i, data_batch in enumerate(test_loader):
            print('{}/{}'.format(i, len(test_loader.dataset)))
            images, depths, image_w, image_h, image_path = data_batch
            images, depths = Variable(images.cuda()), Variable(depths.cuda())

            outputs_image, outputs_depth = net(images, depths)
            _, _, _, _, _, imageBran_output = outputs_image
            _, _, _, _, _, depthBran_output = outputs_depth

            image_w, image_h = int(image_w[0]), int(image_h[0])

            output_imageBran = F.sigmoid(imageBran_output)
            output_depthBran = F.sigmoid(depthBran_output)

            output_imageBran = output_imageBran.data.cpu().squeeze(0)
            output_depthBran = output_depthBran.data.cpu().squeeze(0)

            transform = trans.Compose(
                [transforms.ToPILImage(),
                 trans.Scale((image_w, image_h))])
            outputImageBranch = transform(output_imageBran)
            outputDepthBranch = transform(output_depthBran)

            dataset = image_path[0].split('RGBdDataset_processed')[1].split('/')[1]

            filename = image_path[0].split('/')[-1].split('.')[0]

            # save image branch output
            save_test_path = save_test_path_root + dataset + '/' + test_model + '/'
            if not os.path.exists(save_test_path):
                os.makedirs(save_test_path)
            outputImageBranch.save(
                os.path.join(save_test_path, filename + '.png'))
Code Example #12
    def test_normal_at__transformed(self):
        """Test we can calculate normal vectors on a transformed sphere"""

        s = shapes.Sphere()
        s.set_transform(transforms.Translate(0, 1, 0))
        n = s.normal_at(points.Point(0, 1.70711, -0.70711))

        self.assertEqual(n, vectors.Vector(0, 0.70711, -0.70711))

        s.set_transform(
            transforms.Scale(1, 0.5, 1) * transforms.RotateZ(math.pi / 5))

        n = s.normal_at(points.Point(0, math.sqrt(2) / 2, -math.sqrt(2) / 2))
        self.assertEqual(n, vectors.Vector(0, 0.97014, -0.24254))
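
This test relies on the standard rule that a surface normal transforms by the inverse transpose of the object's matrix and is then renormalized. A numpy sketch of that rule reproducing the second assertion, under the same conventions as the sketch after example #7:

import numpy as np

def rotate_z(t):
    c, s = np.cos(t), np.sin(t)
    return np.array([[c, -s, 0, 0],
                     [s, c, 0, 0],
                     [0, 0, 1, 0],
                     [0, 0, 0, 1]])

M = np.diag([1.0, 0.5, 1.0, 1.0]) @ rotate_z(np.pi / 5)  # Scale(1, 0.5, 1) * RotateZ(pi/5)
world_p = np.array([0.0, np.sqrt(2) / 2, -np.sqrt(2) / 2, 1.0])

obj_n = np.linalg.inv(M) @ world_p  # on a unit sphere the object-space normal equals the object-space point
obj_n[3] = 0.0                      # it is a direction, so drop the point's w component
n = (np.linalg.inv(M).T @ obj_n)[:3]  # inverse-transpose rule
print(np.round(n / np.linalg.norm(n), 5))  # ~[0, 0.97014, -0.24254]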
Code Example #13
File: test_rays.py Project: craigmbooth/raytracer
    def test_ray_transforms(self):
        """Test that we can transform a ray"""

        origin = points.Point(1, 2, 3)
        direction = vectors.Vector(0, 1, 0)
        r = rays.Ray(origin, direction)

        r2 = r.transform(transforms.Translate(3, 4, 5))
        self.assertEqual(r2.origin, points.Point(4, 6, 8))
        self.assertEqual(r2.direction, vectors.Vector(0, 1, 0))

        r3 = r.transform(transforms.Scale(2, 3, 4))
        self.assertEqual(r3.origin, points.Point(2, 6, 12))
        self.assertEqual(r3.direction, vectors.Vector(0, 3, 0))
Code Example #14
    def test_simple_scaling(self):
        """Test that we can scale a point"""

        S = transforms.Scale(2, 3, 4)
        p = points.Point(-4, 6, 8)
        v = vectors.Vector(-4, 6, 8)

        p2 = S * p
        self.assertEqual(p2, points.Point(-8, 18, 32))

        p3 = S * v
        self.assertEqual(p3, vectors.Vector(-8, 18, 32))

        p4 = S.inverse() * p
        self.assertEqual(p4, points.Point(-2, 2, 2))
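
The point/vector behavior in this and the previous example follows from homogeneous coordinates: points carry w = 1 and vectors w = 0, so scaling affects both while translation only moves points. A quick numpy check, independent of the project's classes:

import numpy as np

S = np.diag([2.0, 3.0, 4.0, 1.0])   # Scale(2, 3, 4)
T = np.eye(4)
T[:3, 3] = [10, 5, 7]               # Translate(10, 5, 7)

p = np.array([-4.0, 6.0, 8.0, 1.0])  # point:  w = 1
v = np.array([-4.0, 6.0, 8.0, 0.0])  # vector: w = 0

print(S @ p)  # [-8. 18. 32.  1.]  scaling moves points ...
print(S @ v)  # [-8. 18. 32.  0.]  ... and vectors alike
print(T @ v)  # [-4.  6.  8.  0.]  translation leaves vectors unchanged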
Code Example #15
    def test_intersections_with_transformed_ray__scaling(self):
        """Test we get the correct intersections after adding a scaling
        to a shape
        """

        s = shapes.Sphere()
        s.set_transform(transforms.Scale(2, 2, 2))

        r = rays.Ray(points.Point(0, 0, -5), vectors.Vector(0, 0, 1))

        result = s.intersect(r)
        self.assertEqual(result.intersections[0].t, 3)
        self.assertEqual(result.intersections[1].t, 7)
        self.assertEqual(result.intersections[0].shape, s)
        self.assertEqual(result.intersections[1].shape, s)
Code Example #16
File: test.py Project: akbratt/PC_AutoFlow
def main():
    in_z = 0

    test_volpath = os.path.join(args.datapath, 'test')
    out_file = os.path.join(args.datapath, 'fakes')
    checkpoint_path = os.path.join(args.datapath, 'checkpoint.pth')
    test_segpath = test_volpath
    double_vol = False
    model = models.Net23(2)
    cuda = args.cuda
    if cuda:
        model.cuda()

    model.load_state_dict(torch.load(checkpoint_path))

    batch_size = args.batch_size
    orig_dim = 256
    sqr = transforms.Square()
    center = transforms.CenterCrop2(224)
    scale = transforms.Scale(orig_dim)
    transform_plan = [sqr, scale, center]
    num_labels = 2
    series_names = ['Mag']
    seg_series_names = ['AV']

    f = preprocess.gen_filepaths(test_segpath)

    mult_inds = []
    for i in f:
        if 'volume' in i:
            mult_inds.append(int(re.findall(r'\d+', i)[0]))

    mult_inds = sorted(mult_inds)

    mult_inds = np.unique(mult_inds)
    mult_inds = mult_inds[0:5]

    volpaths, segpaths = utils.get_paths(mult_inds, f, series_names, \
            seg_series_names, test_volpath, test_segpath)

    out = utils.test_net_cheap(mult_inds, in_z, model,\
            transform_plan, orig_dim, batch_size, out_file, num_labels,\
            volpaths, segpaths, nrrd=True, vol_only=double_vol,\
            get_dice=True, make_niis=False, cuda=cuda)
    out_csv = os.path.join(args.datapath, 'out.csv')
    out.to_csv(out_csv, index=False)
Code Example #17
File: data.py Project: yilundu/Miniplaces-Pytorch-
    def __init__(self, data_path, split, augment=True, load_everything=True):
        self.count = 0
        file_path = os.path.join(data_path, 'miniplaces_256_{}.h5'.format(split))
        self.dataset = h5py.File(file_path, 'r')  # read-only; newer h5py requires an explicit mode

        self.normalize = transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225])

        transform = [
            transforms.Scale(256),
            transforms.RandomCrop(224),
            # transforms.RandomResizedCrop(224)
            ]

        if augment:
            transform.extend([
            transforms.ColorJitter(brightness=0.1, contrast=0.0, saturation=0.3, hue=0.05),
            transforms.RandomHorizontalFlip(),
            # transforms.RandomVerticalFlip(),
            ])

        transform += [transforms.ToTensor()]

        if augment:
            transform.append(
                affine_transforms.Affine(rotation_range=5.0, zoom_range=(0.85, 1.0), fill_mode='constant')
            )
        # if augment:
        #     transform.append(
        #     affine_transforms.Affine(rotation_range=10.0, translation_range=0.1, zoom_range=(0.5, 1.0), fill_mode='constant')
        #     )

        transform += [
            self.normalize]

        self.preprocess = transforms.Compose(transform)

        self.split = split
        if split != 'test':
            self.labels = np.array(self.dataset['labels'])
        self.load_everything = load_everything
        if self.load_everything:
            self.images = np.array(self.dataset['images'])
Code Example #18
    def setUp(self):
        """Set up a default scene for quick testing"""

        # Inner sphere size 0.5, centered on the origin
        self.s1 = shapes.Sphere()
        self.s1.set_transform(transforms.Scale(0.5, 0.5, 0.5))

        # Outer sphere centered on the origin, size 1.0
        self.s2 = shapes.Sphere()
        self.s2.material = materials.Material(color=colors.Color(
            0.8, 1.0, 0.6),
                                              diffuse=0.7,
                                              specular=0.2)

        self.l1 = lights.Light(position=points.Point(-10, 10, -10),
                               intensity=colors.Color(1, 1, 1))

        self.default_scene = scenes.Scene(objects=[self.s1, self.s2],
                                          lights=[self.l1])
Code Example #19
File: train.py Project: gmayday1997/ACENet
def main():

  #########  configs ###########
  best_metric = 0

  pretrain_deeplab_path = os.path.join(configs.py_dir, 'model/deeplab_coco.pth')

  ######  load datasets ########
  train_transform_det = trans.Compose([
      trans.Scale((321, 321)),
  ])
  val_transform_det = trans.Compose([
      trans.Scale((321, 321)),
  ])

  train_data = voc_dates.VOCDataset(configs.train_img_dir, configs.train_label_dir,
                                    configs.train_txt_dir, 'train', transform=True,
                                    transform_med=train_transform_det)
  train_loader = Data.DataLoader(train_data, batch_size=configs.batch_size,
                                 shuffle=True, num_workers=4, pin_memory=True)

  val_data = voc_dates.VOCDataset(configs.val_img_dir, configs.val_label_dir,
                                  configs.val_txt_dir, 'val', transform=True,
                                  transform_med=val_transform_det)
  val_loader = Data.DataLoader(val_data, batch_size=configs.batch_size,
                               shuffle=False, num_workers=4, pin_memory=True)
  ######  build  models ########
  deeplab = models.deeplab()
  deeplab_pretrain_model = utils.load_deeplab_pretrain_model(pretrain_deeplab_path)
  deeplab.init_parameters(deeplab_pretrain_model)
  deeplab = deeplab.cuda()

  params = list(deeplab.parameters())
  #########

  if resume:
      checkpoint = torch.load(configs.best_ckpt_dir)
      deeplab.load_state_dict(checkpoint['state_dict'])
      print('resume success')

  ######### optimizer ##########
  ######## how to set different learning rates for different layers #########
  optimizer = torch.optim.SGD(
      [
          {'params': get_parameters(deeplab, bias=False)},
          {'params': get_parameters(deeplab, bias=True),
           'lr': configs.learning_rate * 2, 'weight_decay': 0},
      ], lr=configs.learning_rate, momentum=configs.momentum, weight_decay=configs.weight_decay)

  ######## iter img_label pairs ###########

  for epoch in range(20):

      utils.adjust_learning_rate(configs.learning_rate, optimizer, epoch)
      for batch_idx, batch in enumerate(train_loader):

          img_idx, label_idx, filename, height, width = batch
          img, label = Variable(img_idx.cuda()), Variable(label_idx.cuda())
          prediction, weights = deeplab(img)
          loss = utils.cross_entropy2d(prediction, label, size_average=False)
          optimizer.zero_grad()
          loss.backward()
          optimizer.step()

          if (batch_idx) % 20 == 0:
              print("Epoch [%d/%d] Loss: %.4f" % (epoch, batch_idx, loss.data[0]))

          if (batch_idx) % 4000 == 0:

              current_metric = validate(deeplab, val_loader, epoch)
              print(current_metric)

      current_metric = validate(deeplab, val_loader, epoch)

      if current_metric > best_metric:

         torch.save({'state_dict': deeplab.state_dict()},
                     os.path.join(configs.save_ckpt_dir, 'deeplab' + str(epoch) + '.pth'))

         shutil.copy(os.path.join(configs.save_ckpt_dir, 'deeplab' + str(epoch) + '.pth'),
                     os.path.join(configs.save_ckpt_dir, 'model_best.pth'))
         best_metric = current_metric

      if epoch % 5 == 0:
          torch.save({'state_dict': deeplab.state_dict()},
                       os.path.join(configs.save_ckpt_dir, 'deeplab' + str(epoch) + '.pth'))
Code Example #20
def main():

    configs = config.VOC_config()
    transform_det = trans.Compose([
        trans.Scale((321, 321)),
    ])

    pretrain_model = os.path.join(configs.save_ckpt_dir,
                                  'deeplab_model_best_iu0.57.pth')

    test_data = dates.VOCDataset(configs.val_img_dir,
                                 configs.val_label_dir,
                                 configs.val_txt_dir,
                                 'val',
                                 transform=True,
                                 transform_med=transform_det)

    test_loader = Data.DataLoader(test_data,
                                  batch_size=configs.batch_size,
                                  shuffle=False,
                                  num_workers=4,
                                  pin_memory=True)
    ######### build vgg model ##########
    deeplab = models.deeplab()
    deeplab = deeplab.cuda()

    checkpoint = torch.load(pretrain_model)
    deeplab.load_state_dict(checkpoint['state_dict'])
    params = list(deeplab.parameters())

    save_pred_dir = os.path.join(configs.save_pred_dir, 'weights')

    if not os.path.exists(save_pred_dir):
        os.mkdir(save_pred_dir)

    deeplab.eval()

    # hook the feature extractor
    features_blobs = []

    def hook_feature(module, input, output):
        features_blobs.append(output.data.cpu().numpy())

    finalconv_name = 'conv5'  # this is the last conv layer of the network
    deeplab._modules.get(finalconv_name).register_forward_hook(hook_feature)

    for batch_idx, batch in enumerate(test_loader):
        inputs, targets, filename, height, width = batch
        inputs, targets = inputs.cuda(), targets.cuda()
        height, width, filename = height.numpy()[0], width.numpy()[0], filename[0]
        transformed_img = inputs.cpu().numpy()[0]
        transformed_img = untransform(transformed_img)
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        outputs, cnn_features = deeplab(inputs)
        transforms_rgb_rescal = cv2.resize(transformed_img, (width, height))
        #conv5_features = features_blobs[-1]
        #cnn_features_numpy = cnn_features.data.cpu().numpy()
        weights = weights_generation(cnn_features)

        save_weights_dir = os.path.join(save_pred_dir, filename)
        if batch_idx < 10:
            weights_numpy = weights.data.cpu().numpy()
            loc_weights = utils.attention_weights_collection(weights_numpy)
            utils.attention_weights_visulize(loc_weights,
                                             transforms_rgb_rescal,
                                             save_weights_dir)
        else:
            break
Code Example #21
def main_tr(args, crossVal):
    dataLoad = ld.LoadData(args.data_dir, args.classes)
    data = dataLoad.processData(crossVal, args.data_name)

    # load the model
    model = net.MiniSeg(args.classes, aux=True)
    if not osp.isdir(osp.join(args.savedir + '_mod' + str(args.max_epochs))):
        os.mkdir(args.savedir + '_mod' + str(args.max_epochs))
    if not osp.isdir(
            osp.join(args.savedir + '_mod' + str(args.max_epochs),
                     args.data_name)):
        os.mkdir(
            osp.join(args.savedir + '_mod' + str(args.max_epochs),
                     args.data_name))
    saveDir = args.savedir + '_mod' + str(
        args.max_epochs) + '/' + args.data_name + '/' + args.model_name
    # create the directory if not exist
    if not osp.exists(saveDir):
        os.mkdir(saveDir)

    if args.gpu and torch.cuda.device_count() > 1:
        #model = torch.nn.DataParallel(model)
        model = DataParallelModel(model)
    if args.gpu:
        model = model.cuda()

    total_paramters = sum([np.prod(p.size()) for p in model.parameters()])
    print('Total network parameters: ' + str(total_paramters))

    # define optimization criteria
    weight = torch.from_numpy(
        data['classWeights'])  # convert the numpy array to torch
    if args.gpu:
        weight = weight.cuda()

    criteria = CrossEntropyLoss2d(weight, args.ignore_label)  #weight
    if args.gpu and torch.cuda.device_count() > 1:
        criteria = DataParallelCriterion(criteria)
    if args.gpu:
        criteria = criteria.cuda()

    # compose the data with transforms
    trainDataset_main = myTransforms.Compose([
        myTransforms.Normalize(mean=data['mean'], std=data['std']),
        myTransforms.Scale(args.width, args.height),
        myTransforms.RandomCropResize(int(32. / 1024. * args.width)),
        myTransforms.RandomFlip(),
        myTransforms.ToTensor()
    ])
    trainDataset_scale1 = myTransforms.Compose([
        myTransforms.Normalize(mean=data['mean'], std=data['std']),
        myTransforms.Scale(int(args.width * 1.5), int(args.height * 1.5)),
        myTransforms.RandomCropResize(int(100. / 1024. * args.width)),
        myTransforms.RandomFlip(),
        myTransforms.ToTensor()
    ])

    trainDataset_scale2 = myTransforms.Compose([
        myTransforms.Normalize(mean=data['mean'], std=data['std']),
        myTransforms.Scale(int(args.width * 1.25), int(args.height * 1.25)),
        myTransforms.RandomCropResize(int(100. / 1024. * args.width)),
        myTransforms.RandomFlip(),
        myTransforms.ToTensor()
    ])
    trainDataset_scale3 = myTransforms.Compose([
        myTransforms.Normalize(mean=data['mean'], std=data['std']),
        myTransforms.Scale(int(args.width * 0.75), int(args.height * 0.75)),
        myTransforms.RandomCropResize(int(32. / 1024. * args.width)),
        myTransforms.RandomFlip(),
        myTransforms.ToTensor()
    ])

    valDataset = myTransforms.Compose([
        myTransforms.Normalize(mean=data['mean'], std=data['std']),
        myTransforms.Scale(args.width, args.height),
        myTransforms.ToTensor()
    ])

    # since we are training from scratch, we create data loaders at different scales
    # so that we can generate more augmented data and prevent the network from overfitting
    trainLoader = torch.utils.data.DataLoader(myDataLoader.Dataset(
        data['trainIm'], data['trainAnnot'], transform=trainDataset_main),
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=args.num_workers,
                                              pin_memory=True,
                                              drop_last=True)

    trainLoader_scale1 = torch.utils.data.DataLoader(
        myDataLoader.Dataset(data['trainIm'],
                             data['trainAnnot'],
                             transform=trainDataset_scale1),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=True)

    trainLoader_scale2 = torch.utils.data.DataLoader(
        myDataLoader.Dataset(data['trainIm'],
                             data['trainAnnot'],
                             transform=trainDataset_scale2),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=True)
    trainLoader_scale3 = torch.utils.data.DataLoader(
        myDataLoader.Dataset(data['trainIm'],
                             data['trainAnnot'],
                             transform=trainDataset_scale3),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=True)

    valLoader = torch.utils.data.DataLoader(myDataLoader.Dataset(
        data['valIm'], data['valAnnot'], transform=valDataset),
                                            batch_size=args.batch_size,
                                            shuffle=False,
                                            num_workers=args.num_workers,
                                            pin_memory=True)
    max_batches = len(trainLoader) + len(trainLoader_scale1) + len(
        trainLoader_scale2) + len(trainLoader_scale3)

    if args.gpu:
        cudnn.benchmark = True

    start_epoch = 0

    if args.pretrained is not None:
        state_dict = torch.load(args.pretrained)
        new_keys = []
        new_values = []
        for idx, key in enumerate(state_dict.keys()):
            if 'pred' not in key:
                new_keys.append(key)
                new_values.append(list(state_dict.values())[idx])
        new_dict = OrderedDict(list(zip(new_keys, new_values)))
        model.load_state_dict(new_dict, strict=False)
        print('pretrained model loaded')

    if args.resume is not None:
        if osp.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch']
            args.lr = checkpoint['lr']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    log_file = osp.join(saveDir, 'trainValLog_' + args.model_name + '.txt')
    if osp.isfile(log_file):
        logger = open(log_file, 'a')
    else:
        logger = open(log_file, 'w')
        logger.write("Parameters: %s" % (str(total_paramters)))
        logger.write("\n%s\t%s\t\t%s\t%s\t%s\t%s\tlr" %
                     ('CrossVal', 'Epoch', 'Loss(Tr)', 'Loss(val)',
                      'mIOU (tr)', 'mIOU (val)'))
    logger.flush()

    optimizer = torch.optim.Adam(model.parameters(),
                                 args.lr, (0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=1e-4)
    maxmIOU = 0
    maxEpoch = 0
    print(args.model_name + '-CrossVal: ' + str(crossVal + 1))
    for epoch in range(start_epoch, args.max_epochs):
        # train for one epoch
        cur_iter = 0

        train(args, trainLoader_scale1, model, criteria, optimizer, epoch,
              max_batches, cur_iter)
        cur_iter += len(trainLoader_scale1)
        train(args, trainLoader_scale2, model, criteria, optimizer, epoch,
              max_batches, cur_iter)
        cur_iter += len(trainLoader_scale2)
        train(args, trainLoader_scale3, model, criteria, optimizer, epoch,
              max_batches, cur_iter)
        cur_iter += len(trainLoader_scale3)
        lossTr, overall_acc_tr, per_class_acc_tr, per_class_iu_tr, mIOU_tr, lr = \
                train(args, trainLoader, model, criteria, optimizer, epoch, max_batches, cur_iter)

        # evaluate on validation set
        lossVal, overall_acc_val, per_class_acc_val, per_class_iu_val, mIOU_val = \
                val(args, valLoader, model, criteria)

        torch.save(
            {
                'epoch': epoch + 1,
                'arch': str(model),
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lossTr': lossTr,
                'lossVal': lossVal,
                'iouTr': mIOU_tr,
                'iouVal': mIOU_val,
                'lr': lr
            },
            osp.join(
                saveDir, 'checkpoint_' + args.model_name + '_crossVal' +
                str(crossVal + 1) + '.pth.tar'))

        # save the model also
        model_file_name = osp.join(
            saveDir, 'model_' + args.model_name + '_crossVal' +
            str(crossVal + 1) + '_' + str(epoch + 1) + '.pth')
        torch.save(model.state_dict(), model_file_name)

        logger.write(
            "\n%d\t\t%d\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.7f" %
            (crossVal + 1, epoch + 1, lossTr, lossVal, mIOU_tr, mIOU_val, lr))
        logger.flush()
        print("\nEpoch No. %d:\tTrain Loss = %.4f\tVal Loss = %.4f\t mIOU(tr) = %.4f\t mIOU(val) = %.4f\n" \
                % (epoch + 1, lossTr, lossVal, mIOU_tr, mIOU_val))

        if mIOU_val >= maxmIOU:
            maxmIOU = mIOU_val
            maxEpoch = epoch + 1
        torch.cuda.empty_cache()
    logger.flush()
    logger.close()
    return maxEpoch, maxmIOU
Code Example #22
File: celebA_in.py Project: yuanmengzhixing/magan
import os

import tensorflow as tf
from PIL import Image

import transforms

size = 64
channel = 3
max_no = 202599
img_key = 'img_raw'
file_tpl = '%06d.jpg'  # zero-padded: celebA files are named 000001.jpg ... 202599.jpg
home_dir = os.path.expanduser('~')
celeb_source = os.path.join(home_dir, "Pictures/img_align_celeba")

default_attribs = {img_key: tf.FixedLenFeature([], tf.string)}

default_transf = transforms.Compose([
    transforms.Scale(size),
    transforms.CenterCrop(size),
    transforms.ToFloat(),
    transforms.Normalize(0.5, 0.5)
])


def process_celebA(dest='celebA',
                   celeb_source=celeb_source,
                   force=False,
                   transform=default_transf,
                   files=None):
    dest_file = '%s.tfr' % dest
    if os.path.exists(dest_file) and not force:
        return dest_file
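
The snippet is truncated before the TFRecord is actually written. For illustration only, a record serialized under img_key could be parsed back with default_attribs roughly like this; it is a sketch assuming the TF1-era API the file already uses, and the dtype is an assumption that must match whatever process_celebA actually serialized:

def parse_celeb_example(serialized):
    # Hypothetical reader for records written by process_celebA; float32 is
    # assumed here because default_transf converts images to normalized floats.
    features = tf.parse_single_example(serialized, default_attribs)
    img = tf.decode_raw(features[img_key], tf.float32)
    return tf.reshape(img, (size, size, channel))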
Code Example #23
def get_loader(img_root,
               label_root,
               img_size,
               batch_size,
               mode='train',
               num_thread=1):
    shuffle = False
    mean = torch.Tensor(3, 256, 256)
    mean[0, :, :] = 125.5325
    mean[1, :, :] = 118.1743
    mean[2, :, :] = 101.3507
    # mean = torch.Tensor([123.68, 116.779, 103.939]).view(3, 1, 1) / 255
    if mode == 'train':
        transform = trans.Compose([
            trans.Scale((img_size, img_size)),
            # trans.ToTensor  image -> [0,255]
            trans.ToTensor(),
            trans.Lambda(lambda x: x - mean)
        ])
        t_transform = trans.Compose([
            trans.Scale((img_size, img_size)),
            # transform.ToTensor  label -> [0,1]
            transforms.ToTensor(),
            # transforms.Lambda(lambda x: torch.round(x))  # TODO: this may be unnecessary
        ])
        label_32_transform = trans.Compose([
            trans.Scale((32, 32)),
            transforms.ToTensor(),
        ])
        label_64_transform = trans.Compose([
            trans.Scale((64, 64)),
            transforms.ToTensor(),
        ])
        label_128_transform = trans.Compose([
            trans.Scale((128, 128)),
            transforms.ToTensor(),
        ])
        shuffle = True
    else:
        # define transform to images
        transform = trans.Compose([
            trans.Scale((img_size, img_size)),
            trans.ToTensor(),
            trans.Lambda(lambda x: x - mean)
        ])

        # define transform to ground truth
        t_transform = trans.Compose([
            trans.Scale((img_size, img_size)),
            transforms.ToTensor(),
            #transforms.Lambda(lambda x: torch.round(x))  # TODO: this may be unnecessary
        ])
    if mode == 'train':
        dataset = ImageData(img_root, label_root, transform, t_transform,
                            label_32_transform, label_64_transform,
                            label_128_transform)
        # print(dataset.image_path)
        data_loader = data.DataLoader(dataset=dataset,
                                      batch_size=batch_size,
                                      shuffle=shuffle,
                                      num_workers=num_thread)
        return data_loader
    else:
        dataset = ImageData(img_root,
                            label_root,
                            transform,
                            t_transform,
                            label_32_transform=None,
                            label_64_transform=None,
                            label_128_transform=None)
        # print(dataset.image_path)
        data_loader = data.DataLoader(dataset=dataset,
                                      batch_size=batch_size,
                                      shuffle=shuffle,
                                      num_workers=num_thread)
        return data_loader
Code Example #24
def main():

    ######### config  ###########

    best_metric = 0
    pretrain_vgg16_path = os.path.join(configs.py_dir,
                                       'model/vgg16_from_caffe.pth')

    ########  load training data ########
    ######### action 40  ############

    normalize = trans.Normalize(mean=[0.4001, 0.4401, 0.4687],
                                std=[0.229, 0.224, 0.225])
    #std=[1, 1, 1])
    train_transform = trans.Compose([
        trans.RandomCrop(224, padding=4),
        trans.RandomHorizontalFlip(),
        trans.ToTensor(),
        normalize,
    ])

    val_transform = trans.Compose([
        trans.Scale((224, 224)),
        trans.ToTensor(),
        normalize,
    ])

    train_data = imgfolder.ImageFolder(os.path.join(configs.data_dir,
                                                    'img/train'),
                                       transform=train_transform)
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=configs.batch_size,
                                               shuffle=True,
                                               num_workers=4,
                                               pin_memory=True)

    val_data = imgfolder.ImageFolder(os.path.join(configs.data_dir, 'img/val'),
                                     transform=val_transform)
    val_loader = Data.DataLoader(val_data,
                                 batch_size=configs.batch_size,
                                 shuffle=False,
                                 num_workers=4,
                                 pin_memory=True)

    ######### build vgg model ##########

    vgg_cam = models.vgg_cam()
    vgg_pretrain_model = utils.load_pretrain_model(pretrain_vgg16_path)
    vgg_cam.copy_params_from_pretrain_vgg(vgg_pretrain_model,
                                          init_fc8=configs.init_random_fc8)
    vgg_cam = vgg_cam.cuda()

    ########  resume  ###########
    if resume:
        checkpoint = torch.load(
            '/media/cheer/2T/train_pytorch/cam/ckpt/model_best.pth')
        vgg_cam.load_state_dict(checkpoint['state_dict'])
    ########## optim  ###########

    optimizer = torch.optim.SGD(vgg_cam.parameters(),
                                lr=configs.learning_rate,
                                momentum=configs.momentum,
                                weight_decay=configs.weight_decay)
    #optimizer = torch.optim.Adam(vgg_cam.parameters(),lr=configs.learning_rate,weight_decay=configs.weight_decay)
    loss_fun = nn.CrossEntropyLoss()

    for epoch in range(200):

        adjust_learning_rate(optimizer, epoch)
        for step, (img_x, label_x) in enumerate(train_loader):

            img, label = Variable(img_x.cuda()), Variable(label_x.cuda())
            predict, _ = vgg_cam(img)
            loss = loss_fun(predict, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (step) % 20 == 0:
                print("Epoch [%d/%d] Loss: %.4f" % (epoch, step, loss.data[0]))

        current_metric = test(vgg_cam, val_loader, loss_fun)

        if current_metric > best_metric:

            torch.save({'state_dict': vgg_cam.state_dict()},
                       os.path.join(configs.save_ckpt_dir,
                                    'cam' + str(epoch) + '.pth'))

            shutil.copy(
                os.path.join(configs.save_ckpt_dir,
                             'cam' + str(epoch) + '.pth'),
                os.path.join(configs.save_ckpt_dir, 'model_best.pth'))
            best_metric = current_metric

        if epoch % 10 == 0:

            torch.save({'state_dict': vgg_cam.state_dict()},
                       os.path.join(configs.save_ckpt_dir,
                                    'cam' + str(epoch) + '.pth'))
Code Example #25
def main():
    in_z = 0
    volpath = os.path.join(args.datapath, 'train')
    segpath = volpath

    batch_size = args.batch_size
    orig_dim = 256
    sqr = transforms.Square()
    aff = transforms.Affine()
    crop = transforms.RandomCrop(224)
    scale = transforms.Scale(orig_dim)
    rotate = transforms.Rotate(0.5, 30)
    noise = transforms.Noise(0.02)
    flip = transforms.Flip()
    transform_plan = [sqr, scale, aff, rotate, crop, flip, noise]
    lr = 1e-4
    series_names = ['Mag']
    seg_series_names = ['AV']

    model = models.Net23(2)

    model.cuda()
    optimizer = optim.RMSprop(model.parameters(), lr=lr)

    out_z, center_crop_sz = utils.get_out_size(orig_dim, in_z,\
            transform_plan, model)

    t0 = time.time()

    counter = 0
    print_interval = args.log_interval
    model.train()
    for i in range(200000000000000000000000000000000):  # effectively an infinite training loop
        weight = torch.FloatTensor([0.2, 0.8]).cuda()
        vol, seg, inds = preprocess.get_batch(volpath, segpath, batch_size, in_z,\
                out_z, center_crop_sz, series_names, seg_series_names,\
                transform_plan, 8, nrrd=True)
        vol = torch.unsqueeze(vol, 1)
        vol = Variable(vol).cuda()
        seg = Variable(seg).cuda()

        out = model(vol).squeeze()

        loss = F.cross_entropy(out, seg, weight=weight)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        counter += 1

        sys.stdout.write('\r{:.2f}%'.format(counter * batch_size /
                                            print_interval))
        sys.stdout.flush()

        if counter * batch_size >= print_interval and i > 0:

            seg_hot = utils.get_hot(seg.data.cpu().numpy(), out.size()[-3])
            seg_hot = np.transpose(seg_hot, axes=[1, 0, 2, 3])
            out_hot = np.argmax(out.data.cpu().numpy(), axis=1)
            out_hot = utils.get_hot(out_hot, out.size()[-3])
            out_hot = np.transpose(out_hot, axes=[1, 0, 2, 3])

            dce2 = utils.dice(seg_hot[:, 1:], out_hot[:, 1:])

            vol_ind = utils.get_liver(seg)
            v = vol.data.cpu().numpy()[vol_ind].squeeze()
            real_seg = seg[vol_ind].data.cpu().numpy()
            out_plt = out.data.cpu().numpy()[vol_ind]
            out_plt = np.argmax(out_plt, axis=0)
            fake_seg = out_plt
            masked_real = utils.makeMask(v, real_seg, out.size()[-3], 0.5)
            masked_fake = utils.makeMask(v, fake_seg, out.size()[-3], 0.5)

            fig = plt.figure(1)
            fig.suptitle('Volume {} ; Dice = {:.2f}'.format(\
                    inds[vol_ind],dce2))
            v = fig.add_subplot(1, 2, 1)
            v.set_title('real')
            plt.imshow(masked_real)
            sreal = fig.add_subplot(1, 2, 2)
            sreal.set_title('fake')
            plt.imshow(masked_fake)
            outfile = os.path.join(args.datapath, 'out.png')
            plt.savefig(outfile, dpi=200)
            plt.clf()

            print(('\rIteration {}: Block completed in {:.2f} sec ; Loss = {:.2f}')\
                    .format(i*batch_size,time.time()-t0, loss.data.cpu()[0]))
            checkpoint_file = os.path.join(args.datapath,\
                    'model_checkpoint.pth')
            torch.save(model.state_dict(), checkpoint_file)
            counter = 0

            t0 = time.time()
Code Example #26
batch_size = 24
checkpoint_path = 'path to model checkpoint'
test_volpath = 'path to test volumes'
test_segpath = 'path to test segmentations'
num_labels = 3  #labels: lateral annulus, medial annulus, background

crop_size = 224
original_size = 256
out_file = 'path to store output'

################################################################

sqr = transforms.Square()
center = transforms.CenterCrop2(crop_size)
scale = transforms.Scale(original_size)
transform_plan = [sqr, scale, center]
series_names = ['echo']
seg_series_names = ['echo']

model = models.Net23(num_labels)
model.cuda()

model.load_state_dict(torch.load(checkpoint_path))

f_s = preprocess.gen_filepaths(test_segpath)
f_v = preprocess.gen_filepaths(test_volpath)

mult_inds = []
for i in f_s:
    if 'segmentation' in i:
Code Example #27
File: dataset.py Project: nnizhang/S2MA
def get_loader(img_root, img_size, batch_size, mode='train', num_thread=1):
    shuffle = False

    mean_bgr = torch.Tensor(3, 256, 256)
    mean_bgr[0, :, :] = 104.008  # B
    mean_bgr[1, :, :] = 116.669  # G
    mean_bgr[2, :, :] = 122.675  # R

    depth_mean_bgr = torch.Tensor(1, 256, 256)
    depth_mean_bgr[0, :, :] = 115.8695

    if mode == 'train':
        transform = trans.Compose([
            # trans.ToTensor  image -> [0,255]
            trans.ToTensor_BGR(),
            trans.Lambda(lambda x: x - mean_bgr)
        ])

        depth_transform = trans.Compose([
            # trans.ToTensor  image -> [0,255]
            trans.ToTensor(),
            trans.Lambda(lambda x: x - depth_mean_bgr)
        ])

        t_transform = trans.Compose([
            # transform.ToTensor  label -> [0,1]
            transforms.ToTensor(),
        ])
        label_32_transform = trans.Compose([
            trans.Scale((32, 32), interpolation=Image.NEAREST),
            transforms.ToTensor(),
        ])
        label_64_transform = trans.Compose([
            trans.Scale((64, 64), interpolation=Image.NEAREST),
            transforms.ToTensor(),
        ])
        label_128_transform = trans.Compose([
            trans.Scale((128, 128), interpolation=Image.NEAREST),
            transforms.ToTensor(),
        ])
        shuffle = True
    else:
        transform = trans.Compose([
            trans.Scale((img_size, img_size)),
            trans.ToTensor_BGR(),
            trans.Lambda(lambda x: x - mean_bgr)
        ])

        depth_transform = trans.Compose([
            trans.Scale((img_size, img_size)),
            trans.ToTensor(),
            trans.Lambda(lambda x: x - depth_mean_bgr)
        ])

        t_transform = trans.Compose([
            trans.Scale((img_size, img_size), interpolation=Image.NEAREST),
            transforms.ToTensor(),
        ])
    if mode == 'train':
        dataset = ImageData(img_root, transform, depth_transform, t_transform, label_32_transform, label_64_transform, label_128_transform, mode)
    else:
        dataset = ImageData(img_root, transform, depth_transform, t_transform, label_32_transform=None, label_64_transform=None, label_128_transform=None, mode=mode)

    data_loader = data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_thread)
    return data_loader
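
A hypothetical call for illustration (the root path is a placeholder); in 'train' mode each batch unpacks to the seven values returned by the __getitem__ in code example #2:

loader = get_loader('./RGBdDataset_processed/NJU2K', img_size=256, batch_size=8, mode='train')
for img, depth, label_256, label_32, label_64, label_128, names in loader:
    break  # one batch is enough for a smoke test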
Code Example #28
File: test_action40.py Project: zbxzc35/pytorch_CAM
def main():

    ########  load training data ########
    ######### action 40  ############

    normalize = trans.Normalize(mean=[0.4001, 0.4401, 0.4687],
                                std=[0.229, 0.224, 0.225])
    transform = trans.Compose([
        trans.Scale((224, 224)),
        trans.ToTensor(),
        normalize,
    ])

    test_data = imgfolder.ImageFolder(os.path.join(configs.data_dir,
                                                   'img/test'),
                                      transform=transform)
    test_loader = Data.DataLoader(test_data,
                                  batch_size=configs.batch_size,
                                  shuffle=False,
                                  num_workers=4,
                                  pin_memory=True)

    classes = {
        int(key): value
        for (key, value) in parse_json(configs.class_info_dir).items()
    }

    ######### build vgg model ##########

    vgg_cam = models.vgg_cam()
    vgg_cam = vgg_cam.cuda()
    checkpoint = torch.load(configs.best_ckpt_dir)
    vgg_cam.load_state_dict(checkpoint['state_dict'])

    # hook the feature extractor
    features_blobs = []

    def hook_feature(module, input, output):
        features_blobs.append(output.data.cpu().numpy())

    finalconv_name = 'classifier'  # this is the last conv layer of the network
    vgg_cam._modules.get(finalconv_name).register_forward_hook(hook_feature)

    # get the softmax weight
    params = list(vgg_cam.parameters())
    weight_softmax = np.squeeze(params[-1].data.cpu().numpy())

    save_cam_dir = os.path.join(configs.py_dir, 'predict')
    if not os.path.exists(save_cam_dir):
        os.mkdir(save_cam_dir)
    top_number = 5
    correct = 0
    total = 0

    for batch_idx, (inputs, targets) in enumerate(test_loader):

        inputs, targets = inputs.cuda(), targets.cuda()
        transformed_img = inputs.cpu().numpy()[0]
        target_name = classes[targets.cpu().numpy()[0]]
        transformed_img = untransform(transformed_img)
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        outputs, _ = vgg_cam(inputs)

        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()

        h_x = F.softmax(outputs).data.squeeze()
        probs, idx = h_x.sort(0, True)
        prob = probs.cpu().numpy()[:top_number]
        idx_ = idx.cpu().numpy()[:top_number]
        OUT_CAM = returnCAM(features_blobs[-1], weight_softmax, idx_, prob)

        save_fig_dir = os.path.join(save_cam_dir,
                                    'cam_' + str(batch_idx) + '.jpg')
        plt.figure(1, figsize=(8, 6))
        ax = plt.subplot(231)
        img1 = transformed_img[:, :, (2, 1, 0)]
        ax.set_title(('{}').format(target_name), fontsize=14)
        ax.imshow(img1)

        for b_index, (idx, prob_in, cam) in enumerate(zip(idx_, prob,
                                                          OUT_CAM)):

            cl = str(classes[idx])
            #save_fig_dir1 = os.path.join(save_cam_dir, 'cam_cv_' + str(batch_idx) + '_' + cl + '.jpg')
            height, width, _ = transformed_img.shape
            heatmap = cv2.applyColorMap(cv2.resize(cam, (width, height)),
                                        cv2.COLORMAP_JET)
            result = heatmap * 0.3 + transformed_img * 0.7
            ax = plt.subplot(2, 3, b_index + 2)
            ax.imshow(result.astype(np.uint8)[:, :, (2, 1, 0)])
            ax.set_title(('{}:{}').format(cl, ('%.3f' % prob_in)), fontsize=8)

        plt.savefig(save_fig_dir)

        print(batch_idx)

    print(100. * correct / total)
Code Example #29
            model.load_state_dict(snapshot['model'])
            # If this doesn't work, can use optimizer.load_state_dict
            # optimizer.load_state_dict(snapshot['optimizer'])
            print('==> snapshot "{0}" loaded (epoch {1})'.format(
                args.path, epoch))
        else:
            raise FileNotFoundError('no snapshot found at "{0}"'.format(
                args.path))
    else:
        epoch = 0

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    preprocess = transforms.Compose([
        transforms.Scale(256),
        transforms.RandomCrop(224),
        transforms.ColorJitter(brightness=0.1,
                               contrast=0.0,
                               saturation=0.3,
                               hue=0.05),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        affine_transforms.Affine(rotation_range=5.0,
                                 zoom_range=(0.85, 1.0),
                                 fill_mode='constant'), normalize
    ])

    # testing the model
    images = np.load('./preprocess/miniplaces_256_test.npz')['arr_0']
    # print(images.files)
Code Example #30
File: eval.py Project: britney-f/SALMNet
    'checkpoint':'60000',
    'val_size': [800, 288],
    'save_results': True,
    'deep_base': True,
}

mean = [0.3598, 0.3653, 0.3662]
std = [0.2573, 0.2663, 0.2756]

img_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean, std)
])
mask_transform = extend_transforms.MaskToTensor()
to_pil = transforms.ToPILImage()
val_joint_transform = extend_transforms.Scale(args['val_size'])


criterion = torch.nn.CrossEntropyLoss(weight=torch.Tensor([0.4, 1, 1, 1, 1]).cuda(), size_average=True,
                                      ignore_index=culane.ignore_label)
criterion = criterion.cuda()


def main():
    net = Baseline(num_classes=culane.num_classes, deep_base=args['deep_base']).cuda()

    print('load checkpoint \'%s.pth\' for evaluation' % args['checkpoint'])
    pretrained_dict = torch.load(os.path.join(ckpt_path, exp_name, args['checkpoint'] + '_checkpoint.pth'))
    pretrained_dict = {k[7:]: v for k, v in pretrained_dict.items()}  # strip the 'module.' prefix added by nn.DataParallel
    net.load_state_dict(pretrained_dict)