def test_pretrained(self):
        """Check that VGG16 construction validates ``n_class`` against the weights.

        When the ImageNet-pretrained weights are requested, ``n_class`` must be
        ``None`` or 1000; any other combination should raise ``ValueError``.
        """
        kwargs = {
            'n_class': self.n_class,
            'pretrained_model': self.pretrained_model,
            'mean': self.mean,
        }

        # Default to "construction should succeed": only the ImageNet weights
        # constrain n_class.  The original code left ``valid`` unbound for any
        # other pretrained_model value, raising NameError at the check below.
        valid = True
        if self.pretrained_model == 'imagenet':
            valid = self.n_class in {None, 1000}

        if valid:
            VGG16(**kwargs)
        else:
            with self.assertRaises(ValueError):
                VGG16(**kwargs)
# Example #2
def main():
    """Compute Grad-CAM for one image with an ImageNet-pretrained VGG16.

    Reads the image path, target label, and output paths from the command
    line, then writes a heatmap image and an overlay image to disk.
    """
    args = parse_args()

    model = VGG16(pretrained_model='imagenet')

    # Load and resize to VGG16's 224x224 input resolution.
    image_size = (224, 224)
    image = cv2.resize(cv2.imread(args.image_path), image_size)

    if args.gpu >= 0:
        cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()
        xp = cuda.cupy
    else:
        xp = np

    # HWC uint8 -> NCHW float32, with the channel axis reversed (cv2 loads
    # BGR; presumably this matches the model's expected channel order —
    # NOTE(review): confirm against the model definition), then subtract the
    # model's per-channel mean.
    x = np.float32(image).transpose((2, 0, 1))[np.newaxis, ::-1, :, :]
    x -= model.mean
    x = xp.asarray(x)

    # One-hot gradient at the target class drives the backward pass.
    y_grad = xp.zeros((1, 1000), dtype=np.float32)
    y_grad[0, args.label] = 1.0

    gcam = gradcam.gradcam(
        model, x, [model.conv5_3.conv, F.relu], y_grad=y_grad)
    gcam = cuda.to_cpu(gcam[0])

    cv2.imwrite(args.heatmap_path, gradcam.heatmap(gcam, image_size))
    cv2.imwrite(args.overlay_path, gradcam.overlay(image, gcam))
# Example #3
def main():
    """Evaluate a classification model on an ILSVRC2012-style validation set.

    Parses command-line options, runs the chosen pretrained extractor over a
    directory-parsed validation dataset, and prints the top-1 error.
    """
    parser = argparse.ArgumentParser(
        description='Learning convnet from ILSVRC2012 dataset')
    parser.add_argument('val', help='Path to root of the validation dataset')
    parser.add_argument(
        '--model', choices=('vgg16', 'resnet50', 'resnet101', 'resnet152'))
    parser.add_argument('--pretrained_model', default='imagenet')
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--batchsize', type=int, default=32)
    parser.add_argument('--crop', choices=('center', '10'), default='center')
    parser.add_argument('--resnet_mode', default='he')
    args = parser.parse_args()

    dataset = DirectoryParsingLabelDataset(args.val)
    label_names = directory_parsing_label_names(args.val)
    n_class = len(label_names)
    iterator = iterators.MultiprocessIterator(
        dataset, args.batchsize, repeat=False, shuffle=False,
        n_processes=6, shared_mem=300000000)

    if args.model == 'vgg16':
        extractor = VGG16(n_class, args.pretrained_model)
    elif args.model == 'resnet50':
        extractor = ResNet50(
            n_class, args.pretrained_model, mode=args.resnet_mode)
    elif args.model == 'resnet101':
        extractor = ResNet101(
            n_class, args.pretrained_model, mode=args.resnet_mode)
    elif args.model == 'resnet152':
        extractor = ResNet152(
            n_class, args.pretrained_model, mode=args.resnet_mode)
    else:
        # Previously fell through to an UnboundLocalError on ``extractor``
        # when --model was omitted; fail with an explicit message instead.
        raise ValueError(
            '--model must be one of vgg16/resnet50/resnet101/resnet152')
    model = FeaturePredictor(
        extractor, crop_size=224, scale_size=256, crop=args.crop)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    print('Model has been prepared. Evaluation starts.')
    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)))
    # The input images are not needed for scoring; free them early.
    del in_values

    pred_probs, = out_values
    gt_labels, = rest_values

    accuracy = F.accuracy(
        np.array(list(pred_probs)), np.array(list(gt_labels))).data
    print()
    print('Top 1 Error {}'.format(1. - accuracy))
# Example #4
def load_vgg16pretrain(model, vggmodel='vgg16convs.mat'):
    """Copy ImageNet-pretrained VGG16 conv weights from ChainerCV into ``model``.

    ``model`` is a torch module exposing ``convX_Y`` layers with
    ``weight``/``bias`` attributes.  ``vggmodel`` is unused; it is kept only
    for backward compatibility with existing callers.
    """
    from chainercv.links import VGG16
    import torch

    cv_model = VGG16(pretrained_model='imagenet')

    layer_names = (
        'conv1_1', 'conv1_2',
        'conv2_1', 'conv2_2',
        'conv3_1', 'conv3_2', 'conv3_3',
        'conv4_1', 'conv4_2', 'conv4_3',
        'conv5_1', 'conv5_2', 'conv5_3',
    )
    for name in layer_names:
        src = getattr(cv_model, name).conv
        dst = getattr(model, name)
        weight = src.W.data
        if name == 'conv1_1':
            # Only the first layer's input channels are reversed — presumably
            # an RGB<->BGR conversion for the target model's input order
            # (TODO confirm).  ``.copy()`` makes the negative-stride view
            # contiguous so torch.tensor accepts it.
            weight = weight[:, ::-1].copy()
        dst.weight.data[:] = torch.tensor(weight)
        dst.bias.data[:] = torch.tensor(src.b.data)
    print('done')
 def setUp(self):
     """Create an uninitialized (zero-weight) VGG16 and set the picked features."""
     link = VGG16(n_class=self.n_class, pretrained_model=None,
                  initialW=Zero())
     link.pick = self.pick
     self.link = link
# Example #6
 def setUp(self):
     """Create an uninitialized (zero-weight) VGG16 and set the feature names."""
     link = VGG16(n_class=self.n_class, pretrained_model=None,
                  initialW=Zero())
     link.feature_names = self.feature_names
     self.link = link