Example #1
    def __init__(self, pretrain=True):
        super(VGGFeature, self).__init__()
        with self.init_scope():
            self.block1_1 = Block(64, 3)
            self.block1_2 = Block(64, 3)
            self.block2_1 = Block(128, 3)
            self.block2_2 = Block(128, 3)
            self.block3_1 = Block(256, 3)
            self.block3_2 = Block(256, 3)
            self.block3_3 = Block(256, 3)
            self.block4_1 = Block(512, 3)
            self.block4_2 = Block(512, 3)
            self.block4_3 = Block(512, 3)
            self.block5_1 = Block(512, 3)
            self.block5_2 = Block(512, 3)
            self.block5_3 = Block(512, 3)

        if pretrain:
            print('loading VGG16Layers...')
            from chainer.links import VGG16Layers
            vgg = VGG16Layers()
            # self.block1_1.conv = vgg.conv1_1
            self.block1_2.conv = vgg.conv1_2
            self.block2_1.conv = vgg.conv2_1
            self.block2_2.conv = vgg.conv2_2
            self.block3_1.conv = vgg.conv3_1
            self.block3_2.conv = vgg.conv3_2
            self.block3_3.conv = vgg.conv3_3
            self.block4_1.conv = vgg.conv4_1
            self.block4_2.conv = vgg.conv4_2
            self.block4_3.conv = vgg.conv4_3
            self.block5_1.conv = vgg.conv5_1
            self.block5_2.conv = vgg.conv5_2
            self.block5_3.conv = vgg.conv5_3
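The Block link used above is not shown in this snippet; here is a minimal sketch of what it is assumed to look like, given the Block(out_channels, ksize) calls and the .conv attribute that the pretrained weights are copied into:

import chainer
import chainer.functions as F
import chainer.links as L

class Block(chainer.Chain):
    # Hypothetical minimal Block: one 'same'-padded convolution followed by ReLU.
    def __init__(self, out_channels, ksize):
        super(Block, self).__init__()
        with self.init_scope():
            # in_channels=None lets Chainer infer the input channels lazily
            self.conv = L.Convolution2D(None, out_channels, ksize, pad=1)

    def __call__(self, x):
        return F.relu(self.conv(x))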
Example #2
 def __init__(self, *args, **kwargs):
     self.enc_x, self.dec_x, self.enc_y, self.dec_y, self.dis_x, self.dis_y, self.dis_z = kwargs.pop(
         'models')
     params = kwargs.pop('params')
     #        self.device_id = kwargs.pop('device')
     super(Updater, self).__init__(*args, **kwargs)
     self.args = params['args']
     self.xp = self.enc_x.xp
     self._buffer_y = losses.ImagePool(50 * self.args.batch_size)
     self._buffer_x = losses.ImagePool(50 * self.args.batch_size)
     if self.args.lambda_identity_x > 0 or self.args.lambda_identity_y > 0:
         self.vgg = VGG16Layers()  # for perceptual loss
         self.vgg.to_gpu()
Example #3
    def __init__(self):
        super(HandDetectGradual, self).__init__()
        with self.init_scope():
            self.featureNet = VGG16Layers()
            self.deconv1 = L.Deconvolution2D(512, 256, 4, stride=2,
                                             pad=1)  #28*28*256
            self.rconv1_1 = L.Convolution2D(256, 256, 3, stride=1, pad=1)
            self.rconv1_2 = L.Convolution2D(256, 256, 3, stride=1, pad=1)
            self.deconv2 = L.Deconvolution2D(256, 128, 4, stride=2,
                                             pad=1)  #56*56*128
            self.rconv2_1 = L.Convolution2D(128, 128, 3, stride=1, pad=1)
            self.rconv2_2 = L.Convolution2D(128, 128, 3, stride=1, pad=1)

            self.HMcov = L.Convolution2D(128, 5, 3, stride=1, pad=1)  #56*56*5
Example #4
 def __init__(self, *args, **kwargs):
     self.gen_g, self.gen_f, self.dis_x, self.dis_y = kwargs.pop('models')
     params = kwargs.pop('params')
     super(Updater, self).__init__(*args, **kwargs)
     self.args = params['args']
     self._iter = 0
     self.xp = self.gen_g.xp
     self._buffer_x = losses.ImagePool(50 * self.args.batch_size)
     self._buffer_y = losses.ImagePool(50 * self.args.batch_size)
     self.init_alpha = self.get_optimizer('opt_g').alpha
     self.report_start = self.args.warmup * 10  ## start reporting
     if self.args.lambda_identity_x > 0 or self.args.lambda_identity_y > 0:
         self.vgg = VGG16Layers()  # for perceptual loss
         self.vgg.to_gpu()
Example #5
 def _copy_imagenet_pretrained_vgg16(self, path):
     pretrained_model = VGG16Layers(pretrained_model=path)
     self.extractor.conv1_1.copyparams(pretrained_model.conv1_1)
     self.extractor.conv1_2.copyparams(pretrained_model.conv1_2)
     self.extractor.conv2_1.copyparams(pretrained_model.conv2_1)
     self.extractor.conv2_2.copyparams(pretrained_model.conv2_2)
     self.extractor.conv3_1.copyparams(pretrained_model.conv3_1)
     self.extractor.conv3_2.copyparams(pretrained_model.conv3_2)
     self.extractor.conv3_3.copyparams(pretrained_model.conv3_3)
     self.extractor.conv4_1.copyparams(pretrained_model.conv4_1)
     self.extractor.conv4_2.copyparams(pretrained_model.conv4_2)
     self.extractor.conv4_3.copyparams(pretrained_model.conv4_3)
     self.extractor.conv5_1.copyparams(pretrained_model.conv5_1)
     self.extractor.conv5_2.copyparams(pretrained_model.conv5_2)
     self.extractor.conv5_3.copyparams(pretrained_model.conv5_3)
     self.head.fc6.copyparams(pretrained_model.fc6)
     self.head.fc7.copyparams(pretrained_model.fc7)
     self.head.fc8.copyparams(pretrained_model.fc8)
Example #6
def load_model(model_name, n_class):
    archs = {'nin': NIN, 'vgg16': VGG16BatchNormalization}
    model = archs[model_name](n_class=n_class)
    if model_name == 'nin':
        pass
    elif model_name == 'vgg16':
        rospack = rospkg.RosPack()
        model_path = osp.join(rospack.get_path('decopin_hand'), 'scripts',
                              'vgg16', 'VGG_ILSVRC_16_layers.npz')
        if not osp.exists(model_path):
            from chainer.dataset import download
            from chainer.links.caffe.caffe_function import CaffeFunction
            from chainer.serializers import npz
            # Download the 16-layer caffemodel (the original snippet pointed at
            # the 19-layer file, which would not match VGG16Layers).
            path_caffemodel = download.cached_download(
                'http://www.robots.ox.ac.uk/%7Evgg/software/very_deep/caffe/VGG_ILSVRC_16_layers.caffemodel'
            )
            caffemodel = CaffeFunction(path_caffemodel)
            npz.save_npz(model_path, caffemodel, compression=False)

        vgg16 = VGG16Layers(
            pretrained_model=model_path)  # original VGG16 model
        print('Load model from {}'.format(model_path))
        for l in model.children():
            if l.name.startswith('conv'):
                # l.disable_update()  # Comment-in for transfer learning, comment-out for fine tuning
                l1 = getattr(vgg16, l.name)
                l2 = getattr(model, l.name)
                assert l1.W.shape == l2.W.shape
                assert l1.b.shape == l2.b.shape
                l2.W.data[...] = l1.W.data[...]
                l2.b.data[...] = l1.b.data[...]
            elif l.name in ['fc6', 'fc7']:
                l1 = getattr(vgg16, l.name)
                l2 = getattr(model, l.name)
                assert l1.W.size == l2.W.size
                assert l1.b.size == l2.b.size
                l2.W.data[...] = l1.W.data.reshape(l2.W.shape)[...]
                l2.b.data[...] = l1.b.data.reshape(l2.b.shape)[...]
    else:
        print('Model type {} is invalid.'.format(model_name))
        exit()

    return model
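A hedged usage sketch of the loader above (the class count is an assumption):

model = load_model('vgg16', n_class=10)  # conv weights copied from VGG16; fc6/fc7 reshaped to fit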
Example #7
 def _copy_imagenet_pretrained_vgg16(self):
     pretrained_model = VGG16Layers()
     self.extractor.conv1_1.copyparams(pretrained_model.conv1_1)
     # The pretrained weights are trained to accept BGR images.
     # Convert weights so that they accept RGB images.
     self.extractor.conv1_1.W.data[:] =\
         self.extractor.conv1_1.W.data[:, ::-1]
     self.extractor.conv1_2.copyparams(pretrained_model.conv1_2)
     self.extractor.conv2_1.copyparams(pretrained_model.conv2_1)
     self.extractor.conv2_2.copyparams(pretrained_model.conv2_2)
     self.extractor.conv3_1.copyparams(pretrained_model.conv3_1)
     self.extractor.conv3_2.copyparams(pretrained_model.conv3_2)
     self.extractor.conv3_3.copyparams(pretrained_model.conv3_3)
     self.extractor.conv4_1.copyparams(pretrained_model.conv4_1)
     self.extractor.conv4_2.copyparams(pretrained_model.conv4_2)
     self.extractor.conv4_3.copyparams(pretrained_model.conv4_3)
     self.extractor.conv5_1.copyparams(pretrained_model.conv5_1)
     self.extractor.conv5_2.copyparams(pretrained_model.conv5_2)
     self.extractor.conv5_3.copyparams(pretrained_model.conv5_3)
     self.head.fc6.copyparams(pretrained_model.fc6)
     self.head.fc7.copyparams(pretrained_model.fc7)
Example #8
def load_model(gpu=-1):
    """
    モデルとラベルデータを読み込む関数
    """

    global model
    global labels

    # Create an argument parser to check whether the --gpu option was given
    parser = argparse.ArgumentParser(description="classifier")
    parser.add_argument("--gpu", "-g", type=int, default=-1)
    args = parser.parse_args()

    # Load the model (a download occurs on first run)
    model = VGG16Layers()

    # When using the GPU
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    # Load the label data (list() is needed because map() is lazy in Python 3)
    label_file = open("synset_words.txt")
    labels = list(map(lambda x: x[10:], label_file.read().split('\n')))[:-1]
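A minimal classification sketch using the model and labels loaded above (the image path is hypothetical); VGG16Layers.predict returns softmax probabilities over the 1000 ImageNet classes:

from PIL import Image

img = Image.open("path/to/image.jpg")
with chainer.using_config('train', False):
    prob = chainer.cuda.to_cpu(model.predict([img]).data[0])  # shape (1000,)
for i in prob.argsort()[::-1][:5]:  # top-5 labels
    print(labels[i], prob[i])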
Example #9
def main():
    parser = argparse.ArgumentParser(description='Train Completion Network')
    parser.add_argument('--batch_size', '-b', type=int, default=8)
    parser.add_argument('--max_iter', '-m', type=int, default=500000)
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--eval_folder',
                        '-e',
                        default='test',
                        help='Directory to output the evaluation result')

    parser.add_argument('--eval_interval',
                        type=int,
                        default=100,
                        help='Interval of evaluating generator')

    parser.add_argument("--learning_rate",
                        type=float,
                        default=0.0002,
                        help="Learning rate")

    parser.add_argument("--load_model",
                        default='',
                        help='completion model path')

    parser.add_argument("--lambda1",
                        type=float,
                        default=6.0,
                        help='lambda for hole loss')
    parser.add_argument("--lambda2",
                        type=float,
                        default=0.05,
                        help='lambda for perceptual loss')
    parser.add_argument("--lambda3",
                        type=float,
                        default=120.0,
                        help='lambda for style loss')
    parser.add_argument("--lambda4",
                        type=float,
                        default=0.1,
                        help='lambda for tv loss')

    parser.add_argument("--flip",
                        type=int,
                        default=1,
                        help='flip images for data augmentation')
    parser.add_argument("--resize_to",
                        type=int,
                        default=256,
                        help='resize the image to')
    parser.add_argument("--crop_to",
                        type=int,
                        default=256,
                        help='crop the resized image to')
    parser.add_argument("--load_dataset",
                        default='place2_train',
                        help='load dataset')
    #parser.add_argument("--layer_n", type=int, default=7, help='number of layers')

    parser.add_argument("--learning_rate_anneal",
                        type=float,
                        default=0,
                        help='anneal the learning rate')
    parser.add_argument("--learning_rate_anneal_interval",
                        type=int,
                        default=1000,
                        help='interval at which to anneal the learning rate')

    args = parser.parse_args()
    print(args)

    max_iter = args.max_iter

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()

    #load completion model
    model = getattr(net, "PartialConvCompletion")(ch0=3,
                                                  input_size=args.crop_to)

    #load vgg_model
    print("loading vgg16 ...")
    vgg = VGG16Layers()
    print("ok")

    if args.load_model != '':
        serializers.load_npz(args.load_model, model)
        print("Completion model loaded")

    if not os.path.exists(args.eval_folder):
        os.makedirs(args.eval_folder)

    # select GPU
    if args.gpu >= 0:
        model.to_gpu()
        vgg.to_gpu()
        print("use gpu {}".format(args.gpu))

    # Setup an optimizer
    def make_optimizer(model, name="Adam", learning_rate=0.0002):
        #optimizer = chainer.optimizers.AdaDelta()
        #optimizer = chainer.optimizers.SGD(lr=alpha)
        if name == "Adam":
            optimizer = chainer.optimizers.Adam(alpha=learning_rate, beta1=0.5)
        elif name == "SGD":
            optimizer = chainer.optimizers.SGD(lr=learning_rate)
        optimizer.setup(model)
        return optimizer

    opt_model = make_optimizer(model, "Adam", args.learning_rate)

    train_dataset = getattr(datasets,
                            args.load_dataset)(paths.train_place2,
                                               mask_path="mask/256",
                                               flip=args.flip,
                                               resize_to=args.resize_to,
                                               crop_to=args.crop_to)
    train_iter = chainer.iterators.MultiprocessIterator(train_dataset,
                                                        args.batch_size,
                                                        n_processes=4)

    #val_dataset = getattr(datasets, args.load_dataset)(flip=0, resize_to=args.resize_to, crop_to=args.crop_to)
    #val_iter = chainer.iterators.MultiprocessIterator(
    #    val_dataset, args.batchsize, n_processes=4)

    #test_dataset = horse2zebra_Dataset_train(flip=args.flip, resize_to=args.resize_to, crop_to=args.crop_to)

    test_iter = chainer.iterators.SerialIterator(train_dataset, 8)

    # Set up a trainer
    updater = Updater(
        models=(vgg, model),
        iterator={
            'main': train_iter,
            #'dis' : train2_iter,
            'test': test_iter
        },
        optimizer={
            'model': opt_model,
        },
        device=args.gpu,
        params={
            'lambda1': args.lambda1,
            'lambda2': args.lambda2,
            'lambda3': args.lambda3,
            'lambda4': args.lambda4,
            'image_size': args.crop_to,
            'eval_folder': args.eval_folder,
            #'learning_rate_anneal' : args.learning_rate_anneal,
            #'learning_rate_anneal_interval' : args.learning_rate_anneal_interval,
            'dataset': train_dataset
        })

    model_save_interval = (4000, 'iteration')
    trainer = training.Trainer(updater, (max_iter, 'iteration'), out=args.out)
    #trainer.extend(extensions.snapshot_object(
    #    gen_g, 'gen_g{.updater.iteration}.npz'), trigger=model_save_interval)
    trainer.extend(extensions.snapshot_object(model,
                                              'model{.updater.iteration}.npz'),
                   trigger=model_save_interval)

    log_keys = [
        'epoch', 'iteration', 'L_valid', 'L_hole', 'L_perceptual', 'L_style',
        'L_tv'
    ]
    trainer.extend(
        extensions.LogReport(keys=log_keys, trigger=(20, 'iteration')))
    trainer.extend(extensions.PrintReport(log_keys), trigger=(20, 'iteration'))
    trainer.extend(extensions.ProgressBar(update_interval=50))

    trainer.extend(evaluation(model, args.eval_folder,
                              image_size=args.crop_to),
                   trigger=(args.eval_interval, 'iteration'))

    # Run the training
    trainer.run()
Example #10
        print('Loading {:s}..'.format(modelfn))
        serializers.load_npz(modelfn, dec)
        if args.gpu >= 0:
            enc.to_gpu()
            dec.to_gpu()
        xp = enc.xp
        is_AE = True
    else:
        gen = F.identity
        xp = np
        is_AE = False
        print("Identity...")

    ## prepare networks for analysis
    if args.output_analysis:
        vgg = VGG16Layers()  # for perceptual loss
        vgg.to_gpu()
        if is_AE:
            enc_i = net.Encoder(args)
            dec_i = net.Decoder(args)
            dis = net.Discriminator(args)
            dis_i = net.Discriminator(args)
            if "enc_x" in args.load_models:
                models = {
                    'enc_y': enc_i,
                    'dec_x': dec_i,
                    'dis_x': dis_i,
                    'dis_y': dis
                }
            else:
                models = {
Example #11
import numpy as np
from PIL import Image
from chainer import Variable
from chainer.links import VGG16Layers
from chainer.links.caffe import CaffeFunction

from analysis import Analysis

model = VGG16Layers()

img = Image.open("path/to/image.jpg")
feature = model.extract([img], layers=["fc7"])["fc7"]

# Load the model
func = CaffeFunction('bvlc_googlenet.caffemodel')

# Minibatch of size 10
x_data = np.ndarray((10, 3, 227, 227), dtype=np.float32)

# Forward the pre-trained net
x = Variable(x_data)
y, = func(inputs={'data': x}, outputs=['fc8'])

# create caffemodel neural network

# create analysis object
ana = Analysis(func, fname='tmp')  # 'ann.model' was undefined here; assuming the CaffeFunction loaded above is the network to analyse

# handle sequential data; deal with classifier analysis separately

# analyse data
Example #12
import chainer
from chainer.links import VGG16Layers
from PIL import Image
import argparse
import numpy as np

MODEL = VGG16Layers()


def _create_db(paths, gpu):
    if gpu >= 0:
        chainer.cuda.get_device_from_id(gpu).use()
        MODEL.to_gpu()
    with chainer.using_config('train', False):
        features = np.asarray([
            chainer.cuda.to_cpu(
                MODEL.extract([Image.open(path, 'r').convert('RGB')], ['fc7'],
                              size=(224, 224))['fc7'].data) for path in paths
        ], np.float32)
    print('dataset size : {}'.format(len(features)))
    return features


def create_db(dir, gpu):
    from glob import glob
    import os
    temp = os.path.join(dir, '*.png')
    paths = glob(temp)
    assert len(paths) != 0
    if not os.path.exists('db'):
        os.mkdir('db')
Example #13
import numpy as np
import matplotlib

matplotlib.use("Agg")
import matplotlib.pyplot as plt
from PIL import Image
from chainer.links import VGG16Layers
# NOTE: GradCAM, GradCAM_PP, GuidedVGG16 and GuidedBackprop are assumed to be
# provided by this repository's local modules.

if __name__ == "__main__":

    filename = "images/dog_cat.png"
    images = [Image.open(filename)]
    # target_label = 282
    target_label = 242
    # target_label = -1

    # Grad-CAM
    vgg = VGG16Layers()
    grad_cam = GradCAM(vgg, "conv5_3", "prob")
    L_gcam = grad_cam.feed(images, target_label)

    # Grad-CAM++
    grad_cam_pp = GradCAM_PP(vgg, "conv5_3", "prob")
    L_gcampp = grad_cam_pp.feed(images, target_label)

    # Guided backprop
    gvgg = GuidedVGG16()
    guided_backprop = GuidedBackprop(gvgg, "input", "prob")
    R_0 = guided_backprop.feed(images, target_label)

    # Guided Grad-CAM
    ggrad_cam = R_0 * L_gcam[:, :, np.newaxis]
    ggrad_cam -= ggrad_cam.min()
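A hedged continuation (the snippet is cut off here): finish normalizing the guided Grad-CAM map to [0, 1] and save it with the Agg backend imported above:

    ggrad_cam /= ggrad_cam.max()
    plt.imshow(ggrad_cam)
    plt.axis("off")
    plt.savefig("guided_grad_cam.png", bbox_inches="tight")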
Example #14
def main():
    parser = argparse.ArgumentParser(description='Train Completion Network')
    parser.add_argument('--batch_size', '-b', type=int, default=8)
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--eval_folder',
                        '-e',
                        default='generated_results',
                        help='Directory to output the evaluation result')

    parser.add_argument("--load_model", help='completion model path')

    parser.add_argument("--resize_to",
                        type=int,
                        default=256,
                        help='resize the image to')
    parser.add_argument("--crop_to",
                        type=int,
                        default=256,
                        help='crop the resized image to')
    parser.add_argument("--load_dataset",
                        default='place2_test',
                        help='load dataset')
    #parser.add_argument("--layer_n", type=int, default=7, help='number of layers')

    args = parser.parse_args()
    print(args)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()

    #load completion model
    model = getattr(net, "PartialConvCompletion")(ch0=3,
                                                  input_size=args.crop_to)

    #load vgg_model
    print("loading vgg16 ...")
    vgg = VGG16Layers()
    print("ok")

    if args.load_model != '':
        serializers.load_npz(args.load_model, model)
        print("Completion model loaded")

    if not os.path.exists(args.eval_folder):
        os.makedirs(args.eval_folder)

    # select GPU
    if args.gpu >= 0:
        model.to_gpu()
        vgg.to_gpu()
        print("use gpu {}".format(args.gpu))

    val_dataset = getattr(datasets,
                          args.load_dataset)(paths.val_place2,
                                             mask_path="mask/256",
                                             resize_to=args.resize_to,
                                             crop_to=args.crop_to)
    val_iter = chainer.iterators.SerialIterator(val_dataset, args.batch_size)

    #test_dataset = horse2zebra_Dataset_train(flip=args.flip, resize_to=args.resize_to, crop_to=args.crop_to)

    #test_iter = chainer.iterators.SerialIterator(train_dataset, 8)

    #generate results
    xp = model.xp
    batch = val_iter.next()
    batchsize = len(batch)

    image_size = args.crop_to
    x = xp.zeros((batchsize, 3, image_size, image_size)).astype("f")
    m = xp.zeros((batchsize, 3, image_size, image_size)).astype("f")

    for i in range(batchsize):
        x[i, :] = xp.asarray(batch[i][0])
        m[i, :] = xp.asarray(batch[i][1])
    mask_b = xp.array(m.astype("bool"))

    I_gt = Variable(x)
    M = Variable(m)
    M_b = Variable(mask_b)

    I_out = model(x, m)
    I_comp = F.where(M_b, I_gt, I_out)

    img = x.get()

    img = batch_postprocess_images(img, batchsize, 1)
    Image.fromarray(img).save(args.eval_folder + "/generated_3_Igt.jpg")

    img = I_comp.data.get()

    img = batch_postprocess_images(img, batchsize, 1)
    Image.fromarray(img).save(args.eval_folder + "/generated_2_Icomp.jpg")

    img = I_out.data.get()

    img = batch_postprocess_images(img, batchsize, 1)
    Image.fromarray(img).save(args.eval_folder + "/generated_1_Iout.jpg")

    img = M.data.get()

    img = batch_postprocess_images(img, batchsize, 1)
    Image.fromarray(img).save(args.eval_folder + "/generated_0_mask.jpg")
Example #15
    def __init__(self,
                 n_fg_class=None,
                 pretrained_model=None,
                 min_size=config.IMG_SIZE[0],
                 max_size=config.IMG_SIZE[0],
                 vgg_initialW=None,
                 score_initialW=None,
                 mean_file=None,
                 extract_len=None,
                 fix=False):
        if n_fg_class is None:
            if pretrained_model not in self._models:
                raise ValueError(
                    'The n_fg_class needs to be supplied as an argument')
            n_fg_class = len(config.AU_SQUEEZE)
        if score_initialW is None:
            score_initialW = chainer.initializers.Normal(0.01)
        if vgg_initialW is None and pretrained_model:
            vgg_initialW = chainer.initializers.constant.Zero()

        extractor = VGG16FeatureExtractor(initialW=vgg_initialW, fix=fix)
        head = VGG16RoIHead(
            n_fg_class,  # note: an all-zero label vector means background; multi-labels are 0/1 bit patterns, so no dedicated background unit is needed
            roi_size=7,
            spatial_scale=1. / self.feat_stride,  # 1/16: after feature extraction the map is 1/16 of the original image, so ROI bboxes must shrink accordingly
            vgg_initialW=vgg_initialW,
            score_initialW=score_initialW,
            extract_len=extract_len)

        mean_array = np.load(mean_file)
        print("loading mean_file in: {} done".format(mean_file))
        super(FasterRCNNVGG16, self).__init__(extractor,
                                              head,
                                              mean=mean_array,
                                              min_size=min_size,
                                              max_size=max_size)

        if pretrained_model in self._models and 'url' in self._models[
                pretrained_model]:
            path = download_model(self._models[pretrained_model]['url'])
            chainer.serializers.load_npz(path, self)
        elif pretrained_model == 'vgg':  # in practice only this branch is taken
            print("loading:{} imagenet pretrained model".format(
                self._models['imagenet']['path']))

            model_path = self._models['imagenet']['path']
            if model_path.endswith(".caffemodel"):
                caffe_model = CaffeFunction(model_path)
                chainer_model = VGG16Layers(pretrained_model=None)
                self._transfer_vgg(caffe_model, chainer_model)
                chainer_model_save_path = "{}/VGG_ILSVRC_16_layers.npz".format(
                    os.path.dirname(model_path))
                chainer.serializers.save_npz(chainer_model_save_path,
                                             chainer_model)
                self._copy_imagenet_pretrained_vgg16(
                    path=chainer_model_save_path)
            elif model_path.endswith(".npz"):
                self._copy_imagenet_pretrained_vgg16(path=model_path)

        elif pretrained_model == "vgg_face":
            model_path = self._models['vgg_face']['path']
            if model_path.endswith(".caffemodel"):
                caffe_model = CaffeFunction(model_path)
                chainer_model = VGG16Layers(pretrained_model=None)
                self._transfer_vgg(caffe_model, chainer_model)
                chainer_model_save_path = "{}/vgg_face.npz".format(
                    os.path.dirname(model_path))
                chainer.serializers.save_npz(chainer_model_save_path,
                                             chainer_model)
                self._copy_imagenet_pretrained_vgg16(
                    path=chainer_model_save_path)
            elif model_path.endswith(".npz"):
                if os.path.exists(model_path):
                    print("loading vgg_face {}".format(model_path))
                    self._copy_imagenet_pretrained_vgg16(path=model_path)

        elif pretrained_model.endswith("npz"):
            print("loading :{} to AU R-CNN VGG16".format(pretrained_model))
            chainer.serializers.load_npz(
                pretrained_model,
                self)  #FIXME 我修改了最后加了一层fc8, 变成1024维向量,但是无法load
Example #16
    def __init__(self, n_units=256, n_out=0, img_size=112, var=0.18, n_step=2, gpu_id=-1):
        super(BASE, self).__init__(
            # the size of the inputs to each layer will be inferred
            # glimpse network
            # processes the cropped glimpse; outputs the product of the location features (glimpse loc) and the image features
            # in 256 * 256 * 3
            g_full=L.Linear(4096, 256),
            glimpse_loc=L.Linear(3, 256),

            norm_1_1=L.BatchNormalization(64),
            norm_1_2=L.BatchNormalization(64),
            norm_2_1=L.BatchNormalization(128),
            norm_2_2=L.BatchNormalization(128),
            norm_3_1=L.BatchNormalization(256),
            norm_3_2=L.BatchNormalization(256),
            norm_3_3=L.BatchNormalization(256),
            norm_f1=L.BatchNormalization(256),

            # LSTM layers that carry memory across steps
            rnn_1=L.LSTM(n_units, n_units),
            rnn_2=L.LSTM(n_units, n_units),

            # network that selects the attention region
            attention_loc=L.Linear(n_units, 2),
            attention_scale=L.Linear(n_units, 1),

            # network that processes the input image
            context_cnn_1=L.Convolution2D(3, 64, 3, pad=1),
            context_cnn_2=L.Convolution2D(64, 64, 3, pad=1),
            context_cnn_3=L.Convolution2D(64, 64, 3, pad=1),
            context_cnn_4=L.Convolution2D(64, 64, 3, pad=1),
            context_cnn_5=L.Convolution2D(64, 64, 3, pad=1),
            context_full=L.Linear(16 * 16 * 64, n_units),

            l_norm_cc1=L.BatchNormalization(64),
            l_norm_cc2=L.BatchNormalization(64),
            l_norm_cc3=L.BatchNormalization(64),
            l_norm_cc4=L.BatchNormalization(64),
            l_norm_cc5=L.BatchNormalization(64),

            # baseline network: learns the expected reward for reinforcement learning, used as the bias b
            baseline=L.Linear(n_units, 1),

            class_full=L.Linear(n_units, n_out)
        )

        #
        # img parameter
        #
        self.vgg_model = VGG16Layers()
        if gpu_id >= 0:
            self.use_gpu = True
            self.vgg_model.to_gpu()
        else:
            self.use_gpu = False
        self.img_size = img_size
        self.gsize = 32
        self.train = True
        self.var = var
        self.vars = var
        self.n_unit = n_units
        self.num_class = n_out
        # r determine the rate of position
        self.r = 0.5
        self.r_recognize = 1.0
        self.n_step = n_step
Example #17
        refineNet = self.featureNet.copy(mode='share')
        for i, layer in enumerate(refineNet.children()):
            #             print(layer.W.shape)
            if i > 12:
                break
            if i >= 4:
                #                 print(i)
                h = F.relu(layer(h))
            if i in [6, 9]:
                h = F.max_pooling_2d(h, 2, stride=2)


        #         print(h.data.shape)
        h = F.relu(self.deconv1(h))
        h = F.relu(self.rconv1_1(h))
        h = F.relu(self.rconv1_2(h))
        h = F.relu(self.deconv2(h))
        h = F.relu(self.rconv2_1(h))
        h = F.relu(self.rconv2_2(h))
        return h

    def __call__(self, x):
        h = self.image_to_map(x)
        h = self.map_to_map(h)
        h = F.sigmoid(self.HMcov(h))
        return h

if __name__ == '__main__':
    a = VGG16Layers()
    for i in a.children():
        print(i.W.shape)
Example #18
           for item in similarities[:_NUM_SIMILAR_IMAGES]]
    for i, item in enumerate(ret):
        ext = ""
        mime = item['mime']
        if mime == "image/jpeg":
            ext = ".jpg"
        elif mime == "image/png":
            ext = ".png"
        elif mime == "image/gif":
            ext = ".gif"
        item['fname'] = str(item['id']) + ext

    return flask.jsonify(ret)


_vgg16 = VGG16Layers(pretrained_model="./VGG_ILSVRC_16_layers.npz")


@app.route("/image/<id>/extract_feature", methods=["GET"])
def extract_feature(id):
    global _image_features_dict

    cursor = db().cursor()
    cursor.execute("SELECT imgdata FROM posts where id = %s", (id,))
    result = cursor.fetchone()

    img = Image.open(io.BytesIO(result["imgdata"]))
    img_np = np.array(img)

    feat = _vgg16.extract(img_np[np.newaxis, :], layers=["fc7"])["fc7"].data
    np.save(os.path.join(FEATURES_DIR, "{}.npy".format(id)), feat)
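A hedged sketch of how the fc7 features saved above might be compared to build the similarities list at the top of this snippet (cosine similarity; the helper name is hypothetical):

def cosine_similarity(a, b):
    a, b = a.ravel(), b.ravel()
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))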
Example #19
 def __init__(self, alpha=[0, 0, 1, 1], beta=[1, 1, 1, 1]):
     from chainer.links import VGG16Layers
     print("load model... vgg_chainer")
     self.model = VGG16Layers()
     self.alpha = alpha
     self.beta = beta
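A hedged sketch of how the alpha/beta layer weights above are typically consumed by a perceptual loss (the layer list and loss form are assumptions, not taken from this snippet):

import chainer.functions as F

def perceptual_loss(self, x, y):
    # Compare VGG activations of x and y, weighted per layer by self.alpha
    layers = ['conv1_2', 'conv2_2', 'conv3_3', 'conv4_3']
    fx = self.model(x, layers=layers)
    fy = self.model(y, layers=layers)
    loss = 0
    for a, layer in zip(self.alpha, layers):
        if a > 0:
            loss += a * F.mean_squared_error(fx[layer], fy[layer])
    return loss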
Example #20
import chainer
import numpy as np
import chainer.functions as F
from chainer import training
import gensim

#from chainer.training import extension 

# load pre-trained VGG16
from chainer.links import VGG16Layers
model_1 = VGG16Layers()
print(model_1)
#model_1 = F.flatten(model_1)

# load word2vec
from gensim.models import word2vec
data = word2vec.Text8Corpus('corpus80.txt')
#model = word2vec.Word2Vec(data, size=200)
model = word2vec.Word2Vec(data, size=300)
# model = F.flatten(model)

#out = model.most_similar(positive=['delicious', 'meal'])
#for x in out:
#    print(x[0], x[1])

# Concatenate the two feature vectors
# NOTE: F.concat takes a tuple of arrays/Variables, not the model objects
# themselves; features must be extracted from each model first (see the
# sketch below; 'vgg_features'/'w2v_features' are hypothetical names).
new_layers = F.concat((vgg_features, w2v_features), axis=1)


# Fully connected layer
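A self-contained sketch of the intended fusion (the shapes are assumptions: a 4096-d VGG fc7 feature concatenated with a 300-d word2vec vector, then a fully connected layer):

import numpy as np
import chainer.functions as F
import chainer.links as L

img_feat = np.zeros((1, 4096), dtype=np.float32)   # e.g. model_1.extract([img], ['fc7'])['fc7'].data
word_feat = np.zeros((1, 300), dtype=np.float32)   # e.g. model.wv['meal'][np.newaxis]
fused = F.concat((img_feat, word_feat), axis=1)    # shape (1, 4396)
fc = L.Linear(4396, 1000)                          # hypothetical output size
out = fc(fused)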