Example #1
def convert_caffe_model(load_path, save_path):
    # load the Caffe model
    caffe_model = CaffeFunction(load_path)
    # convert it to a pickle and save
    pickle.dump(caffe_model, open(save_path, 'wb'))
    # debug output
    print("[Save] {0}".format(save_path))
def _make_chainermodel_npz(path_npz, path_caffemodel, model, num_class):
    print('Now loading caffemodel (this may take a few minutes)')
    if not os.path.exists(path_caffemodel):
        raise IOError('The pre-trained caffemodel does not exist.')
    caffemodel = CaffeFunction(path_caffemodel)
    chainermodel = DualCenterProposalNetworkRes50_predict7(n_class=num_class)
    _transfer_pretrain_resnet50(caffemodel, chainermodel)
    classifier_model = L.Classifier(chainermodel)
    serializers.save_npz(path_npz, classifier_model, compression=False)
    print('model npz is saved')
    serializers.load_npz(path_npz, model)
    return model
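For reference, a minimal sketch of loading the converted pickle back (the path is a placeholder, and unpickling generally requires a Chainer version compatible with the one that wrote the file):

# Minimal loading sketch; 'model.pkl' stands in for the save_path used above.
import pickle

with open('model.pkl', 'rb') as f:
    caffe_model = pickle.load(f)  # restored CaffeFunction, much faster than re-parsing the .caffemodel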
Example #3
def load_caffemodel(org_path, dump=True):
    pkl_path = org_path + '.pkl'
    if os.path.exists(pkl_path):
        logger.info('Load pkl model: %s' % pkl_path)
        model = pickle.load(open(pkl_path, 'rb'))
    else:
        if not os.path.exists(org_path):
            logger.error('Failed to load caffe model: %s' % org_path)
            return None
        logger.info('Load caffe model: %s' % org_path)
        model = CaffeFunction(org_path)
        if dump:
            logger.info('Save pkl model: %s' % pkl_path)
            pickle.dump(model, open(pkl_path, 'wb'))
    return model
Example #4
def _make_chainermodel_npz(path_npz, path_caffemodel, model, num_class, v2=True):
    print('Now loading caffemodel (this may take a few minutes)')
    if not os.path.exists(path_caffemodel):
        raise IOError('The pre-trained caffemodel does not exist.')
    caffemodel = CaffeFunction(path_caffemodel)
    if v2:
        chainermodel = DepthInvariantNetworkRes50FCNVer2(n_class=num_class)
        _transfer_pretrain_resnet50(caffemodel, chainermodel, use_res5=False)
    else:
        chainermodel = DepthInvariantNetworkRes50FCN(n_class=num_class)
        _transfer_pretrain_resnet50(caffemodel, chainermodel, use_res5=True)
    classifier_model = L.Classifier(chainermodel)
    serializers.save_npz(path_npz, classifier_model, compression=False)
    print('model npz is saved')
    serializers.load_npz(path_npz, model)
    return model
    def __init__(self, n_classes, unchain=True):
        w = chainer.initializers.HeNormal()
        model = CaffeFunction(
            self.CAFFEMODEL_FN)  # load the Caffe model (this takes a while)
        del model.encode1  # delete unneeded layers to save memory
        del model.encode2
        del model.forwards['encode1']
        del model.forwards['encode2']
        model.layers = model.layers[:-2]

        super(Illust2Vec, self).__init__()
        with self.init_scope():
            self.trunk = model  # include the original Illust2Vec model in this model as the trunk
            self.fc7 = L.Linear(None, 4096, initialW=w)
            self.bn7 = L.BatchNormalization(4096)
            self.fc8 = L.Linear(4096, n_classes, initialW=w)
Example #6
def make_i2v_with_chainer(param_path, tag_path=None, threshold_path=None):
    # ignore UserWarnings from chainer
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        net = CaffeFunction(param_path)

    kwargs = {}
    if tag_path is not None:
        tags = json.loads(open(tag_path, 'r').read())
        assert(len(tags) == 1539)
        kwargs['tags'] = tags

    if threshold_path is not None:
        fscore_threshold = np.load(threshold_path)['threshold']
        kwargs['threshold'] = fscore_threshold

    return ChainerI2V(net, **kwargs)
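A possible call site for the function above; the file names are placeholders for illustration, and the threshold file is expected to contain a 'threshold' array as in the np.load call above.

# Hypothetical usage sketch; all file names below are placeholders.
i2v = make_i2v_with_chainer(
    'illust2vec_tag_ver200.caffemodel',   # Caffe weights
    tag_path='tag_list.json',             # JSON list with the 1539 tags checked by the assert
    threshold_path='tag_thresholds.npz')  # npz file containing a 'threshold' array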
Example #7
def main():

    parser = argparse.ArgumentParser(
            description="""Convert the SSD caffemodel, load the pose model,
            and save the combined PoseSsdNet model.""")
    parser.add_argument("--pose_model_path",
                        default="coco_posenet.npz",
                        help="Path to the pose model in chainer")
    parser.add_argument(
        "--ssd_model_path",
        default=
        "ssd/VGGNet/coco/SSD_300x300/VGG_coco_SSD_300x300_iter_400000.caffemodel",
        help="Path to the SSD model in caffe")
    parser.add_argument("--save_model_path",
                        default="coco_pose_ssd_net.npz",
                        help="Path to the pose ssd model")
    args = parser.parse_args()

    n_class = 80  # coco
    pose_ssd_model = CocoPoseSsdNet(n_class)
    print("Loaded pose_ssd_net")

    # copy pose net params
    pose_model = CocoPoseNet()
    load_npz(args.pose_model_path, pose_model)
    print("Loaded pose_net")
    for src_lname, dst_lname in zip(pose_layers, my_pose_layer):
        copy_conv_net(pose_model, pose_ssd_model, src_lname, dst_lname)

    # copy ssd net params
    ssd_model = CaffeFunction(args.ssd_model_path)
    print("Loaded caffe_ssd_net")
    for src_lname, dst_lname in zip(ssd_layers, my_ssd_layers):
        if src_lname == 'norm4':
            copy_ssd_norm_layer(args.ssd_model_path, pose_ssd_model)
        else:
            copy_conv_net(ssd_model, pose_ssd_model, src_lname, dst_lname)

    # save pose ssd model
    save_npz(args.save_model_path, pose_ssd_model)
Example #8
import argparse
import pickle
from chainer.links.caffe import CaffeFunction

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Convert from caffe model to chainer model')
    parser.add_argument('-m',
                        '--model',
                        type=str,
                        required=True,
                        help='path to caffe model file')
    parser.add_argument('-o',
                        '--out',
                        type=str,
                        default=None,
                        help='path to output file')
    args = parser.parse_args()

    if not args.out:
        outfile = (args.model).rsplit('.', 1)[0] + '.chainermodel.pkl'
    else:
        outfile = args.out
    vgg = CaffeFunction(args.model)
    pickle.dump(vgg, open(outfile, 'wb'))
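A usage sketch for the converter above (the script and model file names are assumptions): run it once from the shell, then reuse the pickle from other scripts.

# Hypothetical usage; assumes the script was saved as convert_caffe_to_chainer.py:
#   $ python convert_caffe_to_chainer.py -m bvlc_alexnet.caffemodel
# This writes bvlc_alexnet.chainermodel.pkl next to the input file.
import pickle

with open('bvlc_alexnet.chainermodel.pkl', 'rb') as f:
    net = pickle.load(f)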
Example #9
import argparse

import chainer
from chainer import serializers
from chainer.links.caffe import CaffeFunction

import config
from ssd import SSD300

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('source')
    parser.add_argument('target')
    parser.add_argument('--baseonly', action='store_true')
    parser.set_defaults(baseonly=False)
    args = parser.parse_args()

    caffe_model = CaffeFunction(args.source)
    model = SSD300(n_class=20, aspect_ratios=config.aspect_ratios)

    model.base.conv1_1.copyparams(caffe_model.conv1_1)
    model.base.conv1_2.copyparams(caffe_model.conv1_2)

    model.base.conv2_1.copyparams(caffe_model.conv2_1)
    model.base.conv2_2.copyparams(caffe_model.conv2_2)

    model.base.conv3_1.copyparams(caffe_model.conv3_1)
    model.base.conv3_2.copyparams(caffe_model.conv3_2)
    model.base.conv3_3.copyparams(caffe_model.conv3_3)

    model.base.conv4_1.copyparams(caffe_model.conv4_1)
    model.base.conv4_2.copyparams(caffe_model.conv4_2)
    model.base.conv4_3.copyparams(caffe_model.conv4_3)
Example #10
import numpy as np
from PIL import Image
from chainer import Variable
from chainer.links import VGG16Layers
from chainer.links.caffe import CaffeFunction

from analysis import Analysis

model = VGG16Layers()

img = Image.open("path/to/image.jpg")
feature = model.extract([img], layers=["fc7"])["fc7"]

# Load the model
func = CaffeFunction('bvlc_googlenet.caffemodel')

# Minibatch of size 10
x_data = np.ndarray((10, 3, 227, 227), dtype=np.float32)

# Forward the pre-trained net
x = Variable(x_data)
y, = func(inputs={'data': x}, outputs=['fc8'])

# create caffemodel neural network

# create analysis object
ana = Analysis(ann.model, fname='tmp')

# handle sequential data; deal with classifier analysis separately

# analyse data
Example #11
from chainer.links.caffe import CaffeFunction
import os
import pickle
if os.path.exists("vgg.pkl"):
    with open("vgg.pkl", "rb") as f:
        vgg = pickle.load(f)
else:
    vgg = CaffeFunction("VGG_ILSVRC_19_layers.caffemodel")
    with open("vgg.pkl", "wb") as f:
        pickle.dump(vgg, f)

for l in vgg.layers:
    name, inp, out = l
    if hasattr(vgg, name):
        print(name, ":", inp[0], "->", out[0])
    else:
        print(name)

print()

for l in vgg.children():
    print(l.W.shape, l.name)
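A slightly more defensive variant of the last loop (a sketch): not every child link necessarily exposes a W parameter, so guard the attribute access.

# Defensive variant of the loop above: skip children without a W parameter.
for l in vgg.children():
    if getattr(l, 'W', None) is not None:
        print(l.W.shape, l.name)
    else:
        print('(no W)', l.name)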
Example #12
# -*- coding: utf-8 -*-
import cPickle as pickle
from chainer.links.caffe import CaffeFunction

vgg = CaffeFunction('../caffenet.caffemodel')
with open("./caffenet.pkl", 'wb') as fo:
    pickle.dump(vgg, fo)
Example #13
def convert_caffe2chainer():

    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", default="dataset")
    args = parser.parse_args()

    print('start loading model file...')
    caffe_model = CaffeFunction('googlenet.caffemodel')
    print('Done.')

    # copy parameters from caffemodel into chainer model
    print('start copy params.')
    b, _, _ = dataset_label(args.dataset)
    googlenet = GoogleNetBN(n_class=len(b))

    googlenet.conv1.W.data = caffe_model['conv1/7x7_s2'].W.data
    googlenet.conv2.W.data = caffe_model['conv2/3x3'].W.data
    """Inception module of the new GoogLeNet with BatchNormalization."""
    # inc3a
    googlenet.inc3a.conv1.W.data = caffe_model['inception_3a/1x1'].W.data
    googlenet.inc3a.conv3.W.data = caffe_model['inception_3a/3x3'].W.data
    googlenet.inc3a.conv33a.W.data = caffe_model[
        'inception_3a/double3x3a'].W.data
    googlenet.inc3a.conv33b.W.data = caffe_model[
        'inception_3a/double3x3b'].W.data
    googlenet.inc3a.proj3.W.data = caffe_model[
        'inception_3a/3x3_reduce'].W.data
    googlenet.inc3a.proj33.W.data = caffe_model[
        'inception_3a/double3x3_reduce'].W.data
    googlenet.inc3a.poolp.W.data = caffe_model['inception_3a/pool_proj'].W.data

    # inc3b
    googlenet.inc3b.conv1.W.data = caffe_model['inception_3b/1x1'].W.data
    googlenet.inc3b.conv3.W.data = caffe_model['inception_3b/3x3'].W.data
    googlenet.inc3b.conv33a.W.data = caffe_model[
        'inception_3b/double3x3a'].W.data
    googlenet.inc3b.conv33b.W.data = caffe_model[
        'inception_3b/double3x3b'].W.data
    googlenet.inc3b.proj3.W.data = caffe_model[
        'inception_3b/3x3_reduce'].W.data
    googlenet.inc3b.proj33.W.data = caffe_model[
        'inception_3b/double3x3_reduce'].W.data
    googlenet.inc3b.poolp.W.data = caffe_model['inception_3b/pool_proj'].W.data

    # inc3c
    googlenet.inc3c.conv3.W.data = caffe_model['inception_3c/3x3'].W.data
    googlenet.inc3c.conv33a.W.data = caffe_model[
        'inception_3c/double3x3a'].W.data
    googlenet.inc3c.conv33b.W.data = caffe_model[
        'inception_3c/double3x3b'].W.data
    googlenet.inc3c.proj3.W.data = caffe_model[
        'inception_3c/3x3_reduce'].W.data
    googlenet.inc3c.proj33.W.data = caffe_model[
        'inception_3c/double3x3_reduce'].W.data

    # inc4a
    googlenet.inc4a.conv1.W.data = caffe_model['inception_4a/1x1'].W.data
    googlenet.inc4a.conv3.W.data = caffe_model['inception_4a/3x3'].W.data
    googlenet.inc4a.conv33a.W.data = caffe_model[
        'inception_4a/double3x3a'].W.data
    googlenet.inc4a.conv33b.W.data = caffe_model[
        'inception_4a/double3x3b'].W.data
    googlenet.inc4a.proj3.W.data = caffe_model[
        'inception_4a/3x3_reduce'].W.data
    googlenet.inc4a.proj33.W.data = caffe_model[
        'inception_4a/double3x3_reduce'].W.data
    googlenet.inc4a.poolp.W.data = caffe_model['inception_4a/pool_proj'].W.data

    # inc4b
    googlenet.inc4b.conv1.W.data = caffe_model['inception_4b/1x1'].W.data
    googlenet.inc4b.conv3.W.data = caffe_model['inception_4b/3x3'].W.data
    googlenet.inc4b.conv33a.W.data = caffe_model[
        'inception_4b/double3x3a'].W.data
    googlenet.inc4b.conv33b.W.data = caffe_model[
        'inception_4b/double3x3b'].W.data
    googlenet.inc4b.proj3.W.data = caffe_model[
        'inception_4b/3x3_reduce'].W.data
    googlenet.inc4b.proj33.W.data = caffe_model[
        'inception_4b/double3x3_reduce'].W.data
    googlenet.inc4b.poolp.W.data = caffe_model['inception_4b/pool_proj'].W.data

    # inc4c
    # googlenet.inc4c.conv1.W.data = caffe_model['inception_4c/1x1'].W.data
    googlenet.inc4c.conv3.W.data = caffe_model['inception_4c/3x3'].W.data
    googlenet.inc4c.conv33a.W.data = caffe_model[
        'inception_4c/double3x3a'].W.data
    googlenet.inc4c.conv33b.W.data = caffe_model[
        'inception_4c/double3x3b'].W.data
    googlenet.inc4c.proj3.W.data = caffe_model[
        'inception_4c/3x3_reduce'].W.data
    googlenet.inc4c.proj33.W.data = caffe_model[
        'inception_4c/double3x3_reduce'].W.data
    # googlenet.inc4c.poolp.W.data = caffe_model['inception_4c/pool_proj'].W.data

    # inc4d
    # googlenet.inc4d.conv1.W.data = caffe_model['inception_4d/1x1'].W.data
    googlenet.inc4d.conv3.W.data = caffe_model['inception_4d/3x3'].W.data
    googlenet.inc4d.conv33a.W.data = caffe_model[
        'inception_4d/double3x3a'].W.data
    googlenet.inc4d.conv33b.W.data = caffe_model[
        'inception_4d/double3x3b'].W.data
    googlenet.inc4d.proj3.W.data = caffe_model[
        'inception_4d/3x3_reduce'].W.data
    googlenet.inc4d.proj33.W.data = caffe_model[
        'inception_4d/double3x3_reduce'].W.data
    # googlenet.inc4d.poolp.W.data = caffe_model['inception_4d/pool_proj'].W.data

    # inc4e
    googlenet.inc4e.conv3.W.data = caffe_model['inception_4e/3x3'].W.data
    googlenet.inc4e.conv33a.W.data = caffe_model[
        'inception_4e/double3x3a'].W.data
    googlenet.inc4e.conv33b.W.data = caffe_model[
        'inception_4e/double3x3b'].W.data
    googlenet.inc4e.proj3.W.data = caffe_model[
        'inception_4e/3x3_reduce'].W.data
    googlenet.inc4e.proj33.W.data = caffe_model[
        'inception_4e/double3x3_reduce'].W.data

    # inc5a
    googlenet.inc5a.conv1.W.data = caffe_model['inception_5a/1x1'].W.data
    googlenet.inc5a.conv3.W.data = caffe_model['inception_5a/3x3'].W.data
    googlenet.inc5a.conv33a.W.data = caffe_model[
        'inception_5a/double3x3a'].W.data
    googlenet.inc5a.conv33b.W.data = caffe_model[
        'inception_5a/double3x3b'].W.data
    googlenet.inc5a.proj3.W.data = caffe_model[
        'inception_5a/3x3_reduce'].W.data
    googlenet.inc5a.proj33.W.data = caffe_model[
        'inception_5a/double3x3_reduce'].W.data
    googlenet.inc5a.poolp.W.data = caffe_model['inception_5a/pool_proj'].W.data

    # inc5b
    googlenet.inc5b.conv1.W.data = caffe_model['inception_5b/1x1'].W.data
    googlenet.inc5b.conv3.W.data = caffe_model['inception_5b/3x3'].W.data
    googlenet.inc5b.conv33a.W.data = caffe_model[
        'inception_5b/double3x3a'].W.data
    googlenet.inc5b.conv33b.W.data = caffe_model[
        'inception_5b/double3x3b'].W.data
    googlenet.inc5b.proj3.W.data = caffe_model[
        'inception_5b/3x3_reduce'].W.data
    googlenet.inc5b.proj33.W.data = caffe_model[
        'inception_5b/double3x3_reduce'].W.data
    googlenet.inc5b.poolp.W.data = caffe_model['inception_5b/pool_proj'].W.data

    # googlenet.loss1_conv.W.data = caffe_model["loss1/conv"].W.data
    # googlenet.loss1_fc1.W.data = caffe_model["loss1/fc"].W.data
    # googlenet.loss2_conv.W.data = caffe_model["loss2/conv"].W.data
    # googlenet.loss2_fc1.W.data = caffe_model["loss2/fc"].W.data

    serializers.save_npz('tuned_googlenetbn.npz', googlenet)
    print('Done')
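The long block of explicit assignments above can also be written as a table-driven loop; this is only a sketch, assuming the link and Caffe layer names stay exactly as used above (biases would be copied the same way where present).

# Sketch of a table-driven alternative to the explicit W.data assignments above.
layer_map = {
    'conv1': 'conv1/7x7_s2',
    'conv2': 'conv2/3x3',
    'inc3a.conv1': 'inception_3a/1x1',
    'inc3a.conv3': 'inception_3a/3x3',
    # ... remaining entries follow the same naming pattern as the assignments above
}
for chainer_path, caffe_name in layer_map.items():
    link = googlenet
    for attr in chainer_path.split('.'):
        link = getattr(link, attr)
    link.W.data = caffe_model[caffe_name].W.data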
Example #14
    # Load data and SegmentationPrefetcher
    print("loading data...")
    data = loadseg.SegmentationData("dataset/broden1_227")
    pf = loadseg.SegmentationPrefetcher(data,
                                        categories=["image"],
                                        split=None,
                                        once=True,
                                        batch_size=1)

    # prepare generator for image data in numpy array
    batch = pf.tensor_batches(bgr_mean=MEAN)

    # loading caffe model
    print("loading caffe model...")
    model = CaffeFunction(
        "../NetDissect/zoo/caffe_reference_imagenet.caffemodel")
    print("Caffe model loaded.")
    model.cleargrads()

    print("saving saliency_map")
    save_saliency_map(model, batch, end=1000)

    # initialize batch generator
    batch = pf.tensor_batches(bgr_mean=MEAN)
    W = 4
    H = 3
    fig, axes = plt.subplots(W, H * 2)
    axes = axes.reshape(-1)
    start = 80
    category = []
    for i, (smap, index) in enumerate(saliency_map(model, batch)):
Example #15
def main():
    # add command-line options
    parser = argparse.ArgumentParser(description='Chainer : C3D')
    parser.add_argument('--arch',
                        '-a',
                        default='ADAM',
                        help='Convnet architecture')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=1,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=100,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out',
                        '-o',
                        default='../result/20171006',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='resume',
                        help='Resume the training from snapshot')
    parser.add_argument('--unit',
                        '-u',
                        type=int,
                        default=1000,
                        help='Number of units')
    parser.add_argument('--input',
                        '-i',
                        default='../data/sample_videoset_171x128',
                        help='Directory to input data')

    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# unit: {}'.format(args.unit))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('# input: {}'.format(args.input))
    print('')

    # load the dataset
    v = videoread.VideoRead()
    train, test = v.combine_data_label(args.input)

    # set up the pre-trained Caffe model
    model = CaffeFunction("c3d_resnet18_ucf101_r2_ft_iter_20000.caffemodel")
    #print (pre_model.shape)

    #model = c3dnet.C3D()
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        model.to_gpu()  # Copy the model to the GPU

    # set up an optimizer
    optimizer = chainer.optimizers.MomentumSGD()
    optimizer.setup(model)
    "caffemodelへの入力の仕方"
    #models.layersでレイヤー一覧
    #model(inputs={"data": np.zeros((8,128,171,3))}, outputs={"conv1"})
    # make iterators
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test,
                                                 args.batchsize,
                                                 repeat=False,
                                                 shuffle=False)
    # set up a trainer
    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    val_interval = (20), 'iteration'
    log_interval = (20), 'iteration'
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'),
                   trigger=val_interval)

    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy', 'lr'
    ]),
                   trigger=log_interval)
    # show a progress bar
    trainer.extend(extensions.ProgressBar())  #update_interval=10))

    trainer.run()
    serializers.save_npz("mymodel.npz", model)
Example #16
import pickle
from chainer.links.caffe import CaffeFunction
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--output', '-o', default='alexnet.pkl')
parser.add_argument('--input', '-i', default='bvlc_alexnet.caffemodel')
args = parser.parse_args()

loadpath = args.input
savepath = args.output

alexnet = CaffeFunction(loadpath)
pickle.dump(alexnet, open(savepath, 'wb'))
from __future__ import print_function

import os, sys

from chainer.links.caffe import CaffeFunction
from chainer import serializers

import utils

print('load VGG16 caffemodel')
vgg = CaffeFunction('pretrained_model/VGG_ILSVRC_16_layers.caffemodel')
print('save "vgg16.npz"')
serializers.save_npz('pretrained_model/vgg16.npz', vgg)
Example #18
		print 'average time: {} ms\n'.format(float(record['time']) * 1000/ record['number'])
	print '================================================'

timer_hook = TimerHook()
layer_timer_hook = TimerHook()
layer_timer_hook.name = 'Layer-by-layer timer hook'

# configuration for inference
chainer.config.train = False
progress_bar = ProgressBar(estimate_load_time)
progress_bar.start()

print 'loading caffe model...'

start_time = time.time()
func = CaffeFunction(model_path)
end_time = time.time()

progress_bar.end()
time.sleep(1)
print '\nsuccessfully loaded caffe model, it took %s seconds' % (end_time - start_time)

max_iter = 1000 if 50000 / N >= 1000 else 50000 / N
total_time = 0
average_time = 0


# global batch_generator
bg = batch_generator()

# count top-1 and top-5 accuracy
Example #19
    # Define a model
    if args.pretrain:
        logger.info('Define a R-CNN_Face model')
        model = models.RCNNFaceModel()
    else:
        logger.info('Define a HyperFace model')
        model = models.HyperFaceModel()

    # Initialize model
    if not args.resume:
        if args.pretrain and config.alexnet_caffemodel_path:
            # Initialize using caffemodel
            logger.info('Overwrite conv layers using caffemodel "{}"'
                        .format(config.alexnet_caffemodel_path))
            caffe_model = CaffeFunction(config.alexnet_caffemodel_path)
            copy_layers(caffe_model, model)
        elif not args.pretrain and args.pretrainedmodel:
            # Initialize using pretrained model
            logger.info('Overwrite conv layers using pretrained model "{}"'
                        .format(args.pretrainedmodel))
            pre_model = models.RCNNFaceModel()
            chainer.serializers.load_npz(args.pretrainedmodel, pre_model)
            copy_layers(pre_model, model)

    # Setup GPU
    
    if config.gpu >= 0:
        logger.info('Verifying GPU')
        chainer.cuda.check_cuda_available()
        chainer.cuda.get_device(config.gpu).use()
Example #20
def save(func):
    for candidate in func.layers:
        if (candidate[0]) in dir(func):
            name = candidate[0]
            savePlot(func[name], name)


## AlexNet visualizer
from chainer.links.caffe import CaffeFunction
import cPickle as pickle
try:  # Load pickled one
    gn = pickle.load(open('bvlc_alexnet.pickle'))
except:  # Or load the original & keep it
    print "loading the original caffe model, takes time. hold on and relax..."
    gn = CaffeFunction('bvlc_alexnet.caffemodel')
    pickle.dump(gn, open('bvlc_alexnet.pickle', 'wb'), -1)

showPlot(gn.conv1)
#showPlot(gn.conv2)
#showPlot(gn.conv3)
#showPlot(gn.conv4)
#showPlot(gn.conv5)
#showPlot(gn.fc6)
#showPlot(gn.fc7)
#showPlot(gn.fc8)

# alternative visualizer for conv1 (96, 3, 11, 11)
for i in range(4):
    plt.subplot(2, 2, i + 1)
    plt.axis('off')
Example #21
from chainer.links.caffe import CaffeFunction
from FCN import PPO
import torch
import numpy as np
net = CaffeFunction('./initial_weight/zhang_cvpr17_denoise_50_gray.caffemodel')
print(net.layer1.W.data.shape)
model = PPO(9, 1)
model_dict = model.state_dict()
print(model_dict['conv.0.weight'].size())
print(model_dict.keys())
model_dict['conv.0.weight'] = torch.FloatTensor(net.layer1.W.data)
model_dict['conv.0.bias'] = torch.FloatTensor(net.layer1.b.data)
model_dict['conv.2.weight'] = torch.FloatTensor(net.layer3.W.data)
model_dict['conv.2.bias'] = torch.FloatTensor(net.layer3.b.data)
model_dict['conv.4.weight'] = torch.FloatTensor(net.layer6.W.data)
model_dict['conv.4.bias'] = torch.FloatTensor(net.layer6.b.data)
model_dict['conv.6.weight'] = torch.FloatTensor(net.layer9.W.data)
model_dict['conv.6.bias'] = torch.FloatTensor(net.layer9.b.data)

model_dict['diconv1_p.weight'] = torch.FloatTensor(net.layer12.W.data)
model_dict['diconv1_p.bias'] = torch.FloatTensor(net.layer12.b.data)
model_dict['diconv2_p.weight'] = torch.FloatTensor(net.layer15.W.data)
model_dict['diconv2_p.bias'] = torch.FloatTensor(net.layer15.b.data)

model_dict['diconv1_v.weight'] = torch.FloatTensor(net.layer12.W.data)
model_dict['diconv1_v.bias'] = torch.FloatTensor(net.layer12.b.data)
model_dict['diconv2_v.weight'] = torch.FloatTensor(net.layer15.W.data)
model_dict['diconv2_v.bias'] = torch.FloatTensor(net.layer15.b.data)
model.load_state_dict(model_dict)
torch.save(model.state_dict(), "./torch_initweight/sig50_gray.pth")
Example #22
def main():
    # word list
    words = {}

    # read the word list
    f = codecs.open("data\\caption-words.txt", "r", "utf-8")

    line = f.readline()
    while line:
        # strip unwanted characters and split
        l = line.strip().split(",")
        words[l[1]] = int(l[0])
        # read the next line
        line = f.readline()
    # close the file
    f.close()

    # open the input files
    s_w = codecs.open("data\\caption-wakati.txt", "r", "utf-8")
    s_i = codecs.open("data\\img_id.txt", "r", "utf-8")

    # all pairs of image vectors and captions
    sentence = []

    # TODO: cache the CaffeModel as a pickle
    # ReferenceError: weakly-referenced object no longer exists

    MODEL = "model\\bvlc_alexnet.caffemodel"
    # PICKLE = "model\\alex_net.pkl"
    # load the pickle
    # if os.path.exists(PICKLE):
    #     # if it exists
    #     model = pickle.load(open(PICKLE, "rb"))

    # else:
    #     # if it does not exist
    #     if os.path.exists(MODEL):
    #         convert the CaffeModel to a pickle
    #         model = CaffeFunction(MODEL)
    #         pickle.dump(model, open(PICKLE, "wb"))
    #         model = pickle.load(open(PICKLE, "rb"))
    #     else:
    #         abort if the CaffeModel does not exist
    #         print("model not found")
    #         exit()

    model = CaffeFunction(MODEL)

    if uses_device >= 0:
        # use the GPU
        chainer.cuda.get_device_from_id(0).use()
        chainer.cuda.check_cuda_available()
        model.to_gpu()

    # process one line at a time
    line = s_w.readline()
    img_id = s_i.readline()

    while line and img_id:
        # list the words in the line
        l = line.strip().split(" ")
        # build the file name
        file_name = "image\\" + img_id.strip() + ".jpg"
        # debug
        print(file_name)
        # read the image file
        img = Image.open(file_name).resize((400, 400)).convert("RGB")

        # array of image vectors
        vectors = []
        # make 5 crops in total: the 4 corners plus the center
        for s in [
            (0, 0, 227, 227),  # top left
            (173, 0, 400, 227),  # top right
            (0, 173, 227, 400),  # bottom left
            (173, 173, 400, 400),  # bottom right
            (86, 86, 313, 313)  # center
        ]:
            # crop the region from the image
            cropimg = img.crop(s)
            # convert pixels to numeric data
            pix = np.array(cropimg, dtype=np.float32)
            pix = (pix[::-1]).transpose(2, 0, 1)
            x = cp.array([pix], dtype=cp.float32)

            # Debug
            print(x.ndim)
            print(x.shape)
            print(x.size)

            # Note: every crop must be 227x227; a wrong crop size makes
            # fc6 receive a mismatched input (e.g. 12288 instead of 9216)

            # extract the output of the fc6 layer
            e, = model(inputs={"data": x}, outputs=["fc6"], disable=["drop6"])
            # store the result in the image vector array
            vectors.append(e.data[0].copy())
        # array of word ids
        lines = [0]
        # convert words to ids
        for x in l:
            if x in words:
                lines.append(words[x])
        # append the end-of-sentence token at the end of the line
        lines.append(1)
        sentence.append((vectors, lines))
        # next line
        line = s_w.readline()
        img_id = s_i.readline()

    # close the files
    s_w.close()
    s_i.close()

    # longest sentence
    l_max = max([len(l[1]) for l in sentence])

    # pad sentences to the same length (needed for batching)
    for i in range(len(sentence)):
        # pad the remainder with the end-of-sentence token
        sentence[i][1].extend([1] * (l_max - len(sentence[i][1])))

    # build the neural network
    model = ImageCaption_NN(max(words.values()) + 1, 500)

    if uses_device >= 0:
        model.to_gpu()

    # create the optimizer
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    # create the iterator
    train_iter = iterators.SerialIterator(sentence, batch_size, shuffle=False)

    updater = ImageCaptionUpdater(train_iter, optimizer, device=uses_device)
    trainer = training.Trainer(updater, (80, "epoch"), out="result")

    # visualize training progress
    trainer.extend(extensions.ProgressBar(update_interval=1))
    # run training
    trainer.run()

    # save the result
    chainer.serializers.save_hdf5("result.hdf5", model)
    for child in src.children():
        if child.name not in dst.__dict__: continue
        dst_child = dst[child.name]
        if type(child) != type(dst_child): continue
        if isinstance(child, link.Chain):
            copy_model(child, dst_child)
        if isinstance(child, link.Link):
            match = True
            for a, b in zip(child.namedparams(), dst_child.namedparams()):
                if a[0] != b[0]:
                    match = False
                    break
                if a[1].data.shape != b[1].data.shape:
                    match = False
                    break
            if not match:
                print('Ignore %s because of parameter mismatch' % child.name)
                continue
            for a, b in zip(child.namedparams(), dst_child.namedparams()):
                b[1].data = a[1].data
            print('Copy %s' % child.name)

print('load VGG16 caffemodel')
ref = CaffeFunction('VGG_ILSVRC_16_layers.caffemodel')
vgg = VGG()
print('copy weights')
copy_model(ref, vgg)

print('save "vgg16.model"')
serializers.save_npz('vgg16.model', vgg)
Example #24
from chainer.serializers import npz
import os, tkinter.filedialog, tkinter, tkinter.messagebox
from chainer.links.caffe import CaffeFunction
root = tkinter.Tk()
root.withdraw()
fTyp = [("", "*")]
iDir = os.path.abspath(os.path.dirname(__file__))
path = tkinter.filedialog.askopenfilename(filetypes=fTyp, initialdir=iDir)
model = CaffeFunction(path)
npz.save_npz('vgg16.npz', model, compression=False)