Example #1
    def __init__(self, fn="illust2vec_tag_ver200.caffemodel", alpha=[0,0,0,1,10,100], beta=[0.1,1,1,10,100,1000]):
        print "load model... %s"%fn
        self.model = caffe.CaffeFunction(fn)
        self.alpha = alpha
        self.beta = beta
#        self.pool_func = F.max_pooling_2d
        self.pool_func = F.average_pooling_2d
Example #2
    def __init__(self, fn="bvlc_googlenet.caffemodel", alpha=[0,0,0,0.01,0.01], beta=[0.001,1,1,1,1]):
        print "load model... %s"%fn
        self.model = caffe.CaffeFunction(fn)
        self.alpha = alpha
        self.beta = beta
#        self.pool_func = F.max_pooling_2d
        self.pool_func = F.average_pooling_2d
Example #3
 def load(self, path):
     root, ext = os.path.splitext(path)
     if ext == '.pkl':
         with open(path, 'rb') as f:
             self.func = pickle.load(f)
     else:
         self.func = caffe.CaffeFunction(path)
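
A matching save step is not shown in this example; a minimal sketch (an assumption, reusing the same pickle import) might be:

 def save(self, path):
     # hypothetical companion to load(): cache the parsed model as a
     # pickle so the slow .caffemodel parse only happens once
     with open(path, 'wb') as f:
         pickle.dump(self.func, f)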
Example #4
    def __init__(self, path):
        """
        Extract Image Feature Function

        :param path: vgg19 filepath
        :return:None
        """
        self.function = caffe.CaffeFunction(path)
Example #5
 def __init__(self,
              fn="VGG_ILSVRC_16_layers.caffemodel",
              alpha=[0, 0, 1, 1],
              beta=[1, 1, 1, 1]):
     print("load model... %s" % fn)
     self.model = caffe.CaffeFunction(fn)
     self.alpha = alpha
     self.beta = beta
Example #6
 def __init__(self,
              fn="nin_imagenet.caffemodel",
              alpha=[0, 0, 1, 1],
              beta=[1, 1, 1, 1]):
     print("load model... %s" % fn)
     self.model = caffe.CaffeFunction(fn)
     self.alpha = alpha
     self.beta = beta
Example #7
 def load(self, path):
     body, ext = os.path.splitext(path)
     if os.path.exists(path):
         print 'Loading', path
         self.func = pickle.load(open(path, 'rb'))
     else:
         print 'Loading', body+'.caffemodel'
         self.func = caffe.CaffeFunction(body+'.caffemodel')
         if self.func is not None:
             print 'Saving', body+'.pkl'
             pickle.dump(self.func, open(body+'.pkl', 'wb'))
     return self.func is not None
Example #8
def ready():
    global categories
    global mean_image
    global func
    #    print('Loading Contour label file...')
    categories = np.loadtxt(LABEL_PATH, str, delimiter='\n')
    #    print('Loading Contour mean file...')
    mean_image = np.load(MEAN_PATH)
    #    print('Mean shape:', mean_image.shape)
    #    print('Loading Contour model file...')
    func = caffe.CaffeFunction(MODEL_PATH)
    return
Example #9
def load_param(path, obj, ignore_layers=None):
    src = None

    # load .pkl if exists
    if os.path.isfile(change_ext(path, '.pkl')):
        with open(change_ext(path, '.pkl'), 'rb') as f:
            src = pickle.load(f)
    # load caffemodel and save pkl (if .pkl file doesn't exist)
    else:
        src = caffe.CaffeFunction(change_ext(path, '.caffemodel'))
        with open(change_ext(path, '.pkl'), 'wb') as f:
            pickle.dump(src, f)

    copy_model(src, obj, ignore_layers)
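
load_param relies on a change_ext helper that the excerpt does not define; a plausible sketch (an assumption, not the original code):

def change_ext(path, ext):
    # hypothetical helper assumed by load_param: swap the file extension
    return os.path.splitext(path)[0] + ext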
Example #10
def main():
    parser = argparse.ArgumentParser(
        description='Convert caffemodel to chainermodel')
    parser.add_argument('model', help='Path to caffemodel')
    parser.add_argument('--out',
                        '-o',
                        default='chainermodel',
                        help='Output directory')
    args = parser.parse_args()

    try:
        os.makedirs(args.out)
    except OSError:
        pass

    caffemodel = caffe.CaffeFunction(args.model)
    modelname = extractFilename(args.model)
    pickle.dump(caffemodel,
                open(os.path.join(args.out, modelname + '.pkl'), 'wb'), -1)
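
Assuming the script above is saved as convert.py (hypothetical name), a typical invocation would be:

# python convert.py bvlc_googlenet.caffemodel -o chainermodel
# writes chainermodel/<modelname>.pkl, where <modelname> comes from extractFilename()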
Example #11
    L_style = (Fu.mean_squared_error(conv1_1G,conv1_1A)/(4*64*64*50176*50176)
    + Fu.mean_squared_error(conv2_1G,conv2_1A)/(4*128*128*12544*12544)
    + Fu.mean_squared_error(conv3_1G,conv3_1A)/(4*256*256*3136*3136)
    + Fu.mean_squared_error(conv4_1G,conv4_1A)/(4*512*512*784*784)
    )/4  # this is equal weighting of E_l

    loss = a_p_ratio*L_content + L_style
    return loss 

#main

p = readimage(content_image)  # read the content image
a = readimage(style_image)  # read the style image

print "Loading caffe model.It takes time...."
func = caffe.CaffeFunction('VGG_ILSVRC_19_layers.caffemodel')
if gpu_id >= 0:
    func.to_gpu()
print "....fhinish loading!"


x_data = xp.random.randn(1, 3, 224, 224).astype(np.float32)
x = Variable(x_data)

#x = readimage('imge230.png')  # if you want to start from an existing image

#optimize x(=image) with adam

alpha = 1
beta1 = 0.9
beta2 = 0.999
Example #12
    shape = (fixed_w * w / h, fixed_h)
else:
    shape = (fixed_w, fixed_h * h / w)

left = (shape[0] - fixed_w) / 2
top = (shape[1] - fixed_h) / 2
right = left + fixed_w
bottom = top + fixed_h
image = image.resize(shape)
image = image.crop((left, top, right, bottom))
x_data = np.asarray(image).astype(np.float32)
x_data = x_data.transpose(2, 0, 1)
x_data = x_data[::-1, :, :]

mean_image = np.zeros(3 * 224 * 224).reshape(3, 224, 224).astype(np.float32)
mean_image[0] = 103.0
mean_image[1] = 117.0
mean_image[2] = 123.0

x_data -= mean_image
x_data = np.array([x_data])

x = chainer.Variable(x_data)
func = caffe.CaffeFunction('bvlc_googlenet.caffemodel')
y, = func(inputs={'data': x}, outputs=['loss3/classifier'], train=False)

prob = F.softmax(y)
labels = open('labels.txt').read().split('\n')
maxid = np.argmax(prob.data[0])
print labels[maxid], prob.data[0, maxid]
Example #13
import os
import sys
import pickle
import cv2
import numpy as np
from chainer import Variable
from chainer.functions import caffe
from chainer import cuda

if len(sys.argv) < 3:
    print("usage: %s imagedir outputfile" % sys.argv[0])
    quit()
inputpath = sys.argv[1]
outputfile = sys.argv[2]

# Load the Caffe model (this takes a while)
print("loading model ... ", end="", file=sys.stderr)
model = caffe.CaffeFunction("bvlc_reference_caffenet.caffemodel")
model.to_gpu()
print("done", file=sys.stderr)

# Load the mean image
mean = np.load("ilsvrc_2012_mean.npy")  # a 3x256x256 image


def load_images(inputpath, mean):
    imglist = []
    filenames = []

    for root, dirs, files in os.walk(inputpath):
        for fn in sorted(files):
            filenames.append(fn)
            bn, ext = os.path.splitext(fn)
Example #14
                        help='caffe model file path')
    parser.add_argument('--label',
                        '-l',
                        type=str,
                        default='labels.txt',
                        help='label file path')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU device index. negative value indicates CPU')
    args = parser.parse_args()

    device = args.gpu
    categories = np.loadtxt(args.label, str, delimiter='\n')
    caffe_model = caffe.CaffeFunction(args.model)
    if device >= 0:
        caffe_model.to_gpu(device)

    if os.path.isdir(args.image_path):
        image_files = os.listdir(args.image_path)
        image_paths = map(lambda f: os.path.join(args.image_path, f),
                          image_files)
        image_paths = filter(os.path.isfile, image_paths)
    else:
        image_paths = [args.image_path]

    for image_path in image_paths:
        try:
            x = load_image(image_path, device)
            y = predict(caffe_model, x)
Example #15
    )/4 # this is equal weighting of E_l
    #
    ratio = 0.001  #alpha/beta
    loss = ratio * L_content + L_style
    return loss


#main

cuda.init(3)  # 3 is the GPU ID!!

p = readimage('satoshi_fb.png')  # read a content image
a = readimage('style.png')  # read a style image

# download a pretrained caffe model from here: https://gist.github.com/ksimonyan/3785162f95cd2d5fee77#file-readme-md
func = caffe.CaffeFunction(
    'VGG_ILSVRC_19_layers.caffemodel')  #it takes some time.
func.to_gpu()

x_data = np.random.randn(1, 3, 224, 224).astype(np.float32)
x = Variable(cuda.to_gpu(x_data))

x = readimage('imge230.png')  # if you want to start from an existing image

savedir = "satoshi_fb_adam"

#optimize x(=image) with adam
#note we use numpy for optimization

alpha = 1
beta1 = 0.9
beta2 = 0.999
Example #16
import cPickle as pickle
from chainer.functions import caffe
import sys

model_path = sys.argv[1]
pkl_path = sys.argv[2]

model = caffe.CaffeFunction(model_path)
with open(pkl_path, 'wb') as f:
    pickle.dump(model, f, pickle.HIGHEST_PROTOCOL)
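
Usage sketch, assuming the script above is saved as caffe2pkl.py (hypothetical name):

# python caffe2pkl.py bvlc_reference_caffenet.caffemodel caffenet.pkl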
Example #17
    if args.gpu >= 0:
        cuda.get_device(args.gpu).use()

    xp = cuda.cupy if args.gpu >= 0 else np
    xp.random.seed(123)

    # load a new model to be fine-tuned
    if os.path.exists('model/vgg19.pkl'):
        print('Loading Caffe model file: {}'.format('model/vgg19.pkl'))
        with open('model/vgg19.pkl', 'rb') as f:
            vgg = pickle.load(f)
    else:
        print('Loading Caffe model file: {}'.format(
            'model/VGG_ILSVRC_19_layers.caffemodel'))
        vgg = caffe.CaffeFunction('model/VGG_ILSVRC_19_layers.caffemodel')
        pickle.dump(vgg, open('model/vgg19.pkl', 'wb'))

    if args.gpu >= 0:
        vgg.to_gpu()

    X, Y = load_data()
    # x = np.zeros((len(X), 4096), dtype=np.float32)
    x = []

    for i in xrange(0, len(X)):
        pixels = X[i]
        array = xp.asarray(pixels)
        x_data = xp.ascontiguousarray(array)
        if args.gpu >= 0:
            x_data = cuda.to_gpu(x_data)
Example #18
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Convert a caffemodel file to npz format to save time in training preparation
"""

from chainer.functions import caffe
from chainer import serializers

ALEXNET_CAFFEMODEL = 'data/bvlc_alexnet.caffemodel'
ALEXNET_NPZ = 'data/bvlc_alexnet.npz'

if __name__ == '__main__':
    alexnet = caffe.CaffeFunction(ALEXNET_CAFFEMODEL)
    serializers.save_npz(ALEXNET_NPZ, alexnet)
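
Loading the resulting .npz back requires a Chainer model whose parameter names match the saved keys; a minimal sketch, assuming a compatible Alex chain class exists:

from chainer import serializers

model = Alex()  # hypothetical Chain whose parameter names match the npz keys
serializers.load_npz(ALEXNET_NPZ, model)  # far faster than re-parsing the caffemodel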
Example #19
import os
import sys

import cPickle as pickle
import cv2 as cv
import numpy as np
from PIL import Image

import chainer
import chainer.functions as F
import chainer.serializers as s
from chainer import cuda
from chainer.functions import caffe

from animeface import AnimeFaceDataset
from CNN_2 import CNN

alex = caffe.CaffeFunction("bvlc_alexnet.caffemodel")
pickle.dump(alex, open('alex', 'wb'))

print 'Finished converting the Caffe model to a Chainer model!'

cnn = CNN(data=np.zeros(5), target=np.zeros(5), gpu=-1, n_outputs=0)

#cnn.train_and_test(n_epoch=20,batchsize=100)

cnn.dump_model('_chainer_fc6')
Example #20
    im = im - MEAN_VALUES
    return rawim.transpose(2, 0, 1).astype(np.float32)


#main

# Prepare dataset
file_place = '../data/MSCOCO/annotations/captions_train2014.json'
train_image_id2feature = get_image_ids(file_place)
file_place = '../data/MSCOCO/annotations/captions_val2014.json'
val_image_id2feature = get_image_ids(file_place)

# Load the Caffe model
print "loading caffe models"
func = caffe.CaffeFunction('../data/bvlc_googlenet.caffemodel')
if gpu_id >= 0:
    func.to_gpu()
print "done"

print 'feature_extractor'
file_base = '../data/MSCOCO/train2014/COCO_train2014_'
for i, image_id in enumerate(train_image_id2feature.keys()):

    if i % 5000 == 0:
        print i

    try:
        image = image_read_np(file_base +
                              str("{0:012d}".format(image_id) + '.jpg'))
    except Exception as e:
Example #21
    k = 0
    for i in range(imsize[1] / 175):
        for j in range(imsize[0] / 175):
            k += 1
            image_c = image.crop(
                [175 * i, 175 * j, 175 * (i + 1), 175 * (j + 1)])
            image_c.save('%d%d.png' % (i, j))

            imshow(np.asarray(image_c))
            plt.show()

            cnn_dog('%d%d.png' % (i, j), k, categories_dog, func, verbose=True)


###############################################################################
# initialization of the caffe model

categories = np.loadtxt("labels_dog.txt", str, delimiter="\t")
categories_dog = [i for i in range(len(categories))
                  if 'dog' in categories[i]][:-2]

model = 'bvlc_googlenet.caffemodel'
gpu = -1

print('Loading Caffe model file %s...' % model, file=sys.stderr)
try:
    func
except NameError:
    func = caffe.CaffeFunction(model)
print('Loaded', file=sys.stderr)
Example #22
def load_caffemodel(caffemodel):
    print("load model... %s" % caffemodel)
    model = caffe.CaffeFunction(caffemodel)
    return lambda layer_name: Conv(getattr(model, layer_name))
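
Usage sketch (Conv comes from the surrounding project; the layer name is an assumption for a VGG-style model):

conv = load_caffemodel("VGG_ILSVRC_16_layers.caffemodel")
conv1_1 = conv("conv1_1")  # wraps the named layer of the loaded model in a Conv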
Example #23
import sys
import os
import scipy as sp
import numpy as np

import chainer
from chainer import Function, gradient_check, Variable, optimizers, serializers, utils
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
from chainer.functions import caffe
import cv2

import pickle

print('loading caffemodel')
model = caffe.CaffeFunction('../manga6_92000.caffemodel')
print('making pickle')
pickle.dump(model, open('../model.pkl', 'wb'))

print('symbolic link')
# titles = ["MeteoSanStrikeDesu","SaladDays_vol18","HinagikuKenzan","HarukaRefrain", "Belmondo", "LoveHina_vol14", "GOOD_KISS_Ver2", "YamatoNoHane", "Arisa", "AisazuNihaIrarenai"]
titles = ['Belmondo']
os.chdir('./WebGUI/image')
# data before tone removal
# os.remove('Belmondo')

for title in titles:
    os.symlink('../../../' + title, './' + title)
Example #24
 def load(self, path):
     self.func = caffe.CaffeFunction(path)
Example #25
def main(img_name, out_img_name):
    oriImg = cv.imread(img_name)  # B,G,R order
    param, model = config_reader()
    multiplier = [
        x * model['boxsize'] / oriImg.shape[0] for x in param['scale_search']
    ]
    #multiplier = multiplier[:1]

    #net = caffe.Net(model['deployFile'], model['caffemodel'], caffe.TEST)
    net = caffe.CaffeFunction(model['caffemodel'])

    heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
    paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))

    for m in range(len(multiplier)):
        scale = multiplier[m]
        imageToTest = cv.resize(oriImg, (0, 0),
                                fx=scale,
                                fy=scale,
                                interpolation=cv.INTER_CUBIC)
        imageToTest_padded, pad = util.padRightDownCorner(
            imageToTest, model['stride'], model['padValue'])
        print imageToTest_padded.shape

        #net.blobs['data'].reshape(*(1, 3, imageToTest_padded.shape[0], imageToTest_padded.shape[1]))
        #net.blobs['data'].data[...] = np.transpose(np.float32(imageToTest_padded[:,:,:,np.newaxis]), (3,2,0,1))/256 - 0.5;
        netInput = ary2input(imageToTest_padded)

        start_time = time.time()
        #output_blobs = net.forward()
        netOutput = net(inputs={'data': netInput},
                        outputs=['Mconv7_stage6_L1', 'Mconv7_stage6_L2'])
        print('At scale %d, the CNN took %.2f ms.' %
              (m, 1000 * (time.time() - start_time)))

        # extract outputs, resize, and remove padding
        #heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1,2,0)) # output 1 is heatmaps
        heatmap = np.transpose(np.squeeze(netOutput[1].data),
                               (1, 2, 0))  # output 1 is heatmaps
        heatmap = cv.resize(heatmap, (0, 0),
                            fx=model['stride'],
                            fy=model['stride'],
                            interpolation=cv.INTER_CUBIC)
        heatmap = heatmap[:imageToTest_padded.shape[0] -
                          pad[2], :imageToTest_padded.shape[1] - pad[3], :]
        heatmap = cv.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]),
                            interpolation=cv.INTER_CUBIC)

        #paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1,2,0)) # output 0 is PAFs
        paf = np.transpose(np.squeeze(netOutput[0].data),
                           (1, 2, 0))  # output 0 is PAFs
        paf = cv.resize(paf, (0, 0),
                        fx=model['stride'],
                        fy=model['stride'],
                        interpolation=cv.INTER_CUBIC)
        paf = paf[:imageToTest_padded.shape[0] -
                  pad[2], :imageToTest_padded.shape[1] - pad[3], :]
        paf = cv.resize(paf, (oriImg.shape[1], oriImg.shape[0]),
                        interpolation=cv.INTER_CUBIC)

        heatmap_avg = heatmap_avg + heatmap / len(multiplier)
        paf_avg = paf_avg + paf / len(multiplier)

    U = paf_avg[:, :, 16] * -1
    V = paf_avg[:, :, 17]
    X, Y = np.meshgrid(np.arange(U.shape[1]), np.arange(U.shape[0]))
    M = np.zeros(U.shape, dtype='bool')
    M[U**2 + V**2 < 0.5 * 0.5] = True
    U = ma.masked_array(U, mask=M)
    V = ma.masked_array(V, mask=M)

    import scipy
    print heatmap_avg.shape

    from scipy.ndimage.filters import gaussian_filter
    all_peaks = []
    peak_counter = 0

    start_time = time.time()

    for part in range(19 - 1):
        x_list = []
        y_list = []
        map_ori = heatmap_avg[:, :, part]
        map = gaussian_filter(map_ori, sigma=3)

        map_left = np.zeros(map.shape)
        map_left[1:, :] = map[:-1, :]
        map_right = np.zeros(map.shape)
        map_right[:-1, :] = map[1:, :]
        map_up = np.zeros(map.shape)
        map_up[:, 1:] = map[:, :-1]
        map_down = np.zeros(map.shape)
        map_down[:, :-1] = map[:, 1:]

        peaks_binary = np.logical_and.reduce(
            (map >= map_left, map >= map_right, map >= map_up, map >= map_down,
             map > param['thre1']))
        peaks = zip(np.nonzero(peaks_binary)[1],
                    np.nonzero(peaks_binary)[0])  # note reverse
        peaks_with_score = [x + (map_ori[x[1], x[0]], ) for x in peaks]
        id = range(peak_counter, peak_counter + len(peaks))
        peaks_with_score_and_id = [
            peaks_with_score[i] + (id[i], ) for i in range(len(id))
        ]

        all_peaks.append(peaks_with_score_and_id)
        peak_counter += len(peaks)

    # find connections in the specified sequence (center 29 is at position 15)
    limbSeq = [[2,3], [2,6], [3,4], [4,5], [6,7], [7,8], [2,9], [9,10], \
            [10,11], [2,12], [12,13], [13,14], [2,1], [1,15], [15,17], \
            [1,16], [16,18], [3,17], [6,18]]
    # the middle joints heatmap correspondence
    mapIdx = [[31,32], [39,40], [33,34], [35,36], [41,42], [43,44], [19,20], [21,22], \
            [23,24], [25,26], [27,28], [29,30], [47,48], [49,50], [53,54], [51,52], \
            [55,56], [37,38], [45,46]]

    connection_all = []
    special_k = []
    mid_num = 10

    for k in range(len(mapIdx)):
        score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
        candA = all_peaks[limbSeq[k][0] - 1]
        candB = all_peaks[limbSeq[k][1] - 1]
        nA = len(candA)
        nB = len(candB)
        indexA, indexB = limbSeq[k]
        if (nA != 0 and nB != 0):
            connection_candidate = []
            for i in range(nA):
                for j in range(nB):
                    vec = np.subtract(candB[j][:2], candA[i][:2])
                    norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
                    vec = np.divide(vec, norm)

                    startend = zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
                                np.linspace(candA[i][1], candB[j][1], num=mid_num))

                    vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
                                    for I in range(len(startend))])
                    vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
                                    for I in range(len(startend))])

                    score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(
                        vec_y, vec[1])
                    score_with_dist_prior = sum(
                        score_midpts) / len(score_midpts) + min(
                            0.5 * oriImg.shape[0] / norm - 1, 0)
                    criterion1 = len(
                        np.nonzero(score_midpts > param['thre2'])
                        [0]) > 0.8 * len(score_midpts)
                    criterion2 = score_with_dist_prior > 0
                    if criterion1 and criterion2:
                        connection_candidate.append([
                            i, j, score_with_dist_prior,
                            score_with_dist_prior + candA[i][2] + candB[j][2]
                        ])

            connection_candidate = sorted(connection_candidate,
                                          key=lambda x: x[2],
                                          reverse=True)
            connection = np.zeros((0, 5))
            for c in range(len(connection_candidate)):
                i, j, s = connection_candidate[c][0:3]
                if (i not in connection[:, 3] and j not in connection[:, 4]):
                    connection = np.vstack(
                        [connection, [candA[i][3], candB[j][3], s, i, j]])
                    if (len(connection) >= min(nA, nB)):
                        break

            connection_all.append(connection)
        else:
            special_k.append(k)
            connection_all.append([])

    # last number in each row is the total parts number of that person
    # the second last number in each row is the score of the overall configuration
    subset = -1 * np.ones((0, 20))
    candidate = np.array([item for sublist in all_peaks for item in sublist])

    for k in range(len(mapIdx)):
        if k not in special_k:
            partAs = connection_all[k][:, 0]
            partBs = connection_all[k][:, 1]
            indexA, indexB = np.array(limbSeq[k]) - 1

            for i in range(len(connection_all[k])):  #= 1:size(temp,1)
                found = 0
                subset_idx = [-1, -1]
                for j in range(len(subset)):  #1:size(subset,1):
                    if subset[j][indexA] == partAs[i] or subset[j][
                            indexB] == partBs[i]:
                        subset_idx[found] = j
                        found += 1

                if found == 1:
                    j = subset_idx[0]
                    if (subset[j][indexB] != partBs[i]):
                        subset[j][indexB] = partBs[i]
                        subset[j][-1] += 1
                        subset[j][-2] += candidate[partBs[i].astype(int),
                                                   2] + connection_all[k][i][2]
                elif found == 2:  # if found 2 and disjoint, merge them
                    j1, j2 = subset_idx
                    print "found = 2"
                    membership = ((subset[j1] >= 0).astype(int) +
                                  (subset[j2] >= 0).astype(int))[:-2]
                    if len(np.nonzero(membership == 2)[0]) == 0:  #merge
                        subset[j1][:-2] += (subset[j2][:-2] + 1)
                        subset[j1][-2:] += subset[j2][-2:]
                        subset[j1][-2] += connection_all[k][i][2]
                        subset = np.delete(subset, j2, 0)
                    else:  # as like found == 1
                        subset[j1][indexB] = partBs[i]
                        subset[j1][-1] += 1
                        subset[j1][-2] += candidate[
                            partBs[i].astype(int), 2] + connection_all[k][i][2]

                # if find no partA in the subset, create a new subset
                elif not found and k < 17:
                    row = -1 * np.ones(20)
                    row[indexA] = partAs[i]
                    row[indexB] = partBs[i]
                    row[-1] = 2
                    row[-2] = sum(
                        candidate[connection_all[k][i, :2].astype(int),
                                  2]) + connection_all[k][i][2]
                    subset = np.vstack([subset, row])

    # delete rows of subset that have too few parts
    deleteIdx = []
    for i in range(len(subset)):
        if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
            deleteIdx.append(i)
    subset = np.delete(subset, deleteIdx, axis=0)

    print('Body Estimation took %.2f ms.' % (1000 *
                                             (time.time() - start_time)))

    # visualize
    colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
            [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
            [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
    cmap = matplotlib.cm.get_cmap('hsv')

    canvas = cv.imread(img_name)  # B,G,R order

    for i in range(18):
        rgba = np.array(cmap(1 - i / 18. - 1. / 36))
        rgba[0:3] *= 255
        for j in range(len(all_peaks[i])):
            cv.circle(canvas, all_peaks[i][j][0:2], 4, colors[i], thickness=-1)

    # visualize 2
    stickwidth = 4

    for i in range(17):
        for n in range(len(subset)):
            index = subset[n][np.array(limbSeq[i]) - 1]
            if -1 in index:
                continue
            cur_canvas = canvas.copy()
            Y = candidate[index.astype(int), 0]
            X = candidate[index.astype(int), 1]
            mX = np.mean(X)
            mY = np.mean(Y)
            length = ((X[0] - X[1])**2 + (Y[0] - Y[1])**2)**0.5
            angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
            polygon = cv.ellipse2Poly(
                (int(mY), int(mX)), (int(length / 2), stickwidth), int(angle),
                0, 360, 1)
            cv.fillConvexPoly(cur_canvas, polygon, colors[i])
            canvas = cv.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
    cv.imwrite(out_img_name, canvas)
Example #26
if args.gpu >= 0:
    cuda.check_cuda_available()
xp = cuda.cupy if args.gpu >= 0 else np
assert args.batchsize > 0

dataset = []
with open(args.dataset) as list_file:
    for line in list_file:
        pair = line.strip().split()
        path = os.path.join(args.basepath, pair[0])
        dataset.append((path, np.int32(pair[1])))

assert len(dataset) % args.batchsize == 0

print('Loading Caffe model file %s...' % args.model, file=sys.stderr)
func = caffe.CaffeFunction(args.model)
print('Loaded', file=sys.stderr)
if args.gpu >= 0:
    cuda.get_device(args.gpu).use()
    func.to_gpu()

if args.model_type == 'alexnet' or args.model_type == 'caffenet':
    in_size = 227
    mean_image = np.load(args.mean)

    def forward(x, t):
        y, = func(inputs={'data': x}, outputs=['fc8'], train=False)
        return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
elif args.model_type == 'googlenet':
    in_size = 224
    # Constant mean over spatial pixels
Example #27
        cuda.check_cuda_available()
    xp = cuda.cupy if args.gpu >= 0 else np

    caffe_model = args.model
    chainer_model_path = \
        re.sub(r'\.caffemodel$', '.chainermodel.pkl', caffe_model)
    chainer_model = None
    if not os.path.isfile(chainer_model_path):
        if not os.path.isfile(caffe_model):
            print('{} is not found.'.format(caffe_model))
            exit(1)
        else:
            print('Converting the caffemodel.')
            print('It takes a while, ', end='')
            print('but this process is not required from the next time.')
            func = caffe.CaffeFunction(caffe_model)
            pickle.dump(func,
                        open(chainer_model_path, mode='wb+'))
    else:
        func = pickle.load(open(chainer_model_path, mode='rb'))

    if args.gpu >= 0:
        cuda.get_device(args.gpu).use()
        func.to_gpu()

    in_size = 224
    mean_image = np.load(args.mean)
    cropwidth = 256 - in_size
    start = cropwidth // 2
    stop = start + in_size
    mean_image = mean_image[:, start:stop, start:stop].copy()
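
A preprocessing sketch using the cropped mean (an assumption; the original script continues beyond this excerpt):

def preprocess(image):
    # hypothetical: center-crop a 3x256x256 image to the network input size
    # using the crop window computed above, then subtract the cropped mean
    cropped = image[:, start:stop, start:stop].astype(np.float32)
    return cropped - mean_image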