Code example #1
0
File: convert_annot.py  Project: BoAdBo/AlphaPose
    # For each annotated person in image `idx`, append one entry to every
    # list in `annot` (Python 2 code: xrange).  Fixes two defects in the
    # original: (1) the else-branch never appended to annot['istrain'],
    # letting that list drift out of sync with all the others; (2) a
    # filename longer than 16 chars would raise IndexError on the
    # fixed-size `imgname` buffer.
    for person in xrange(mpii.numpeople(idx)):
        c, s = mpii.location(idx, person)
        if not c[0] == -1:
            # Add info to annotation list
            annot['index'] += [idx]
            annot['person'] += [person]
            # Filename stored as a fixed-length (16) array of char codes.
            imgname = np.zeros(16)
            refname = str(imgnameRef[idx][0][0][0][0])
            # Clamp to the buffer size so over-long names truncate instead
            # of raising IndexError (original had no guard).
            for i in range(min(len(refname), 16)):
                imgname[i] = ord(refname[i])
            annot['imgname'] += [imgname]
            annot['center'] += [c]
            annot['scale'] += [s]
            annot['multi'] += [multiRef[idx]]

            if mpii.istrain(idx) == True:
                # Part annotations and visibility
                coords = np.zeros((16, 2))
                vis = np.zeros(16)
                for part in xrange(16):
                    coords[part], vis[part] = mpii.partinfo(idx, person, part)
                annot['part'] += [coords]
                annot['visible'] += [vis]
                annot['normalize'] += [mpii.normalization(idx, person)]
                annot['torsoangle'] += [mpii.torsoangle(idx, person)]
                annot['istrain'] += [1]
            else:
                # Test images: labels unavailable, store placeholders.
                annot['part'] += [-np.ones((16, 2))]
                annot['visible'] += [np.zeros(16)]
                annot['normalize'] += [1]
                annot['torsoangle'] += [0]
                # Bug fix: keep 'istrain' the same length as the other lists.
                annot['istrain'] += [0]
Code example #2
0
# One accumulator list per annotation field, filled per (image, person) pair.
keys = ['index', 'person', 'imgname', 'center', 'scale',
        'part', 'visible', 'normalize', 'torsoangle']
annot = dict((field, []) for field in keys)
dotrain = True  # convert the training split

# Get image filenames
# NOTE(review): the chained [0] indexing looks like unpacking of a
# scipy.io.loadmat-style MATLAB struct array — confirm against the
# `mpii` module; `[:]` takes a copy of the image record array.
imgnameRef = mpii.annot['annolist'][0][0][0]['image'][:]

# Walk every image; keep only people in the requested split (`dotrain`)
# whose annotated location is valid (center x != -1).
# NOTE(review): Python 2 code (xrange, print statement); this example is
# truncated by the scrape — the "add info" part of the body is missing.
for idx in xrange(mpii.nimages):
    # In-place progress counter: "\r" rewinds the line, trailing comma
    # suppresses the newline.
    print "\r", idx,
    sys.stdout.flush()

    if mpii.istrain(idx) == dotrain:
        for person in xrange(mpii.numpeople(idx)):
            c, s = mpii.location(idx, person)
            if not c[0] == -1:
                # Adjust center/scale slightly to avoid cropping limbs
                # (in hindsight this should have been done in the Torch code...)
                c[1] += 15 * s
                s *= 1.25

                # Part annotations and visibility
                coords = np.zeros((16, 2))
                vis = np.zeros(16)
                for part in xrange(16):
                    coords[part], vis[part] = mpii.partinfo(idx, person, part)

                # Add info to annotation list
Code example #3
0
    # For each annotated person in image `idx`, append one entry to every
    # list in `annot` (Python 2 code: xrange).  Fixes two defects in the
    # original: (1) the else-branch never appended to annot['istrain'],
    # letting that list drift out of sync with all the others; (2) a
    # filename longer than 16 chars would raise IndexError on the
    # fixed-size `imgname` buffer.
    for person in xrange(mpii.numpeople(idx)):
        c, s = mpii.location(idx, person)
        if not c[0] == -1:
            # Add info to annotation list
            annot['index'] += [idx]
            annot['person'] += [person]
            # Filename stored as a fixed-length (16) array of char codes.
            imgname = np.zeros(16)
            refname = str(imgnameRef[idx][0][0][0][0])
            # Clamp to the buffer size so over-long names truncate instead
            # of raising IndexError (original had no guard).
            for i in range(min(len(refname), 16)):
                imgname[i] = ord(refname[i])
            annot['imgname'] += [imgname]
            annot['center'] += [c]
            annot['scale'] += [s]
            annot['multi'] += [multiRef[idx]]

            if mpii.istrain(idx) == True:
                # Part annotations and visibility
                coords = np.zeros((16, 2))
                vis = np.zeros(16)
                for part in xrange(16):
                    coords[part], vis[part] = mpii.partinfo(idx, person, part)
                annot['part'] += [coords]
                annot['visible'] += [vis]
                annot['normalize'] += [mpii.normalization(idx, person)]
                annot['torsoangle'] += [mpii.torsoangle(idx, person)]
                annot['istrain'] += [1]
            else:
                # Test images: labels unavailable, store placeholders.
                annot['part'] += [-np.ones((16, 2))]
                annot['visible'] += [np.zeros(16)]
                annot['normalize'] += [1]
                annot['torsoangle'] += [0]
                # Bug fix: keep 'istrain' the same length as the other lists.
                annot['istrain'] += [0]
Code example #4
0
import numpy as np
import sys
import mpii

# Annotation accumulators: one list per field, grown per (image, person).
keys = ['index', 'person', 'imgname', 'center', 'scale',
        'part', 'visible', 'normalize', 'torsoangle']
annot = {}
for field in keys:
    annot[field] = []
dotrain = True  # process the training split

# Get image filenames
# NOTE(review): the chained [0] indexing looks like unpacking of a
# scipy.io.loadmat-style MATLAB struct array — confirm against the
# `mpii` module; `[:]` takes a copy of the image record array.
imgnameRef = mpii.annot['annolist'][0][0][0]['image'][:]

# Walk every image; keep only people in the requested split (`dotrain`)
# whose annotated location is valid (center x != -1).
# NOTE(review): Python 2 code (xrange, print statement); this example is
# truncated by the scrape — the "add info" part of the body is missing.
for idx in xrange(mpii.nimages):
    # In-place progress counter: "\r" rewinds the line, trailing comma
    # suppresses the newline.
    print "\r",idx,
    sys.stdout.flush()

    if mpii.istrain(idx) == dotrain:
        for person in xrange(mpii.numpeople(idx)):
            c,s = mpii.location(idx,person)
            if not c[0] == -1:
                # Adjust center/scale slightly to avoid cropping limbs
                # (in hindsight this should have been done in the Torch code...)
                c[1] += 15 * s
                s *= 1.25

                # Part annotations and visibility
                coords = np.zeros((16,2))
                vis = np.zeros(16)
                for part in xrange(16):
                   coords[part],vis[part] = mpii.partinfo(idx,person,part)

                # Add info to annotation list