Code example #1
File: eval.py (project: zoombapup/AlphaPose)
import numpy as np
from six.moves import xrange
import ref
import img

# Reference for other predictions
other_preds = {'nyu': {'flic': 'nyu_pred', 'mpii': 'nyu_pred'}}


def get_path(dataset_name, file_name):
    return ref.posedir + '/data/' + dataset_name + '/ref/' + file_name + '.npy'
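# Example: get_path('flic', 'nyu_pred') == ref.posedir + '/data/flic/ref/nyu_pred.npy'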


# Load ground truth annotations
annot = {
    'flic': ref.load('flic', 'test'),
    'mpii': ref.load('mpii', 'valid'),
    'mpii_train': ref.load('mpii', 'train'),
    'mpii_test': ref.load('mpii', 'test')
}


def getdists(pred, dotrain=False):
    # Get normalized distances between predictions and ground truth

    # Automatically figures out dataset based on number of parts
    if pred.shape[1] == 11:
        dataset = 'flic'
    elif pred.shape[1] == 16:
        dataset = 'mpii'
    else:
        print "Error: Bad prediction file."
        return 0
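
For context, "normalized distances" in FLIC/MPII pose evaluation are usually per-joint Euclidean errors divided by a per-sample reference length (torso size for FLIC's PCK, head size for MPII's PCKh). A minimal sketch of that computation, assuming plain NumPy arrays; normalized_dists, gt, and ref_len are illustrative names, not code from this file:

import numpy as np

def normalized_dists(pred, gt, ref_len):
    # pred, gt: (num_samples, num_parts, 2) arrays of (x, y) joint coordinates
    # ref_len:  (num_samples,) per-sample reference lengths (assumed precomputed)
    err = np.sqrt(((pred - gt) ** 2).sum(axis=2))  # Euclidean error per joint
    return err / ref_len[:, np.newaxis]            # normalize by reference length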
Code example #2
File: report.py (project: BoAdBo/AlphaPose)
def main(argv):
    dataset = None
    exp_id = None
    extra = []
    prev = []
    other_dists = {}
    vert = False
    images = False
    obs = False
    limb = False

    # Process command line arguments
    try:
        opts, args = getopt.getopt(argv, "hd:e:c:p:viol", ["help", "dataset=", "expID=", "compare=", "prev=",
                                                           "vert", "images", "obs", "limb"])
    except getopt.GetoptError:
        print "Incorrect arguments"
        helpmessage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            helpmessage()
        elif opt in ('-d', '--dataset'): dataset = arg
        elif opt in ('-e', '--expID'): exp_id = arg
        elif opt in ('-c', '--compare'): extra += arg.split(',')
        elif opt in ('-p', '--prev'): prev += arg.split(',')
        elif opt in ('-v', '--vert'): vert = True
        elif opt in ('-i', '--images'): images = True
        elif opt in ('-o', '--obs'): obs = True
        elif opt in ('-l', '--limb'): limb = True

    if dataset is None:
        print "No dataset chosen."
        helpmessage()
    if not (dataset in ['mpii','flic']):
        print "Bad argument for --dataset"
        helpmessage()
    if exp_id is None:
        print "No experiment number provided."
        helpmessage()
    expdir = ref.posedir + '/exp/' + dataset + '/' + exp_id

    # Generate the simple report for mini limb networks
    if limb:
        pdf = PdfPages(expdir + '/report.pdf')
        limbreport(dataset, exp_id, pdf)
        pdf.close()
        return

    # Load in dataset information
    num_parts = len(ref.parts[dataset])
    if obs:
        annot = ref.load(dataset, 'test_obs')
        eval.annot[dataset] = annot
    else:
        annot = ref.load(dataset, 'valid')

    # Load predictions
    print "Loading predictions"
    pred_opts = [num_parts, vert, obs]
    preds, dists, _ = loadpreds(dataset, expdir + '/preds.h5', pred_opts, images)

    # Load previous predictions
    for prv in prev:
        _,d,_ = loadpreds(dataset, expdir + '/preds_%s.h5' % prv, pred_opts)
        other_dists[prv] = [d, None]

    # Load comparison predictions
    for ext in extra:
        predfile = ref.posedir + '/exp/' + dataset + '/' + ext + '/preds.h5'
        _,d,_ = loadpreds(dataset, predfile, pred_opts)
        other_dists[ext] = [d, None]

    # Load previous best
    if vert: predfile = expdir + '/../best/preds_vert.h5'
    else: predfile = expdir + '/../best/preds.h5'
    _,best_dists,_ = loadpreds(dataset, predfile, pred_opts)
    #other_dists["Kaiyu's best model"] = [best_dists, None]

    # Load NYU predictions
    if dataset == 'mpii':
        nyu_dists = np.load(eval.get_path(dataset, 'nyu_dists'))
    else:
        if not obs: nyu_preds = np.load(eval.get_path(dataset, 'nyu_pred'))
        else: nyu_preds = np.load(eval.get_path(dataset, 'nyu_pred_obs'))
        nyu_dists = eval.getdists(nyu_preds)
        np.save('nyu_dists_%s%s'%(dataset,'_obs' if obs else ''),nyu_dists)
    other_dists['Tompson'] = [nyu_dists, None]

    # Load training set predictions
    if False:  # disabled: flip to True to include training-set predictions in the report
        _,d,_ = loadpreds(dataset, expdir + '/preds_train.h5', pred_opts, dotrain=True)
        other_dists['Train'] = [d, None]

    filt = None

    print "Creating overview page"
    # Main report creation
    pdf = PdfPages(expdir + '/report.pdf')

    # Training overview page
    trainingoverview(dataset, dists, [filt], [exp_id], exp_id=exp_id,
                     other_dists=other_dists, pdf=pdf)

    if images:
        print "Creating prediction examples page"
        # Overall performance examples
        num_good_exs = 2
        num_bad_exs = 6
        for i in xrange(num_good_exs):
            sampleimages(annot,preds,dists,pdf=pdf,page_num=i+1)
        for i in xrange(num_bad_exs):
            sampleimages(annot,preds,dists,get_worst=True,pdf=pdf,page_num=i+1)

        # print "Creating part heatmap examples"
        # # Heatmap examples
        # for i in xrange(len(ref.part_pairs[dataset])):
        #     title = ref.pair_names[dataset][i]
        #     pt_names = ref.part_pairs[dataset][i]
        #     if not title == 'face':
        #         partheatmaps(annot,preds,preds_raw,dists=dists,partnames=pt_names,title='%s Heatmap Examples'%title,
        #                      pdf=pdf, page_num=1, vert=vert)
        #         for j in xrange(1,3):
        #             partheatmaps(annot,preds,preds_raw,dists=dists,partnames=pt_names,title='%s Heatmap Examples'%title,
        #                          pdf=pdf, page_num=j, vert=vert, get_worst=True)

    pdf.close()
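
Since main() is driven entirely by getopt flags, the report is generated from the command line. A hypothetical usage sketch; it assumes report.py imports getopt, sys, numpy as np, eval, ref, and matplotlib's PdfPages, defines the helpers used above (helpmessage, loadpreds, trainingoverview, sampleimages, limbreport), and ends with the usual argv dispatch, none of which appears in the excerpt; the experiment IDs below are made up:

if __name__ == '__main__':
    main(sys.argv[1:])

# Overview report plus prediction-example pages for an MPII experiment:
#   python report.py --dataset mpii --expID hg_001 --images
# Compare a FLIC experiment against two earlier experiments:
#   python report.py -d flic -e hg_002 -c exp_a,exp_b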
Code example #3
File: data.py (project: BoAdBo/AlphaPose)
def generateset(dataset, settype, filename, numsamples, datadir=None, chg=None, sampletype='default', idxs=None, extra_args=None):
    # Generate full hdf5 dataset

    # Path to output file
    if datadir is None:
        filepath = ref.posedir + '/data/' + dataset + '/' + filename + '.h5'
    else:
        filepath = datadir + '/' + dataset + '/' + filename + '.h5'
    # Load in annotations
    annot = ref.load(dataset, settype)

    # Option to strictly follow the order of the provided annotations
    # Useful for generating test sets.
    if idxs is None:
        numavailable = len(annot['index']) # Number of available samples
    else:
        numavailable = len(idxs)
    inorder = False
    if numsamples == -1:
        numsamples = numavailable
        inorder = True

    print ""
    print "Generating %s %s set: %s" % (dataset, sampletype, settype)
    print "Path to dataset: %s" % filepath
    print "Number of samples: %d" % numsamples
    print "Data augmentation: %s" % (str(chg))

    # Data/label sizes can be all over the place, this is the easiest way to check
    ex_in, ex_out = gensample(annot, 0, chg=chg, sampletype=sampletype, extra_args=extra_args)

    # Initialize numpy arrays to hold data
    data = np.zeros((numsamples, ex_in.shape[0], ex_in.shape[1], ex_in.shape[2]), np.float32)
    label = np.zeros((numsamples, ex_out.shape[0], ex_out.shape[1], ex_out.shape[2]), np.float32)
    ref_idxs = np.zeros((numsamples, 1), np.float32)

    # Loop to generate new samples
    print ''
    print '| Progress            |'
    print '|',
    sys.stdout.flush()

    starttime = time.time()
    for i in xrange(numsamples):
        if idxs is not None: idx = idxs[i]
        elif inorder: idx = i
        else: idx = np.random.randint(numavailable)

        data[i], label[i] = gensample(annot, idx, chg=chg, sampletype=sampletype, extra_args=extra_args)
        ref_idxs[i] = idx

        if i % max(1, numsamples // 10) == 0:  # guard: numsamples < 10 would otherwise divide by zero
            print '=',
            sys.stdout.flush()

    print '|'
    print ''
    print 'Done!',
    print '(%.2f seconds to complete.)' % (time.time() - starttime)
    print ''

    # Write out to hdf5 files
    with h5py.File(filepath, 'w') as f:
        f['data'] = data
        f['label'] = label
        f['index'] = ref_idxs
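
A hypothetical usage sketch; the file names, sample counts, and the contents of the chg augmentation dict (which generateset passes straight through to gensample) are made-up values, and the excerpt assumes data.py already imports numpy as np, sys, time, h5py, and ref and defines gensample:

# Build a 10,000-sample randomly drawn, augmented training set:
generateset('mpii', 'train', 'train_aug', 10000, chg={'rotate': 30, 'scale': 0.25})

# Build a validation set covering every annotation once, in order
# (numsamples == -1 uses all available samples and sets inorder = True):
generateset('mpii', 'valid', 'valid_full', -1)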
Code example #4
File: report.py (project: zoombapup/AlphaPose)
def main(argv):
    dataset = None
    exp_id = None
    extra = []
    prev = []
    other_dists = {}
    vert = False
    images = False
    obs = False
    limb = False

    # Process command line arguments
    try:
        opts, args = getopt.getopt(argv, "hd:e:c:p:viol", ["help", "dataset=", "expID=", "compare=", "prev=",
                                                           "vert", "images", "obs", "limb"])
    except getopt.GetoptError:
        print("Incorrect arguments")
        helpmessage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            helpmessage()
        elif opt in ('-d', '--dataset'): dataset = arg
        elif opt in ('-e', '--expID'): exp_id = arg
        elif opt in ('-c', '--compare'): extra += arg.split(',')
        elif opt in ('-p', '--prev'): prev += arg.split(',')
        elif opt in ('-v', '--vert'): vert = True
        elif opt in ('-i', '--images'): images = True
        elif opt in ('-o', '--obs'): obs = True
        elif opt in ('-l', '--limb'): limb = True

    if dataset is None:
        print("No dataset chosen.")
        helpmessage()
    if not (dataset in ['mpii','flic']):
        print("Bad argument for --dataset")
        helpmessage()
    if exp_id is None:
        print("No experiment number provided.")
        helpmessage()
    expdir = ref.posedir + '/exp/' + dataset + '/' + exp_id

    # Generate the simple report for mini limb networks
    if limb:
        pdf = PdfPages(expdir + '/report.pdf')
        limbreport(dataset, exp_id, pdf)
        pdf.close()
        return

    # Load in dataset information
    num_parts = len(ref.parts[dataset])
    if obs:
        annot = ref.load(dataset, 'test_obs')
        eval.annot[dataset] = annot
    else:
        annot = ref.load(dataset, 'valid')

    # Load predictions
    print("Loading predictions")
    pred_opts = [num_parts, vert, obs]
    preds, dists, _ = loadpreds(dataset, expdir + '/preds.h5', pred_opts, images)

    # Load previous predictions
    for prv in prev:
        _,d,_ = loadpreds(dataset, expdir + '/preds_%s.h5' % prv, pred_opts)
        other_dists[prv] = [d, None]

    # Load comparison predictions
    for ext in extra:
        predfile = ref.posedir + '/exp/' + dataset + '/' + ext + '/preds.h5'
        _,d,_ = loadpreds(dataset, predfile, pred_opts)
        other_dists[ext] = [d, None]

    # Load previous best
    if vert: predfile = expdir + '/../best/preds_vert.h5'
    else: predfile = expdir + '/../best/preds.h5'
    _,best_dists,_ = loadpreds(dataset, predfile, pred_opts)
    #other_dists["Kaiyu's best model"] = [best_dists, None]

    # Load NYU predictions
    if dataset == 'mpii':
        nyu_dists = np.load(eval.get_path(dataset, 'nyu_dists'))
    else:
        if not obs: nyu_preds = np.load(eval.get_path(dataset, 'nyu_pred'))
        else: nyu_preds = np.load(eval.get_path(dataset, 'nyu_pred_obs'))
        nyu_dists = eval.getdists(nyu_preds)
        np.save('nyu_dists_%s%s'%(dataset,'_obs' if obs else ''),nyu_dists)
    other_dists['Tompson'] = [nyu_dists, None]

    # Load training set predictions
    if False:  # disabled: flip to True to include training-set predictions in the report
        _,d,_ = loadpreds(dataset, expdir + '/preds_train.h5', pred_opts, dotrain=True)
        other_dists['Train'] = [d, None]

    filt = None

    print("Creating overview page")
    # Main report creation
    pdf = PdfPages(expdir + '/report.pdf')

    # Training overview page
    trainingoverview(dataset, dists, [filt], [exp_id], exp_id=exp_id,
                     other_dists=other_dists, pdf=pdf)

    if images:
        print("Creating prediction examples page")
        # Overall performance examples
        num_good_exs = 2
        num_bad_exs = 6
        for i in xrange(num_good_exs):
            sampleimages(annot,preds,dists,pdf=pdf,page_num=i+1)
        for i in xrange(num_bad_exs):
            sampleimages(annot,preds,dists,get_worst=True,pdf=pdf,page_num=i+1)

        # print "Creating part heatmap examples"
        # # Heatmap examples
        # for i in xrange(len(ref.part_pairs[dataset])):
        #     title = ref.pair_names[dataset][i]
        #     pt_names = ref.part_pairs[dataset][i]
        #     if not title == 'face':
        #         partheatmaps(annot,preds,preds_raw,dists=dists,partnames=pt_names,title='%s Heatmap Examples'%title,
        #                      pdf=pdf, page_num=1, vert=vert)
        #         for j in xrange(1,3):
        #             partheatmaps(annot,preds,preds_raw,dists=dists,partnames=pt_names,title='%s Heatmap Examples'%title,
        #                          pdf=pdf, page_num=j, vert=vert, get_worst=True)

    pdf.close()
Code example #5
File: eval.py (project: BoAdBo/AlphaPose)
import numpy as np
import ref
import img

# Reference for other predictions
other_preds = {'nyu':{'flic':'nyu_pred', 'mpii':'nyu_pred'}}
def get_path(dataset_name, file_name):
    return ref.posedir + '/data/' + dataset_name + '/ref/' + file_name + '.npy'

# Load ground truth annotations
annot = {'flic':ref.load('flic','test'),
         'mpii':ref.load('mpii','valid'),
         'mpii_train':ref.load('mpii','train'),
         'mpii_test':ref.load('mpii','test')}

def getdists(pred, dotrain=False):
    # Get normalized distances between predictions and ground truth

    # Automatically figures out dataset based on number of parts
    if pred.shape[1] == 11:
        dataset = 'flic'
    elif pred.shape[1] == 16:
        dataset = 'mpii'
    else:
        print "Error: Bad prediction file."
        return 0

    idx_ref = []
    if dotrain:
        idx_ref = np.load(get_path(dataset,'train_idxs'))
        dataset += '_train'