Example #1
0
def loadpreds(dataset, predfile, pred_opts, get_hms=False, dotrain=False):
    """Load predictions, distances, and optionally heatmaps from an hdf5 file.

    Transformed predictions and computed distances are cached back into the
    file so subsequent calls skip the recomputation. Returns a tuple
    (preds, dists, hms); hms is None unless get_hms is True.
    """
    num_parts, vert, obs = pred_opts
    hms = None
    with h5py.File(predfile, 'r+') as f:
        # Pick the cached-prediction dataset name for this orientation
        key = 'preds_v' if vert else 'preds_tf'
        try:
            # Use the cached transformed predictions when present
            preds = np.array(f[key])
        except KeyError:
            # Not cached yet: transform the raw predictions and store them
            preds = eval.transformpreds(dataset, f['preds'],
                                        [64, 64], rot=vert)
            f[key] = preds

        # Drop extra channels beyond the part count
        # (these come from segmentation output; soon to be unnecessary)
        if preds.shape[1] > num_parts:
            preds = preds[:, :num_parts, :]

        # Heatmaps are large, so only load them when requested
        if get_hms:
            hms = np.array(f['preds_raw'])

        # Distances are cached under a key that encodes the vert/obs options
        dist_key = 'dist_' + ('v' if vert else '') + ('o' if obs else '')
        try:
            dists = np.array(f[dist_key])
        except KeyError:
            # Evaluate distances to ground truth and cache the result
            dists = eval.getdists(preds, dotrain)
            f[dist_key] = dists
    return preds, dists, hms
Example #2
0
def loadpreds(dataset, predfile, pred_opts, get_hms=False, dotrain=False):
    """Load predictions, distances, and optionally heatmaps from an hdf5 file.

    Transformed predictions and computed distances are cached back into the
    file so subsequent calls skip the recomputation. Returns a tuple
    (preds, dists, hms); hms is None unless get_hms is True.
    """
    num_parts, vert, obs = pred_opts
    hms = None
    with h5py.File(predfile, 'r+') as f:
        # Pick the cached-prediction dataset name for this orientation
        key = 'preds_v' if vert else 'preds_tf'
        try:
            # Use the cached transformed predictions when present
            preds = np.array(f[key])
        except KeyError:
            # Not cached yet: transform the raw predictions and store them
            preds = eval.transformpreds(dataset, f['preds'],
                                        [64, 64], rot=vert)
            f[key] = preds

        # Drop extra channels beyond the part count
        # (these come from segmentation output; soon to be unnecessary)
        if preds.shape[1] > num_parts:
            preds = preds[:, :num_parts, :]

        # Heatmaps are large, so only load them when requested
        if get_hms:
            hms = np.array(f['preds_raw'])

        # Distances are cached under a key that encodes the vert/obs options
        dist_key = 'dist_' + ('v' if vert else '') + ('o' if obs else '')
        try:
            dists = np.array(f[dist_key])
        except KeyError:
            # Evaluate distances to ground truth and cache the result
            dists = eval.getdists(preds, dotrain)
            f[dist_key] = dists
    return preds, dists, hms
Example #3
0
def main(argv):
    """Build the PDF evaluation report for a pose experiment.

    Parses command-line options, loads the experiment's predictions along
    with any comparison baselines, then writes <expdir>/report.pdf with a
    training overview page and (optionally) example-image pages.
    """
    dataset = None       # dataset name: 'mpii' or 'flic'
    exp_id = None        # experiment identifier (directory name under exp/<dataset>)
    extra = []           # other experiment IDs to compare against
    prev = []            # suffixes of previous prediction files for this experiment
    other_dists = {}     # label -> [dists, filt] entries shown on the overview page
    vert = False         # use vertical/rotated predictions
    images = False       # add sample-image pages (also loads heatmaps)
    obs = False          # evaluate on the observed-center test set
    limb = False         # emit the simplified limb-network report instead

    # Process command line arguments
    try:
        opts, args = getopt.getopt(argv, "hd:e:c:p:viol", ["help", "dataset=", "expID=", "compare=", "prev=",
                                                           "vert", "images", "obs", "limb"])
    except getopt.GetoptError:
        print "Incorrect arguments"
        helpmessage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            helpmessage()
        elif opt in ('-d', '--dataset'): dataset = arg
        elif opt in ('-e', '--expID'): exp_id = arg
        elif opt in ('-c', '--compare'): extra += arg.split(',')
        elif opt in ('-p', '--prev'): prev += arg.split(',')
        elif opt in ('-v', '--vert'): vert = True
        elif opt in ('-i', '--images'): images = True
        elif opt in ('-o', '--obs'): obs = True
        elif opt in ('-l', '--limb'): limb = True

    # Validate required arguments (helpmessage() presumably exits — TODO confirm)
    if dataset is None:
        print "No dataset chosen."
        helpmessage()
    if not (dataset in ['mpii','flic']):
        print "Bad argument for --dataset"
        helpmessage()
    if exp_id is None:
        print "No experiment number provided."
        helpmessage()
    expdir = ref.posedir + '/exp/' + dataset + '/' + exp_id

    # Generate the simple report for mini limb networks
    if limb:
        pdf = PdfPages(expdir + '/report.pdf')
        limbreport(dataset, exp_id, pdf)
        pdf.close()
        return

    # Load in dataset information
    num_parts = len(ref.parts[dataset])
    if obs:
        annot = ref.load(dataset, 'test_obs')
        # Point the shared eval module at the observed-center annotations
        eval.annot[dataset] = annot
    else:
        annot = ref.load(dataset, 'valid')

    # Load predictions
    print "Loading predictions"
    pred_opts = [num_parts, vert, obs]
    # get_hms=images: heatmaps are only needed for the example-image pages
    preds, dists, _ = loadpreds(dataset, expdir + '/preds.h5', pred_opts, images)

    # Load previous predictions (files named preds_<suffix>.h5 in this expdir)
    for prv in prev:
        _,d,_ = loadpreds(dataset, expdir + '/preds_%s.h5' % prv, pred_opts)
        other_dists[prv] = [d, None]

    # Load comparison predictions from other experiment directories
    for ext in extra:
        predfile = ref.posedir + '/exp/' + dataset + '/' + ext + '/preds.h5'
        _,d,_ = loadpreds(dataset, predfile, pred_opts)
        other_dists[ext] = [d, None]

    # Load previous best (currently loaded but not added to the report)
    if vert: predfile = expdir + '/../best/preds_vert.h5'
    else: predfile = expdir + '/../best/preds.h5'
    _,best_dists,_ = loadpreds(dataset, predfile, pred_opts)
    #other_dists["Kaiyu's best model"] = [best_dists, None]

    # Load NYU (Tompson) baseline: precomputed distances for mpii, otherwise
    # compute them from the baseline predictions and cache to disk
    if dataset == 'mpii':
        nyu_dists = np.load(eval.get_path(dataset, 'nyu_dists'))
    else:
        if not obs: nyu_preds = np.load(eval.get_path(dataset, 'nyu_pred'))
        else: nyu_preds = np.load(eval.get_path(dataset, 'nyu_pred_obs'))
        nyu_dists = eval.getdists(nyu_preds)
        # Saves to the current working directory
        np.save('nyu_dists_%s%s'%(dataset,'_obs' if obs else ''),nyu_dists)
    other_dists['Tompson'] = [nyu_dists, None]

    # Load training set predictions (disabled; flip to True to include)
    if False:
        _,d,_ = loadpreds(dataset, expdir + '/preds_train.h5', pred_opts, dotrain=True)
        other_dists['Train'] = [d, None]

    filt = None  # no example filtering for the overview page

    print "Creating overview page"
    # Main report creation
    pdf = PdfPages(expdir + '/report.pdf')

    # Training overview page
    trainingoverview(dataset, dists, [filt], [exp_id], exp_id=exp_id,
                     other_dists=other_dists, pdf=pdf)

    if images:
        print "Creating prediction examples page"
        # Overall performance examples: a few best pages, more worst pages
        num_good_exs = 2
        num_bad_exs = 6
        for i in xrange(num_good_exs):
            sampleimages(annot,preds,dists,pdf=pdf,page_num=i+1)
        for i in xrange(num_bad_exs):
            sampleimages(annot,preds,dists,get_worst=True,pdf=pdf,page_num=i+1)

        # print "Creating part heatmap examples"
        # # Heatmap examples
        # for i in xrange(len(ref.part_pairs[dataset])):
        #     title = ref.pair_names[dataset][i]
        #     pt_names = ref.part_pairs[dataset][i]
        #     if not title == 'face':
        #         partheatmaps(annot,preds,preds_raw,dists=dists,partnames=pt_names,title='%s Heatmap Examples'%title,
        #                      pdf=pdf, page_num=1, vert=vert)
        #         for j in xrange(1,3):
        #             partheatmaps(annot,preds,preds_raw,dists=dists,partnames=pt_names,title='%s Heatmap Examples'%title,
        #                          pdf=pdf, page_num=j, vert=vert, get_worst=True)

    pdf.close()
Example #4
0
def partheatmaps(annot, preds, preds_raw, dists=None, partnames=None, filt=None, num_rows=7, vert=False, num_cols=2, res=default_res, get_worst=False, page_num=1, pdf=None, title='Prediction Examples'):
    """Render one page of example images with per-part heatmap overlays.

    Examples are ranked by mean prediction distance over the selected parts
    (ascending; descending when get_worst is True) and tiled into a
    num_rows x num_cols grid. The page is appended to `pdf` when given,
    otherwise shown interactively.
    """
    # Dataset name
    ds = annot.attrs['name']

    # Initialize blank page
    plt.clf()
    fig = plt.figure(figsize=(8.5,11), dpi=100, facecolor='w')
    ax = fig.add_subplot(111)

    # If no specific parts have been chosen, use them all for scoring
    if partnames is None:
        partnames = ref.parts[ds]
    # Accept part names or raw indices interchangeably
    part_idxs = [ref.parts[ds].index(part) if type(part) is str else part for part in partnames]
    part_filt = [i in part_idxs for i in xrange(len(ref.parts[ds]))]
    # Each grid cell holds the input image plus one heatmap per selected part
    page = np.zeros((res[0]*num_rows, res[1]*num_cols*(1+len(part_idxs)), 3), np.uint8)

    # If no filter is provided create entirely true array
    if filt is None:
        filt = np.array([True for _ in xrange(len(preds))])
    else:
        # Copy so examples filtered out below don't mutate the caller's array
        filt = filt.copy()

    # If no precalculated distances are provided, calculate them
    if dists is None:
        dists = eval.getdists(preds)

    # Determine scores from which we'll sort the images
    scores = np.zeros(len(preds))
    for i in xrange(len(preds)):
        # A bit of an interesting line below, gets the mean distance for a particular image
        # while only considering the parts we want and ignoring any parts where there's no annotation
        vals = dists[i, part_filt * (annot['part'][i,:,0] > 0)]
        if len(vals) > 0:
            scores[i] = vals.mean()
        else:
            # If no valid annotation to make a score, filter out this example
            filt[i] = False
    if get_worst:
        # Flip the scores if we're getting the worst images
        scores = -scores
    best_idxs = scores.argsort()
    # When ranking 'head' examples worst-first, dump the hardest indices for
    # later inspection (written to the current working directory)
    if title[:4] == 'head' and get_worst:
        np.save('worst_head_idxs',best_idxs[:200])
    curr_idx = 0

    # Start filling in the overall image
    for i in xrange(page_num * num_rows * num_cols):
        # Skip over examples excluded by the filter
        while curr_idx < len(best_idxs) and not filt[best_idxs[curr_idx]]:
            curr_idx += 1
        if curr_idx >= len(best_idxs): break

        # If we're doing multiple pages, pass over the images that have already been used
        if i >= (page_num - 1) * num_rows * num_cols:
            idx = best_idxs[curr_idx]
            if vert:
                # chg={'rotate':-1} presumably undoes the rotation — TODO confirm
                inp, _ = data.gensample(annot, idx, chg={'rotate':-1})
            else:
                inp, _ = data.gensample(annot, idx)
            new_img = img.sample_with_heatmap(ds, inp, preds_raw[idx], num_rows=1, parts_to_show=part_idxs)
            # NOTE: Python 2 integer division — '/' floors here ('//' under py3)
            row = ((i % (num_rows * num_cols)) / num_cols) * res[0]
            col = ((i % (num_rows * num_cols)) % num_cols) * res[1] * (1+len(part_idxs))

            page[row:row+res[0], col:col+(res[1]*(1+len(part_idxs)))] = new_img
        curr_idx += 1

    # Plot management: annotate the title with best/worst and page number
    if not get_worst:
        title += ' - Best - '
    else:
        title += ' - Worst - '
    title += 'Page %d' % page_num
    ax.set_title(title)
    ax.imshow(page)
    ax.axis('off')
    fig.subplots_adjust(left=0.05,right=.95,bottom=0.05,top=.95)
    if pdf:
        pdf.savefig()
    else:
        plt.show()
    fig.clf()
Example #5
0
def main(argv):
    """Build the PDF evaluation report for a pose experiment.

    Parses command-line options, loads the experiment's predictions along
    with any comparison baselines, then writes <expdir>/report.pdf with a
    training overview page and (optionally) example-image pages.
    """
    dataset = None       # dataset name: 'mpii' or 'flic'
    exp_id = None        # experiment identifier (directory name under exp/<dataset>)
    extra = []           # other experiment IDs to compare against
    prev = []            # suffixes of previous prediction files for this experiment
    other_dists = {}     # label -> [dists, filt] entries shown on the overview page
    vert = False         # use vertical/rotated predictions
    images = False       # add sample-image pages (also loads heatmaps)
    obs = False          # evaluate on the observed-center test set
    limb = False         # emit the simplified limb-network report instead

    # Process command line arguments
    try:
        opts, args = getopt.getopt(argv, "hd:e:c:p:viol", ["help", "dataset=", "expID=", "compare=", "prev=",
                                                           "vert", "images", "obs", "limb"])
    except getopt.GetoptError:
        print("Incorrect arguments")
        helpmessage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            helpmessage()
        elif opt in ('-d', '--dataset'): dataset = arg
        elif opt in ('-e', '--expID'): exp_id = arg
        elif opt in ('-c', '--compare'): extra += arg.split(',')
        elif opt in ('-p', '--prev'): prev += arg.split(',')
        elif opt in ('-v', '--vert'): vert = True
        elif opt in ('-i', '--images'): images = True
        elif opt in ('-o', '--obs'): obs = True
        elif opt in ('-l', '--limb'): limb = True

    # Validate required arguments (helpmessage() presumably exits — TODO confirm)
    if dataset is None:
        print("No dataset chosen.")
        helpmessage()
    if not (dataset in ['mpii','flic']):
        print("Bad argument for --dataset")
        helpmessage()
    if exp_id is None:
        print("No experiment number provided.")
        helpmessage()
    expdir = ref.posedir + '/exp/' + dataset + '/' + exp_id

    # Generate the simple report for mini limb networks
    if limb:
        pdf = PdfPages(expdir + '/report.pdf')
        limbreport(dataset, exp_id, pdf)
        pdf.close()
        return

    # Load in dataset information
    num_parts = len(ref.parts[dataset])
    if obs:
        annot = ref.load(dataset, 'test_obs')
        # Point the shared eval module at the observed-center annotations
        eval.annot[dataset] = annot
    else:
        annot = ref.load(dataset, 'valid')

    # Load predictions
    print("Loading predictions")
    pred_opts = [num_parts, vert, obs]
    # get_hms=images: heatmaps are only needed for the example-image pages
    preds, dists, _ = loadpreds(dataset, expdir + '/preds.h5', pred_opts, images)

    # Load previous predictions (files named preds_<suffix>.h5 in this expdir)
    for prv in prev:
        _,d,_ = loadpreds(dataset, expdir + '/preds_%s.h5' % prv, pred_opts)
        other_dists[prv] = [d, None]

    # Load comparison predictions from other experiment directories
    for ext in extra:
        predfile = ref.posedir + '/exp/' + dataset + '/' + ext + '/preds.h5'
        _,d,_ = loadpreds(dataset, predfile, pred_opts)
        other_dists[ext] = [d, None]

    # Load previous best (currently loaded but not added to the report)
    if vert: predfile = expdir + '/../best/preds_vert.h5'
    else: predfile = expdir + '/../best/preds.h5'
    _,best_dists,_ = loadpreds(dataset, predfile, pred_opts)
    #other_dists["Kaiyu's best model"] = [best_dists, None]

    # Load NYU (Tompson) baseline: precomputed distances for mpii, otherwise
    # compute them from the baseline predictions and cache to disk
    if dataset == 'mpii':
        nyu_dists = np.load(eval.get_path(dataset, 'nyu_dists'))
    else:
        if not obs: nyu_preds = np.load(eval.get_path(dataset, 'nyu_pred'))
        else: nyu_preds = np.load(eval.get_path(dataset, 'nyu_pred_obs'))
        nyu_dists = eval.getdists(nyu_preds)
        # Saves to the current working directory
        np.save('nyu_dists_%s%s'%(dataset,'_obs' if obs else ''),nyu_dists)
    other_dists['Tompson'] = [nyu_dists, None]

    # Load training set predictions (disabled; flip to True to include)
    if False:
        _,d,_ = loadpreds(dataset, expdir + '/preds_train.h5', pred_opts, dotrain=True)
        other_dists['Train'] = [d, None]

    filt = None  # no example filtering for the overview page

    print("Creating overview page")
    # Main report creation
    pdf = PdfPages(expdir + '/report.pdf')

    # Training overview page
    trainingoverview(dataset, dists, [filt], [exp_id], exp_id=exp_id,
                     other_dists=other_dists, pdf=pdf)

    if images:
        print("Creating prediction examples page")
        # Overall performance examples: a few best pages, more worst pages
        num_good_exs = 2
        num_bad_exs = 6
        for i in xrange(num_good_exs):
            sampleimages(annot,preds,dists,pdf=pdf,page_num=i+1)
        for i in xrange(num_bad_exs):
            sampleimages(annot,preds,dists,get_worst=True,pdf=pdf,page_num=i+1)

        # print "Creating part heatmap examples"
        # # Heatmap examples
        # for i in xrange(len(ref.part_pairs[dataset])):
        #     title = ref.pair_names[dataset][i]
        #     pt_names = ref.part_pairs[dataset][i]
        #     if not title == 'face':
        #         partheatmaps(annot,preds,preds_raw,dists=dists,partnames=pt_names,title='%s Heatmap Examples'%title,
        #                      pdf=pdf, page_num=1, vert=vert)
        #         for j in xrange(1,3):
        #             partheatmaps(annot,preds,preds_raw,dists=dists,partnames=pt_names,title='%s Heatmap Examples'%title,
        #                          pdf=pdf, page_num=j, vert=vert, get_worst=True)

    pdf.close()
Example #6
0
def partheatmaps(annot, preds, preds_raw, dists=None, partnames=None, filt=None, num_rows=7, vert=False, num_cols=2, res=default_res, get_worst=False, page_num=1, pdf=None, title='Prediction Examples'):
    """Render one page of example images with per-part heatmap overlays.

    Examples are ranked by mean prediction distance over the selected parts
    (ascending; descending when get_worst is True) and tiled into a
    num_rows x num_cols grid. The page is appended to `pdf` when given,
    otherwise shown interactively.
    """
    # Dataset name
    ds = annot.attrs['name']

    # Initialize blank page
    plt.clf()
    fig = plt.figure(figsize=(8.5,11), dpi=100, facecolor='w')
    ax = fig.add_subplot(111)

    # If no specific parts have been chosen, use them all for scoring
    if partnames is None:
        partnames = ref.parts[ds]
    # Accept part names or raw indices interchangeably
    part_idxs = [ref.parts[ds].index(part) if type(part) is str else part for part in partnames]
    part_filt = [i in part_idxs for i in xrange(len(ref.parts[ds]))]
    # Each grid cell holds the input image plus one heatmap per selected part
    page = np.zeros((res[0]*num_rows, res[1]*num_cols*(1+len(part_idxs)), 3), np.uint8)

    # If no filter is provided create entirely true array
    if filt is None:
        filt = np.array([True for _ in xrange(len(preds))])
    else:
        # Copy so examples filtered out below don't mutate the caller's array
        filt = filt.copy()

    # If no precalculated distances are provided, calculate them
    if dists is None:
        dists = eval.getdists(preds)

    # Determine scores from which we'll sort the images
    scores = np.zeros(len(preds))
    for i in xrange(len(preds)):
        # A bit of an interesting line below, gets the mean distance for a particular image
        # while only considering the parts we want and ignoring any parts where there's no annotation
        vals = dists[i, part_filt * (annot['part'][i,:,0] > 0)]
        if len(vals) > 0:
            scores[i] = vals.mean()
        else:
            # If no valid annotation to make a score, filter out this example
            filt[i] = False
    if get_worst:
        # Flip the scores if we're getting the worst images
        scores = -scores
    best_idxs = scores.argsort()
    # When ranking 'head' examples worst-first, dump the hardest indices for
    # later inspection (written to the current working directory)
    if title[:4] == 'head' and get_worst:
        np.save('worst_head_idxs',best_idxs[:200])
    curr_idx = 0

    # Start filling in the overall image
    for i in xrange(page_num * num_rows * num_cols):
        # Skip over examples excluded by the filter
        while curr_idx < len(best_idxs) and not filt[best_idxs[curr_idx]]:
            curr_idx += 1
        if curr_idx >= len(best_idxs): break

        # If we're doing multiple pages, pass over the images that have already been used
        if i >= (page_num - 1) * num_rows * num_cols:
            idx = best_idxs[curr_idx]
            if vert:
                # chg={'rotate':-1} presumably undoes the rotation — TODO confirm
                inp, _ = data.gensample(annot, idx, chg={'rotate':-1})
            else:
                inp, _ = data.gensample(annot, idx)
            new_img = img.sample_with_heatmap(ds, inp, preds_raw[idx], num_rows=1, parts_to_show=part_idxs)
            # NOTE: Python 2 integer division — '/' floors here ('//' under py3)
            row = ((i % (num_rows * num_cols)) / num_cols) * res[0]
            col = ((i % (num_rows * num_cols)) % num_cols) * res[1] * (1+len(part_idxs))

            page[row:row+res[0], col:col+(res[1]*(1+len(part_idxs)))] = new_img
        curr_idx += 1

    # Plot management: annotate the title with best/worst and page number
    if not get_worst:
        title += ' - Best - '
    else:
        title += ' - Worst - '
    title += 'Page %d' % page_num
    ax.set_title(title)
    ax.imshow(page)
    ax.axis('off')
    fig.subplots_adjust(left=0.05,right=.95,bottom=0.05,top=.95)
    if pdf:
        pdf.savefig()
    else:
        plt.show()
    fig.clf()