Example No. 1
    def compute_performance(self, instances, params):
        """Return the mean depth, depth-max, labelling, and two-labelling
        errors of the inference algorithm over the given instances."""
        depth_errs = []
        depthmax_errs = []
        label_errs = []
        twolabel_errs = []

        for instance in instances:
            gt = instance.GetGroundTruth()
            gt_labels = instance.GetGroundTruthLabels()
            gt_depths = instance.GetGroundTruthDepths()

            payoffs = py_indoor_context.DPPayoffs()

            self.ftrmgr.LoadFeaturesFor(instance)
            self.ftrmgr.Compile(params, payoffs)

            hyp = self.inference.Solve(instance, payoffs)
            hyp_labels = self.inference.GetSolutionLabels()
            hyp_depths = self.inference.GetSolutionDepths()

            depth_errs.append(compute_depth_error(gt_depths, hyp_depths))
            depthmax_errs.append(compute_depthmax_error(gt_depths, hyp_depths))
            label_errs.append(compute_label_error(gt_labels, hyp_labels))
            twolabel_errs.append(compute_twolabel_error(gt_labels, hyp_labels))

        return ( np.mean(depth_errs),
                 np.mean(depthmax_errs),
                 np.mean(label_errs),
                 np.mean(twolabel_errs) )
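
The compute_*_error helpers used above are defined elsewhere in the project and are not part of this listing. A minimal sketch of plausible definitions, assuming depth errors are mean relative depth differences and labelling errors are the fraction of mismatched per-cell labels (the formulas and the label encoding are assumptions, not the project's actual definitions):

import numpy as np

def compute_depth_error(gt_depths, hyp_depths):
    # Assumed: mean relative depth difference w.r.t. ground truth
    gt = np.asarray(gt_depths, dtype=float)
    hyp = np.asarray(hyp_depths, dtype=float)
    return np.mean(np.abs(gt - hyp) / np.maximum(gt, 1e-9))

def compute_depthmax_error(gt_depths, hyp_depths):
    # Assumed: relative difference w.r.t. the larger of the two depths
    gt = np.asarray(gt_depths, dtype=float)
    hyp = np.asarray(hyp_depths, dtype=float)
    return np.mean(np.abs(gt - hyp) / np.maximum(np.maximum(gt, hyp), 1e-9))

def compute_label_error(gt_labels, hyp_labels):
    # Assumed: fraction of cells whose orientation label disagrees
    return np.mean(np.asarray(gt_labels) != np.asarray(hyp_labels))

def compute_twolabel_error(gt_labels, hyp_labels):
    # Assumed: as above, but with the two wall labels merged so only the
    # wall/non-wall distinction is scored (label encoding is hypothetical)
    merge = lambda a: np.minimum(np.asarray(a), 1)
    return np.mean(merge(gt_labels) != merge(hyp_labels))
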
Example No. 2
def classify_example(instance, model, sparm):
    """Given a training instance, return the predicted Manhattan hypothesis."""

    params = training_helpers.create_params_from_psi(list(model.w))
    payoffs = py_indoor_context.DPPayoffs()
    FtrMgr.LoadFeaturesFor(instance)
    FtrMgr.CompileFeatures(params, payoffs)
    soln = Inference.Solve(instance, payoffs)
    return soln
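
classify_example relies on two module-level globals, FtrMgr and Inference, that the training script sets up before any predictions are made. A rough sketch of that setup; the init_globals helper and its feature_manager argument are hypothetical, and only the ManhattanInference constructor is confirmed by Example No. 3:

# Module-level globals assumed by classify_example (and by the
# constraint-finding code in Example No. 5).
FtrMgr = None
Inference = None

def init_globals(feature_manager):
    # Hypothetical setup hook: the real script constructs its feature
    # manager elsewhere and passes it in here.
    global FtrMgr, Inference
    FtrMgr = feature_manager
    Inference = py_indoor_context.ManhattanInference()
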
Example No. 3
import numpy as np
import matplotlib.pyplot as plt

import py_indoor_context

np.set_printoptions(precision=3)

w = np.array([
    1.,
    -1.,
    0.,
])
params = py_indoor_context.ManhattanHyperParameters(w, 4., 6.)

mgr = py_indoor_context.TrainingManager()
mgr.LoadSequence("lab_kitchen1", [70])
mgr.ComputeMockFeatures()
inst = mgr.GetInstance(0)

payoffs = py_indoor_context.DPPayoffs()
inst.CompileFeatures(params, payoffs)

gt_path = inst.GetGroundTruth().GetPath()

solver = py_indoor_context.ManhattanInference()
soln = solver.Solve(inst, payoffs)
soln_path = soln.GetPath()

# Plot the combined payoffs with the ground-truth path (white) and the
# inferred path (green) overlaid
vs = payoffs.Get(0) + payoffs.Get(1)
plt.clf()
plt.title('Solution')
plt.imshow(vs)
plt.plot(gt_path, 'w', linewidth=2.)
plt.plot(soln_path, 'g', linewidth=1.)
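
To keep the figure, the plot can be written out with a standard matplotlib call; the filename below simply mirrors the naming pattern used in Example No. 4 and is not part of the original script:

plt.savefig('lab_kitchen1_frame070_soln.png', dpi=150)
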
Example No. 4
    def generate_report_for(self,
                            params,
                            instances,
                            psis,          # params for each iteration
                            stats,         # array of errors for each iteration
                            report_file,   # .pdf with lots of figures
                            data_file,     # .pickle with lots of python data
                            perf_file,     # .csv with performance summary per instance
                            iters_file,    # .csv with performance at each iteration
                            extended=False):
        assert(len(instances) > 0)
        assert(len(psis) == len(stats))

        print_params(params)

        self.ftrmgr.LoadFeaturesFor(instances[0])  # ensure that something is loaded
        ftr_comments = [ self.ftrmgr.GetFeatureComment(i)
                         for i in range(self.ftrmgr.NumFeatures()) ]
        component_names = ftr_comments + ['Corner penalty', 'Occlusion penalty']

        param_history = [ params_to_dict(create_params_from_psi(psi))
                          for psi in psis ]

        topickle = {}  # to be pickled to a file at the end
        topickle['params'] = params_to_dict(params)
        topickle['features'] = ftr_comments
        topickle['results'] = {}
        topickle['params_history'] = param_history
        topickle['performance_history'] = stats

        depth_errs = []
        depthmax_errs = []
        label_errs = []
        twolabel_errs = []

        pdf = PdfPages(report_file)
        for instance in instances:
            instance_name = '%s:%s' % (instance.GetSequenceName(),
                                       instance.GetFrameId())

            # Get ground truth
            gt = instance.GetGroundTruth()
            gt_labels = instance.GetGroundTruthLabels()
            gt_depths = instance.GetGroundTruthDepths()
            gt_path = gt.GetPath()

            # Compute a payoff matrix
            payoffs = py_indoor_context.DPPayoffs()
            self.ftrmgr.LoadFeaturesFor(instance)
            self.ftrmgr.Compile(params, payoffs)

            # Run the inference algorithm
            hyp = self.inference.Solve(instance, payoffs)
            hyp_labels = self.inference.GetSolutionLabels()
            hyp_depths = self.inference.GetSolutionDepths()
            hyp_path = hyp.GetPath()

            # Compute scores (proportional to posterior)
            hyp_score = payoffs.ComputeScore(hyp)
            gt_score = payoffs.ComputeScore(gt)

            # Compute errors
            depth_err = compute_depth_error(gt_depths, hyp_depths)
            depthmax_err = compute_depthmax_error(gt_depths, hyp_depths)
            label_err = compute_label_error(gt_labels, hyp_labels)
            twolabel_err = compute_twolabel_error(gt_labels, hyp_labels)

            depth_errs.append(depth_err)
            depthmax_errs.append(depthmax_err)
            label_errs.append(label_err)
            twolabel_errs.append(twolabel_err)

            # Add a record for this instance to the data to be pickled
            record = { 'sequence': instance.GetSequenceName(),
                       'frame_id': instance.GetFrameId(),
                       'estimated_path':hyp_path,
                       'estimated_path_orients':hyp.GetOrients(),
                       'estimated_score':hyp_score,
                       'gt_path':gt_path,
                       'gt_path_orients':gt.GetOrients(),
                       'gt_score':gt_score,
                       'depth_error':depth_err,
                       'depthmax_error':depthmax_err,
                       'labelling_error':label_err,
                       'twolabelling_error':twolabel_err,
                       }
            topickle['results'][instance_name] = record

            if extended:
                # Draw the solution as a PNG
                filename = ('%s_frame%03d_soln.png' % (instance.GetSequenceName(),
                                                       instance.GetFrameId()))
                vizfile = os.path.join(self.viz_dir, filename)
                self.inference.OutputSolutionViz(vizfile)

                # Plot the payoffs and paths
                self.draw_payoffs(payoffs, hyp_path, gt_path, instance_name)
                pdf.savefig()
                
                # Plot labels in grid coords
                self.draw_labels(hyp_labels, gt_labels, instance_name)
                pdf.savefig()

        depth_errs = np.array(depth_errs)
        depthmax_errs = np.array(depthmax_errs)
        label_errs = np.array(label_errs)
        twolabel_errs = np.array(twolabel_errs)

        # Plot the distribution of errors
        self.plot_error_hist(depth_errs)
        plt.title('Distribution of depth errors')
        pdf.savefig()

        # Plot the distribution of errors
        self.plot_error_hist(depthmax_errs)
        plt.title('Distribution of depth-max errors')
        pdf.savefig()

        self.plot_error_hist(label_errs)
        plt.title('Distribution of labelling errors')
        pdf.savefig()

        self.plot_error_hist(twolabel_errs)
        plt.title('Distribution of two-labelling errors')
        pdf.savefig()

        self.plot_error_scatter(label_errs, depth_errs)
        pdf.savefig()

        # Write per-instance performance to CSV
        with open(perf_file, 'w') as f:
            w = csv.writer(f)
            w.writerow([ 'Instance',
                         'Depth Error',
                         'Depth-max Error',
                         'Labelling Error',
                         'Two-Labelling Error' ])
            for instance_name in topickle['results']:
                r = topickle['results'][instance_name]
                w.writerow([ instance_name,
                             r['depth_error'],
                             r['depthmax_error'],
                             r['labelling_error'],
                             r['twolabelling_error'] ])

        # Compute final performance summary
        # Do not append these to self.stats because that array is full of
        # evaluations on a *sample* of the instances
        av_depth_err = np.mean(depth_errs)
        av_depthmax_err = np.mean(depthmax_errs)
        av_label_err = np.mean(label_errs)
        av_twolabel_err = np.mean(twolabel_errs)

        # Save performance summary to be pickled
        topickle['average_depth_error'] = av_depth_err
        topickle['average_depthmax_error'] = av_depthmax_err
        topickle['average_labelling_error'] = av_label_err
        topickle['average_twolabelling_error'] = av_twolabel_err

        # Print performance summary to output
        print 'Depth errors: ',depth_errs*100.
        print 'Depth-max errors: ',depthmax_errs*100.
        print 'Labelling errors: ',label_errs*100.
        print '2-Labelling errors: ',twolabel_errs*100.

        print '{:<30}{:.1f}%'.format('Av. depth error:', av_depth_err*100.)
        print '{:<30}{:.1f}%'.format('Av. depth-max error:', av_depthmax_err*100.)
        print '{:<30}{:.1f}%'.format('Av. labelling error:', av_label_err*100.)
        print '{:<30}{:.1f}%'.format('Av. 2-labelling error:', av_twolabel_err*100.)

        # Generate per-iteration report
        if len(stats) > 0:
            stats = np.array(stats)
            iter_depth_errs = stats[:,0]
            iter_depthmax_errs = stats[:,1]
            iter_label_errs = stats[:,2]
            iter_twolabel_errs = stats[:,3]

            # Plot evolution of parameters over time
            self.plot_param_evolution(np.asarray(psis), component_names)
            pdf.savefig()
            
            # Plot error versus iteration
            self.plot_error_evolution(iter_depth_errs)
            plt.ylabel('Average depth error (%)')
            pdf.savefig()

            self.plot_error_evolution(iter_depthmax_errs)
            plt.ylabel('Average depth-max error (%)')
            pdf.savefig()

            self.plot_error_evolution(iter_label_errs)
            plt.ylabel('Average labelling error (%)')
            pdf.savefig()

            self.plot_error_evolution(iter_twolabel_errs)
            plt.ylabel('Average two-labelling error (%)')
            pdf.savefig()

            # Write iteration details to csv
            with open(iters_file, 'w') as f:
                w = csv.writer(f)
                w.writerow([ 'Iteration',
                             'Mean depth error',
                             'Mean depth-max error',
                             'Mean labelling error',
                             'Mean two-labelling error' ] + component_names)
                for itr,(stat,psi) in enumerate(zip(stats,psis)):
                    assert(len(stat) == 4)
                    assert(len(psi) == len(component_names))
                    w.writerow([itr] + list(stat) + list(psi))

        # Save the pickle data
        with open(data_file, 'w') as f:
            cPickle.dump(topickle, f)

        # Write the PDF
        pdf.close()
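
The training_helpers functions referenced above (create_params_from_psi and params_to_dict) are defined outside this listing. A minimal sketch consistent with the component ordering used for component_names (one weight per feature, then the corner and occlusion penalties) and with the ManhattanHyperParameters constructor from Example No. 3; the exact layout of psi is an assumption:

def create_params_from_psi(psi):
    # Assumed layout: feature weights first, then corner and occlusion penalties.
    psi = list(psi)
    w = np.array(psi[:-2])
    return py_indoor_context.ManhattanHyperParameters(w, psi[-2], psi[-1])

def params_to_dict(params):
    # Getter names taken from the commented-out debug output in Example No. 5.
    return {'weights': list(params.GetWeights()),
            'corner_penalty': params.GetCornerPenalty(),
            'occlusion_penalty': params.GetOcclusionPenalty()}
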
Example No. 5
def find_most_violated_constraint_margin(instance, gt, model, sparm):
    """Return ybar associated with x's most violated constraint.

    This is the most-violated-constraint function for margin rescaling.
    Rather than delegating to the general find_most_violated_constraint
    function, it solves the loss-augmented inference problem directly and
    cross-checks the resulting scores and losses."""

    #print '\n\nFinding most violated constraint'

    assert (isinstance(instance, py_indoor_context.TrainingInstance))
    assert (isinstance(gt, py_indoor_context.ManhattanHypothesis))

    params = training_helpers.create_params_from_psi(list(model.w))

    aug_payoffs = py_indoor_context.DPPayoffs()
    reg_payoffs = py_indoor_context.DPPayoffs()

    FtrMgr.LoadFeaturesFor(instance)
    FtrMgr.CompileWithLoss(params, instance, aug_payoffs)
    FtrMgr.Compile(params, reg_payoffs)

    # Solve augmented problem
    aug_soln = Inference.Solve(instance, aug_payoffs)
    score = aug_payoffs.ComputeScore(aug_soln)

    # Compute loss on regular problem
    reg_score = reg_payoffs.ComputeScore(aug_soln)
    reg_loss = instance.ComputeLoss(aug_soln)

    # Check the score
    check_score = reg_score + reg_loss
    if errcheck(score, check_score):
        print '\n*** Inconsistent score!'
        print '  rel error:', relerr(score, check_score)
        print '  abs error:', abserr(score, check_score)
        print '  Aug score:', score
        print '  Reg score:', reg_score
        print '  Loss:', reg_loss
        print '  Reg score + loss:', check_score
        print '  Error:', abs(score - check_score)
        training_helpers.print_params(params)
        #exit(0)

    # check the score another way
    ftr = training_helpers.get_feature(FtrMgr, instance, aug_soln)
    reg_score2 = np.dot(list(ftr), list(model.w))
    reg_loss2 = loss(gt, aug_soln, sparm)
    if errcheck(reg_score, reg_score2):
        print '\n*** Inconsistent score!'
        print '  rel error:', relerr(reg_score, reg_score2)
        print '  abs error:', abserr(reg_score, reg_score2)
        print '  ftr <dot> soln:', reg_score2
        print '  payoffs.ComputeScore(soln):', reg_score
        print '  Instance: %s:%d' % (instance.GetSequenceName(),
                                     instance.GetFrameId())
        print '  ftr:', ftr
        print '  model.w:', list(model.w)
        training_helpers.print_params(params)
        #exit(0)

    # check the loss
    if errcheck(reg_loss, reg_loss2):
        print '\n*** Inconsistent loss!'
        print '  rel error:', relerr(reg_loss, reg_loss2)
        print '  abs error:', abserr(reg_loss, reg_loss2)
        print '  instance.ComputeLoss(soln):', reg_loss
        print '  loss(...):', reg_loss2
        training_helpers.print_params(params)
        #exit(0)

    # Compute GT score and check slack
    gt_score = reg_payoffs.ComputeScore(gt)
    margin = gt_score - reg_score  # this is the margin we're trying to maximize!
    if (margin > reg_loss):
        # The ground truth might not be in the hypothesis class
        # (e.g. when the GT path extends beyond the grid bounds), so
        # the most-violated inference might find a constraint that has
        # slack lower than that for the ground truth. The slack for
        # the ground truth is always zero, so if the slack for the
        # solution that the DP found is negative then we replace it
        # with the ground truth. One way to think about it is that our
        # hypothesis class is {all representable Manhattan models} +
        # {ground truth}, which we perform inference in by comparing
        # the best representable Manhattan model (as found by DP) to
        # the ground truth. The problem here is that at test time
        # we're performing inference in the hypothesis class {all
        # representable Manhattan models}. I don't know what the
        # impact of training and testing on these subtly different
        # hypothesis classes is.
        aug_soln = gt
        print '\n+++Negative slack, replacing with gt (slack=%f)' % (reg_loss -
                                                                     margin)
        #print '  Margin:',margin
        #print '  Loss:',reg_loss
        #print '  Slack:',reg_loss-margin

    #print '\n\nFinding most violated constraint'
    #print '  data weights: ',params.GetWeights()
    #print '  corner penalty:',params.GetCornerPenalty()
    #print '  occlusion penalty:',params.GetOcclusionPenalty()
    #print '  feature(true): ',gt_ftr
    #print '  feature(aug-soln): ',aug_ftr
    #print '  score(aug-soln): ',np.dot(list(model.w), aug_ftr)
    #print '  loss(aug-soln): ',gt.GetInstance().ComputeLoss(aug_soln)

    return aug_soln
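
The numeric-consistency helpers used above (abserr, relerr, errcheck) are also defined outside this listing. A plausible sketch, with the tolerance chosen arbitrarily:

def abserr(a, b):
    return abs(a - b)

def relerr(a, b):
    # Relative error, guarded against division by zero.
    return abs(a - b) / max(abs(a), abs(b), 1e-12)

def errcheck(a, b, tol=1e-5):
    # True when the two values disagree beyond the tolerance, which
    # triggers the diagnostic printouts above.
    return relerr(a, b) > tol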