Example #1
def psi(instance, hyp, model, sparm):
    """Return a feature vector representing pattern x and label y.

    This is the combined feature function, which returns either an
    svmapi.Sparse object or a sequence of svmapi.Sparse objects (useful
    during kernel evaluations, as all components undergo kernel
    evaluation separately).  There is no default behavior."""

    assert isinstance(instance, py_indoor_context.TrainingInstance)
    assert isinstance(hyp, py_indoor_context.ManhattanHypothesis), type(hyp)

    return svmapi.Sparse(training_helpers.get_feature(FtrMgr, instance, hyp))
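
The vector returned by psi is the joint feature map that svm-struct scores against its weight vector, and the later examples check exactly that dot product against DPPayoffs.ComputeScore. As a rough sketch only (dense NumPy arrays instead of svmapi.Sparse, and a made-up helper name), the scoring step amounts to:

import numpy as np

def score_hypothesis(w, instance, hyp):
    # Illustrative only: the structured SVM scores a hypothesis as the
    # dot product of the learned weights with the joint feature vector
    # produced by psi / training_helpers.get_feature.
    ftr = training_helpers.get_feature(FtrMgr, instance, hyp)
    return np.dot(np.asarray(list(w)), np.asarray(list(ftr)))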
Example #3
#mgr.ComputeAllFeatures([-5,-1,1,5], True)
mgr.ComputeMockFeatures()

instances = [ mgr.GetInstance(i) for i in range(mgr.GetNumInstances()) ]

sum_err = 0.
for inst in instances:
    inst.ConfigureLabellingLoss()

    print '\nFrame ',inst.GetFrameId()
    payoffs = py_indoor_context.DPPayoffs()
    inst.CompileFeatures(params, payoffs)
    hyp = mgr.Solve(inst, payoffs)

    # Check score
    w = training_helpers.get_psi_from_params(params)
    ftr = training_helpers.get_feature(inst, hyp)
    print 'w: ',w
    print 'ftr: ',ftr
    score = np.dot(ftr, w)
    other_score = payoffs.ComputeScore(hyp)
    err = np.abs(score - other_score) / other_score
    sum_err += err
    print '  ftr <dot> w: ',score
    print '  ComputeScore(...): ',other_score
    print '  Error: %f%%' % (err*100.)

n = mgr.GetNumInstances()
print 'Average error: %f%%' % (sum_err * 100. / n)
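
The loop above checks that compiling the features into DPPayoffs and calling ComputeScore agrees with the explicit dot product of the feature vector with the weights. Factored out as a helper (the name is made up; it reuses only calls that already appear above, and takes the magnitude of the denominator to guard against a negative score), the check looks like:

def score_consistency_error(w, ftr, payoffs, hyp):
    # Relative disagreement between the explicit dot product and the
    # score obtained from the compiled payoffs.
    dot_score = np.dot(ftr, w)
    dp_score = payoffs.ComputeScore(hyp)
    return np.abs(dot_score - dp_score) / np.abs(dp_score)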

Example #4
def find_most_violated_constraint_margin(instance, gt, model, sparm):
    """Return ybar associated with x's most violated constraint.

    The find most violated constraint function for margin rescaling.
    The default behavior is that this returns the value from the
    general find_most_violated_constraint function."""

    #print '\n\nFinding most violated constraint'

    assert isinstance(instance, py_indoor_context.TrainingInstance)
    assert isinstance(gt, py_indoor_context.ManhattanHypothesis)

    params = training_helpers.create_params_from_psi(list(model.w))

    aug_payoffs = py_indoor_context.DPPayoffs()
    reg_payoffs = py_indoor_context.DPPayoffs()

    FtrMgr.LoadFeaturesFor(instance)
    FtrMgr.CompileWithLoss(params, instance, aug_payoffs)
    FtrMgr.Compile(params, reg_payoffs)

    # Solve augmented problem
    aug_soln = Inference.Solve(instance, aug_payoffs)
    score = aug_payoffs.ComputeScore(aug_soln)

    # Compute loss on regular problem
    reg_score = reg_payoffs.ComputeScore(aug_soln)
    reg_loss = instance.ComputeLoss(aug_soln)

    # Check the score
    check_score = reg_score + reg_loss
    if errcheck(score, check_score):
        print '\n*** Inconsistent score!'
        print '  rel error:', relerr(score, check_score)
        print '  abs error:', abserr(score, check_score)
        print '  Aug score:', score
        print '  Reg score:', reg_score
        print '  Loss:', reg_loss
        print '  Reg score + loss:', check_score
        print '  Error:', abs(score - check_score)
        training_helpers.print_params(params)
        #exit(0)

    # check the score another way
    ftr = training_helpers.get_feature(FtrMgr, instance, aug_soln)
    reg_score2 = np.dot(list(ftr), list(model.w))
    reg_loss2 = loss(gt, aug_soln, sparm)
    if errcheck(reg_score, reg_score2):
        print '\n*** Inconsistent score!'
        print '  rel error:', relerr(reg_score, reg_score2)
        print '  abs error:', abserr(reg_score, reg_score2)
        print '  ftr <dot> soln:', reg_score2
        print '  payoffs.ComputeScore(soln):', reg_score
        print '  Instance: %s:%d' % (instance.GetSequenceName(),
                                     instance.GetFrameId())
        print '  ftr:', ftr
        print '  model.w:', list(model.w)
        training_helpers.print_params(params)
        #exit(0)

    # check the loss
    if errcheck(reg_loss, reg_loss2):
        print '\n*** Inconsistent loss!'
        print '  rel error:', relerr(reg_loss, reg_loss2)
        print '  abs error:', abserr(reg_loss, reg_loss2)
        print '  instance.ComputeLoss(soln):', reg_loss
        print '  loss(...):', reg_loss2
        training_helpers.print_params(params)
        #exit(0)

    # Compute GT score and check slack
    gt_score = reg_payoffs.ComputeScore(gt)
    margin = gt_score - reg_score  # this is the margin we're trying to maximize!
    if margin > reg_loss:
        # The ground truth might not be in the hypothesis class
        # (e.g. when the GT path extends beyond the grid bounds), so
        # the most-violated inference might find a constraint whose
        # slack is lower than that of the ground truth. The slack for
        # the ground truth is always zero, so if the slack for the
        # solution that the DP found is negative then we replace it
        # with the ground truth. One way to think about it is that our
        # hypothesis class is {all representable manhattan models} +
        # {ground truth}, in which we perform inference by comparing
        # the best representable manhattan model (as found by DP) to
        # the ground truth. The problem is that at test time we
        # perform inference in the hypothesis class {all representable
        # manhattan models}. I don't know what the impact of training
        # and testing on these subtly different hypothesis classes is.
        aug_soln = gt
        print '\n+++Negative slack, replacing with gt (slack=%f)' % (reg_loss -
                                                                     margin)
        #print '  Margin:',margin
        #print '  Loss:',reg_loss
        #print '  Slack:',reg_loss-margin

    #print '\n\nFinding most violated constraint'
    #print '  data weights: ',params.GetWeights()
    #print '  corner penalty:',params.GetCornerPenalty()
    #print '  occlusion penalty:',params.GetOcclusionPenalty()
    #print '  feature(true): ',gt_ftr
    #print '  feature(aug-soln): ',aug_ftr
    #print '  score(aug-soln): ',np.dot(list(model.w), aug_ftr)
    #print '  loss(aug-soln): ',gt.GetInstance().ComputeLoss(aug_soln)

    return aug_soln
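
errcheck, relerr and abserr are used above but defined elsewhere in the project, so their exact behavior is not shown here. A plausible sketch of helpers with that interface (an assumption, with an arbitrary tolerance) is:

def abserr(a, b):
    # Absolute difference between two scores.
    return abs(a - b)

def relerr(a, b):
    # Difference relative to the larger magnitude (avoids dividing by zero).
    return abs(a - b) / max(abs(a), abs(b), 1e-12)

def errcheck(a, b, tol=1e-6):
    # True when the two values disagree beyond the tolerance, which
    # triggers the diagnostic printouts above.
    return relerr(a, b) > tol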