Example #1
def classify_example(instance, model, sparm):
    """Given a pattern x, return the predicted label."""

    # Rebuild the model parameters from the learned weight vector w.
    params = training_helpers.create_params_from_psi(list(model.w))
    payoffs = py_indoor_context.DPPayoffs()

    # Compile this instance's features into DP payoffs and run inference.
    FtrMgr.LoadFeaturesFor(instance)
    FtrMgr.CompileFeatures(params, payoffs)
    soln = Inference.Solve(instance, payoffs)
    return soln
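
For context, this callback is invoked once per test pattern by the surrounding structured-learning framework. A minimal, hypothetical driver like the one below could exercise it directly; SimpleModel and classify_all are illustrative stand-ins, not part of the actual API.

# Hypothetical driver for classify_example; SimpleModel and classify_all
# are illustrative stand-ins, not part of the real framework.
class SimpleModel(object):
    def __init__(self, w):
        self.w = w  # learned weight vector, as exposed via model.w above

def classify_all(instances, weights, sparm=None):
    model = SimpleModel(weights)
    solutions = []
    for instance in instances:
        # Each call loads features, compiles payoffs, and runs DP inference.
        solutions.append(classify_example(instance, model, sparm))
    return solutions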
Example #4
def print_iteration_stats(ceps, cached_constraint, sample, model, cset, alpha,
                          sparm):
    """Called just before the end of each cutting plane iteration.

    This is called just before the end of each cutting plane
    iteration, primarily to print statistics.  The 'ceps' argument is
    how much the most violated constraint was violated by.  The
    'cached_constraint' argument is true if this constraint was
    constructed from the cache.
    
    The default behavior is that nothing is printed."""

    # generate a report
    params = training_helpers.create_params_from_psi(list(model.w))
    Reporter.add_iteration(params)
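
Reporter is a module-level helper that is not shown in these examples. A rough stub covering the two methods the callbacks rely on (add_iteration here and generate_report in the next example) might look like the sketch below; what a real report records and prints is an assumption.

# Rough stand-in for the Reporter helper; only the two methods used by the
# callbacks are sketched, and the report contents are assumptions.
class ReporterSketch(object):
    def __init__(self):
        self.iterations = []

    def add_iteration(self, params):
        # Snapshot the current parameters for this cutting-plane iteration.
        self.iterations.append(params)

    def generate_report(self, params, extended=False):
        # Summarise training; here we only print the iteration count.
        print 'Cutting-plane iterations recorded:', len(self.iterations)
        if extended:
            print 'Final params:', params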
Example #5
def print_learning_stats(sample, model, cset, alpha, sparm):
    """Print statistics once learning has finished.
    
    This is called after training primarily to compute and print any
    statistics regarding the learning (e.g., training error) of the
    model on the training sample.  You may also use it to make final
    changes to model before it is written out to a file.  For example, if
    you defined any non-pickle-able attributes in model, this is a good
    time to turn them into a pickle-able object before it is written
    out.  Also passed in is the set of constraints cset as a sequence
    of (left-hand-side, right-hand-side) two-element tuples, and an
    alpha of the same length holding the Lagrange multipliers for each
    constraint.

    The default behavior is that nothing is printed."""

    params = training_helpers.create_params_from_psi(list(model.w))
    Reporter.add_iteration(params)  # add a final iteration
    Reporter.generate_report(params, extended=True)
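
The docstring above describes cset as (left-hand-side, right-hand-side) tuples with matching Lagrange multipliers in alpha. If this callback should also report how many constraints end up active, a small helper along the following lines could be used; it is purely illustrative and the tolerance is an assumption.

# Illustrative helper: count constraints with a non-negligible multiplier.
# The (lhs, rhs) structure of cset follows the docstring above; the
# tolerance is an assumption.
def count_active_constraints(cset, alpha, tol=1e-8):
    assert len(cset) == len(alpha)
    active = 0
    for (lhs, rhs), a in zip(cset, alpha):
        if a > tol:
            active += 1
    return active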
Example #7
def find_most_violated_constraint_margin(instance, gt, model, sparm):
    """Return ybar associated with x's most violated constraint.

    The find most violated constraint function for margin rescaling.
    The default behavior is that this returns the value from the
    general find_most_violated_constraint function."""

    #print '\n\nFinding most violated constraint'

    assert isinstance(instance, py_indoor_context.TrainingInstance)
    assert isinstance(gt, py_indoor_context.ManhattanHypothesis)

    params = training_helpers.create_params_from_psi(list(model.w))

    aug_payoffs = py_indoor_context.DPPayoffs()
    reg_payoffs = py_indoor_context.DPPayoffs()

    FtrMgr.LoadFeaturesFor(instance)
    FtrMgr.CompileWithLoss(params, instance, aug_payoffs)
    FtrMgr.Compile(params, reg_payoffs)

    # Solve augmented problem
    aug_soln = Inference.Solve(instance, aug_payoffs)
    score = aug_payoffs.ComputeScore(aug_soln)

    # Compute loss on regular problem
    reg_score = reg_payoffs.ComputeScore(aug_soln)
    reg_loss = instance.ComputeLoss(aug_soln)

    # Check the score
    check_score = reg_score + reg_loss
    if errcheck(score, check_score):
        print '\n*** Inconsistent score!'
        print '  rel error:', relerr(score, check_score)
        print '  abs error:', abserr(score, check_score)
        print '  Aug score:', score
        print '  Reg score:', reg_score
        print '  Loss:', reg_loss
        print '  Reg score + loss:', check_score
        print '  Error:', abs(score - check_score)
        training_helpers.print_params(params)
        #exit(0)

    # check the score another way
    ftr = training_helpers.get_feature(FtrMgr, instance, aug_soln)
    reg_score2 = np.dot(list(ftr), list(model.w))
    reg_loss2 = loss(gt, aug_soln, sparm)
    if errcheck(reg_score, reg_score2):
        print '\n*** Inconsistent score!'
        print '  rel error:', relerr(reg_score, reg_score2)
        print '  abs error:', abserr(reg_score, reg_score2)
        print '  ftr <dot> soln:', reg_score2
        print '  payoffs.ComputeScore(soln):', reg_score
        print '  Instance: %s:%d' % (instance.GetSequenceName(),
                                     instance.GetFrameId())
        print '  ftr:', ftr
        print '  model.w:', list(model.w)
        training_helpers.print_params(params)
        #exit(0)

    # check the loss
    if errcheck(reg_loss, reg_loss2):
        print '\n*** Inconsistent loss!'
        print '  rel error:', relerr(reg_loss, reg_loss2)
        print '  abs error:', abserr(reg_loss, reg_loss2)
        print '  instance.ComputeLoss(soln):', reg_loss
        print '  loss(...):', reg_loss2
        training_helpers.print_params(params)
        #exit(0)

    # Compute GT score and check slack
    gt_score = reg_payoffs.ComputeScore(gt)
    margin = gt_score - reg_score  # this is the margin we're trying to maximize!
    if (margin > reg_loss):
        # The ground truth might not be in the hypothesis class
        # (e.g. when the GT path extends beyond the grid bounds), so
        # the most-violated inference might find a constraint that has
        # slack lower than that for the ground truth. The slack for
        # the ground truth is always zero, so if the slack for the
        # solution that the DP found is negative then we replace it
        # with the ground truth. One way to think about it is that our
        # hypothesis class is {all representable manhattan models} +
        # {ground truth}, which we perform inference in by comparing
        # the best representable manhattan model (as found by DP) to
        # the ground truth. The problem here is that at test time
        # we're performing inference in the hypothesis class {all
        # representable manhattan models}. I don't know what the
        # impact of training and testing on these subtly different
        # hypothesis classes is.
        aug_soln = gt
        print '\n+++Negative slack, replacing with gt (slack=%f)' % (reg_loss -
                                                                     margin)
        #print '  Margin:',margin
        #print '  Loss:',reg_loss
        #print '  Slack:',reg_loss-margin

    #print '\n\nFinding most violated constraint'
    #print '  data weights: ',params.GetWeights()
    #print '  corner penalty:',params.GetCornerPenalty()
    #print '  occlusion penalty:',params.GetOcclusionPenalty()
    #print '  feature(true): ',gt_ftr
    #print '  feature(aug-soln): ',aug_ftr
    #print '  score(aug-soln): ',np.dot(list(model.w), aug_ftr)
    #print '  loss(aug-soln): ',gt.GetInstance().ComputeLoss(aug_soln)

    return aug_soln
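
The consistency checks above call errcheck, relerr, and abserr, which are defined elsewhere in this module. Plausible implementations are sketched below; the tolerance value is an assumption.

# Plausible definitions of the error helpers used by the checks above; the
# real implementations live elsewhere in the module and the tolerance is an
# assumption.
def abserr(a, b):
    return abs(a - b)

def relerr(a, b):
    # Relative error, guarded against division by zero.
    return abs(a - b) / max(abs(a), abs(b), 1e-12)

def errcheck(a, b, tol=1e-5):
    # True when the two quantities disagree beyond the tolerance.
    return relerr(a, b) > tol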