# Example #1
# 0
def getSatisfying(fs, domain):
    """Build a CSP over the factors' variables and return a solution iterator.

    Every variable mentioned by any factor in fs gets `domain` as its domain,
    each factor must evaluate strictly positive on its own variables, and one
    global constraint requires the solution to use every tile id in the domain.

    Args:
        fs: factor objects, each callable on its values and exposing .variables.
        domain: sequence of tile objects (each with a .tid attribute).

    Returns:
        The constraint problem's lazy solution iterator.
    """
    problem = Problem()
    all_variables = set(concat([f.variables for f in fs]))
    # Explicit loops instead of map()-for-side-effects: under Python 3's lazy
    # map the original calls were never executed, so nothing was registered.
    for v in all_variables:
        problem.addVariable(v, domain)

    def _positive(f):
        # Bind f at call time; a bare loop-body lambda would late-bind it.
        return lambda *args: f(*args) > 0

    for f in fs:
        problem.addConstraint(_positive(f), f.variables)

    # The assignment must cover every distinct tile id available in the domain.
    def useEverything(*tiles):
        return len(set(tile.tid for tile in tiles)) == len(domain)

    problem.addConstraint(useEverything, all_variables)
    return problem.getSolutionIter()
def update_thetaSat(hard_sfwhn ,sfwhn, mcmcfx, scorefx, objfx, fifx, exp_objfx, makeFGfx, getasnfx, ex_elt_grids, syn_elt_grid, syn_dict_asn, stepsize=0.05):
    """One update step for the theta parameters of the soft features.

    Proposes a new assignment for the synthesized grid via MCMC, compares the
    score delta against the objective delta (distance to the examples'
    expected objectives), and nudges each theta by +/- stepsize * grad when
    the two deltas disagree in sign.

    Args (callables are project-supplied; signatures inferred from use here):
        hard_sfwhn, sfwhn: hard/soft parameterized feature sets -- TODO confirm
        mcmcfx: (hard_factors, factors, asn) -> (next_asn, next_score)
        scorefx: (factors, asn) -> scalar score
        objfx, fifx: (factor_group, asn) -> per-group statistic
        exp_objfx: (sequence of (asn, factor) pairs) -> expected objective
        makeFGfx: (hard_sfwhn, sfwhn, grid) -> (hard_factors, factors_list)
        getasnfx: example grid -> its assignment
        ex_elt_grids: training example grids
        syn_elt_grid: the synthesized grid being optimized
        syn_dict_asn: current assignment for the synthesized grid
        stepsize: theta learning rate

    Returns:
        (next_asn, new_param_features, sum(objective_next))
    """
    # For each example: (its assignment, its per-group factor lists).
    ex_factors_list = map( lambda ex: (getasnfx(ex), makeFGfx(hard_sfwhn, sfwhn, ex)[1]), ex_elt_grids)
    # Pair each factor group with its example's assignment.
    # NOTE: tuple-unpacking lambda -- Python 2 only syntax.
    ex_factors_list = map(lambda (a, fs): map(lambda each_fs: (a, each_fs), fs), ex_factors_list)
    # Transpose: regroup the (asn, factors) pairs by group index across examples.
    ex_factors_list = zip(*ex_factors_list)


    # Factor graph for the synthesized grid; flatten the groups for scoring.
    syn_hard_factors, syn_factors_list = makeFGfx(hard_sfwhn, sfwhn, syn_elt_grid)
    syn_factors = concat(syn_factors_list)

    # Score the current assignment, then propose a new one via MCMC.
    curr_score = scorefx(syn_factors, syn_dict_asn)
    next_asn, next_score = mcmcfx(syn_hard_factors, syn_factors, syn_dict_asn)
    # NOTE(review): the score returned by mcmcfx is immediately overwritten
    # with a fresh scorefx evaluation -- confirm this is intentional.
    next_score = scorefx(syn_factors, next_asn)
    delta_score = curr_score - next_score

    # Per-group objective and feature statistics before/after the proposal.
    next_obj = tuple(map(lambda fs: objfx(fs, next_asn), syn_factors_list))
    curr_obj = tuple(map(lambda fs: objfx(fs, syn_dict_asn), syn_factors_list))
    next_fi = tuple(map(lambda fs: fifx(fs, next_asn), syn_factors_list))
    curr_fi = tuple(map(lambda fs: fifx(fs, syn_dict_asn), syn_factors_list))

    # Expected objective per group, estimated from the training examples.
    exp_obj = tuple(map(lambda fs: exp_objfx(fs), ex_factors_list))

    # Gradient surrogate: change in feature statistics between assignments.
    grad = vec.sub(curr_fi, next_fi)
    # Objective = negative absolute distance to the expected objective,
    # so larger (less negative) means closer to the examples.
    objective_next = vec.neg(vec.cabs(vec.sub(exp_obj, next_obj)))
    objective_curr = vec.neg(vec.cabs(vec.sub(exp_obj, curr_obj)))
    delta_objtive = sum(vec.sub(objective_curr, objective_next))  # sic: "objtive" typo, kept for consistency

    next_theta = get_thetas(sfwhn)

    # Both conditions are loop-invariant: when score and objective deltas
    # disagree in sign, every theta moves along (or against) the gradient.
    for i in range(len(grad)):
        if delta_score < 0 and delta_objtive > 0:
            print "***************************************increase theta"
            next_theta[i] += stepsize * grad[i]
        elif delta_score > 0 and delta_objtive < 0:
            print "***************************************decrease theta"
            next_theta[i] -= stepsize * grad[i]

    # Write the updated thetas back into the parameterized features.
    new_param_features = set_thetas(sfwhn, next_theta)
    return next_asn, new_param_features, sum(objective_next)
def schedule_temp_linear(startTemp, endTemp = 1, repeat = 10, Nsamples = 5000):
    """Linear temperature schedule for annealing.

    Interpolates from startTemp toward endTemp over Nsamples // repeat equal
    steps (reaching endTemp exactly on the last step) and holds each value
    for `repeat` consecutive samples.

    Args:
        startTemp: initial temperature (first emitted value is one step below it).
        endTemp: final temperature.
        repeat: how many consecutive samples share each temperature.
        Nsamples: requested schedule length; effective length is
            (Nsamples // repeat) * repeat.

    Returns:
        List of temperatures of length (Nsamples // repeat) * repeat.
    """
    steps = int(float(Nsamples) / float(repeat))
    # t runs over 1/steps, 2/steps, ..., 1.0; blend the endpoints linearly.
    betas = [(1 - t) * startTemp + t * endTemp
             for t in (i / float(steps) for i in range(1, steps + 1))]
    # Hold each beta for `repeat` samples; comprehension replaces the
    # Py2-only eager map + external concat() flatten, same output.
    return [b for b in betas for _ in range(repeat)]