Example #3
from random import gauss  # jitter for degenerate configurations
# `vec` is the project's 2-D vector helper module (add, sub, scale, norm, colinear).

def curve_normal(xy1, xy2, xy3=None):
    # Two-point case: treat xy2 as the far endpoint and the segment midpoint as
    # the curve point, reducing it to the three-point case.
    if xy3 is None:
        xy3 = xy2
        xy2 = vec.scale(vec.add(xy1, xy2), 0.5)

    # A collinear configuration has no well-defined normal; jitter the middle
    # point slightly to break the degeneracy.
    if vec.colinear(vec.sub(xy2, xy1), vec.sub(xy3, xy2)):
        xy2 = (xy2[0] + gauss(0, 0.001), xy2[1] + gauss(0, 0.001))

    # Unit vectors pointing from each endpoint toward the middle point; their
    # normalized sum bisects the corner at xy2.
    vec12 = vec.norm((xy2[0] - xy1[0], xy2[1] - xy1[1]))
    vec32 = vec.norm((xy2[0] - xy3[0], xy2[1] - xy3[1]))

    return vec.norm(vec.add(vec12, vec32))
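# A minimal, self-contained sketch of the same idea using plain tuples instead
# of the `vec` helpers (all names below are illustrative stand-ins): the normal
# at the middle point is the normalized sum of the unit vectors pointing from
# each neighbour toward it, i.e. the bisector of the corner at xy2, pointing
# away from the two neighbours.
import math

def _unit(v):
    length = math.hypot(v[0], v[1])
    return (v[0] / length, v[1] / length)

def curve_normal_sketch(xy1, xy2, xy3):
    to_mid_from_1 = _unit((xy2[0] - xy1[0], xy2[1] - xy1[1]))
    to_mid_from_3 = _unit((xy2[0] - xy3[0], xy2[1] - xy3[1]))
    return _unit((to_mid_from_1[0] + to_mid_from_3[0],
                  to_mid_from_1[1] + to_mid_from_3[1]))

# With both neighbours above the corner, the normal points straight down:
print(curve_normal_sketch((-1, 1), (0, 0), (1, 1)))  # ~(0.0, -1.0)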
def leapfrog(gradient_fx, x, u, epsilon):
    # Half-step momentum update: u <- u - (epsilon / 2) * dU(x).
    dU = gradient_fx(x)
    u_new = {k: vec.sub(v, vec.scale(dU[k], epsilon * 0.5))
             for k, v in u.items()}
    # Full-step position update using the half-updated momentum.
    x = {k: vec.add(x[k], vec.scale(v, epsilon))
         for k, v in u_new.items()}
    # Second half-step momentum update with the gradient at the new position,
    # again applied to the half-updated momentum.
    dU = gradient_fx(x)
    u_new = {k: vec.sub(v, vec.scale(dU[k], epsilon * 0.5))
             for k, v in u_new.items()}
    return x, u_new
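# A self-contained 1-D sketch of the same integrator (plain floats instead of
# the dict-of-vectors state above), assuming a quadratic potential
# U(q) = q**2 / 2 so that grad_U(q) = q. The half-kick / drift / half-kick
# pattern keeps the Hamiltonian H = U(q) + u**2 / 2 nearly constant, which is
# why leapfrog is the usual integrator for HMC proposals.
def leapfrog_1d(grad_U, q, u, epsilon):
    u -= 0.5 * epsilon * grad_U(q)   # half-step momentum update
    q += epsilon * u                 # full-step position update
    u -= 0.5 * epsilon * grad_U(q)   # half-step momentum update at the new position
    return q, u

q, u = 1.0, 0.0
for _ in range(100):
    q, u = leapfrog_1d(lambda q: q, q, u, epsilon=0.1)
print(round(0.5 * q * q + 0.5 * u * u, 2))  # 0.5: the initial energy is preserved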
Example #5
def ccw(x, y, z):
    # Lift the 2-D points into 3-D so the cross product is defined.
    x = (x[0], x[1], 0)
    y = (y[0], y[1], 0)
    z = (z[0], z[1], 0)
    vec12 = vec.sub(y, x)
    vec32 = vec.sub(z, x)
    if vec.length(vec12) > 0 and vec.length(vec32) > 0:
        vec12 = vec.norm(vec12)
        vec32 = vec.norm(vec32)
        # The cross product's magnitude is the sine of the angle between the
        # two edges; its z component's sign encodes the turn direction.
        sign = -1 if vec.cross(vec12, vec32)[2] > 0 else 1
        return sign * vec.length(vec.cross(vec12, vec32))
    else:
        return 0
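# A self-contained version of the same orientation test with plain tuples (the
# helper name is illustrative): the z component of the cross product of (y - x)
# and (z - x) is positive for a counter-clockwise turn and negative for a
# clockwise one. The snippet above additionally normalizes the two edges, so
# its magnitude is the sine of the angle between them, and it flips the sign,
# returning negative values for counter-clockwise turns.
def cross_z(x, y, z):
    return (y[0] - x[0]) * (z[1] - x[1]) - (y[1] - x[1]) * (z[0] - x[0])

print(cross_z((0, 0), (1, 0), (1, 1)))   # 1 > 0: counter-clockwise turn
print(cross_z((0, 0), (1, 0), (1, -1)))  # -1 < 0: clockwise turn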
Example #7
def update_thetaSat(hard_sfwhn, sfwhn, mcmcfx, scorefx, objfx, fifx, exp_objfx,
                    makeFGfx, getasnfx, ex_elt_grids, syn_elt_grid, syn_dict_asn,
                    stepsize=0.05):
    # Pair each example grid's assignment with every one of its factor groups,
    # then transpose so corresponding factor groups are collected across examples.
    ex_factors_list = [(getasnfx(ex), makeFGfx(hard_sfwhn, sfwhn, ex)[1])
                       for ex in ex_elt_grids]
    ex_factors_list = [[(a, each_fs) for each_fs in fs]
                       for a, fs in ex_factors_list]
    ex_factors_list = list(zip(*ex_factors_list))

    syn_hard_factors, syn_factors_list = makeFGfx(hard_sfwhn, sfwhn, syn_elt_grid)
    syn_factors = concat(syn_factors_list)

    # Score the current assignment, draw a proposal from the MCMC sampler, and
    # measure how the score changes.
    curr_score = scorefx(syn_factors, syn_dict_asn)
    next_asn, next_score = mcmcfx(syn_hard_factors, syn_factors, syn_dict_asn)
    next_score = scorefx(syn_factors, next_asn)
    delta_score = curr_score - next_score

    next_obj = tuple(objfx(fs, next_asn) for fs in syn_factors_list)
    curr_obj = tuple(objfx(fs, syn_dict_asn) for fs in syn_factors_list)
    next_fi = tuple(fifx(fs, next_asn) for fs in syn_factors_list)
    curr_fi = tuple(fifx(fs, syn_dict_asn) for fs in syn_factors_list)

    exp_obj = tuple(exp_objfx(fs) for fs in ex_factors_list)

    # Contrastive gradient: difference in feature counts between the current
    # and the proposed assignment.
    grad = vec.sub(curr_fi, next_fi)
    objective_next = vec.neg(vec.cabs(vec.sub(exp_obj, next_obj)))
    objective_curr = vec.neg(vec.cabs(vec.sub(exp_obj, curr_obj)))
    delta_objective = sum(vec.sub(objective_curr, objective_next))

    next_theta = get_thetas(sfwhn)

    # Nudge each theta along the gradient only when the score change and the
    # objective change disagree in sign.
    for i in range(len(grad)):
        if delta_score < 0 and delta_objective > 0:
            print("***************************************increase theta")
            next_theta[i] += stepsize * grad[i]
        elif delta_score > 0 and delta_objective < 0:
            print("***************************************decrease theta")
            next_theta[i] -= stepsize * grad[i]

    new_param_features = set_thetas(sfwhn, next_theta)
    return next_asn, new_param_features, sum(objective_next)
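# The theta update above is essentially a contrastive, perceptron-style rule:
# the gradient is the feature-count difference between the current and the
# proposed assignment, and each weight moves by stepsize * grad[i] only when
# the sampler's score change and the objective change disagree in sign. A
# stand-alone sketch of just that rule (all names here are illustrative):
def nudge_thetas(thetas, curr_fi, next_fi, delta_score, delta_objective,
                 stepsize=0.05):
    grad = [c - n for c, n in zip(curr_fi, next_fi)]
    new_thetas = list(thetas)
    for i, g in enumerate(grad):
        if delta_score < 0 and delta_objective > 0:
            new_thetas[i] += stepsize * g   # scored worse, but helped the objective
        elif delta_score > 0 and delta_objective < 0:
            new_thetas[i] -= stepsize * g   # scored better, but hurt the objective
    return new_thetas

print([round(t, 2) for t in nudge_thetas([1.0, 2.0], [3.0, 1.0], [2.0, 2.0],
                                         delta_score=-0.4, delta_objective=0.2)])
# [1.05, 1.95]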
Example #8
def orientgauss(x1, x2):
    # Gaussian density of the offset x1 - x2 under the module-level
    # ORIENT_MEAN / ORIENT_VAR parameters.
    return gaussPDF(vec.sub(x1, x2), ORIENT_MEAN, ORIENT_VAR)
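# gaussPDF, ORIENT_MEAN and ORIENT_VAR are project-level helpers/constants. As
# a self-contained stand-in, an isotropic 2-D Gaussian density on the offset
# x1 - x2 could look like this (the mean and variance values are illustrative):
import math

def orient_gauss_sketch(x1, x2, mean=(0.0, 0.0), var=1.0):
    dx = (x1[0] - x2[0] - mean[0], x1[1] - x2[1] - mean[1])
    sq_dist = dx[0] ** 2 + dx[1] ** 2
    return math.exp(-sq_dist / (2.0 * var)) / (2.0 * math.pi * var)

print(round(orient_gauss_sketch((1.0, 0.0), (0.0, 0.0)), 4))  # 0.0965: one unit from the mean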