Example #1
def TrackVelocity(x, k, vmax, acmax, Ta):
    ''' compute the velocity at each point along the track (given
    already-computed curvatures) assuming a certain acceleration profile '''
    v = np.minimum(np.abs(acmax / k)**0.5, vmax)

    # also compute arc distance between successive points in x given curvature
    # k; for now we'll just use the linear distance though as it's close enough
    s = np.abs(np.concatenate([x[1:] - x[:-1], x[:1] - x[-1:]]))

    va = 0
    T = 0
    vout = []

    # first pass is just to get the initial velocity
    # let's assume it's zero
    # for i in range(1, len(k)):
    #     va = va + (v[i] - va) / Ta

    for i in range(0, len(k)):
        a = (v[i] - va) / Ta  # acceleration
        dt = s[i] / (va + a/2)  # time to reach next waypoint
        va = np.minimum(va + dt * (v[i] - va) / Ta, v[i])
        T += dt
        vout.append(va)
    return np.array(vout), T
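A minimal usage sketch (not part of the original snippet; assumes numpy is imported as np and that TrackVelocity from above is in scope). On a circular track the target speed is constant, so the profile should settle near min(sqrt(acmax/k), vmax):

import numpy as np

# 100 points on a circle of radius 10, stored as complex x + iy (curvature 0.1)
theta = np.linspace(0, 2 * np.pi, 100, endpoint=False)
x = 10.0 * np.exp(1j * theta)
k = np.full(100, 0.1)

v, T = TrackVelocity(x, k, vmax=5.0, acmax=8.0, Ta=1.0)
print(v[-1], T)  # the speed approaches vmax = 5.0 by the end of the lap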
Example #2
    def normalizing_flows(z_0, norm_flow_params):
        '''
        z_0: [n_samples, D]
        norm_flow_params: list of per-step (z_0_mean, a, b) parameter tuples
        '''

        current_z = z_0
        all_zs = []
        all_zs.append(z_0)
        for params_k in norm_flow_params:

            z_0_mean = params_k[0]
            a = np.abs(params_k[1])
            b = params_k[2]

            # m_x = -1. + np.log(1.+np.exp(np.dot(w.T,u)))
            # u_k = u + (m_x - np.dot(w.T,u)) *  (w/np.linalg.norm(w))

            # print (a.shape)

            # print (current_z.shape)
            z_0_mean = np.reshape(z_0_mean, [len(current_z[0])])

            h = 1./(a + np.abs(current_z - z_0_mean))
            term1 = b * h * (current_z - z_0_mean)
            current_z = current_z + term1

            all_zs.append(current_z)

        return current_z, all_zs
Example #3
 def intersect(sa, sb, image):
     xlima, ylima = sa.bounding_boxes[image]
     xlimb, ylimb = sb.bounding_boxes[image]
     widtha, heighta = xlima[1] - xlima[0], ylima[1] - ylima[0]
     widthb, heightb = xlimb[1] - xlimb[0], ylimb[1] - ylimb[0]
     return (np.abs(xlima[0] - xlimb[0])*2 < (widtha + widthb)) and \
            (np.abs(ylima[0] - ylimb[0])*2 < (heighta + heightb))
Example #4
def PhotometricError(iref, inew, R, T, points, D):
    # points is a tuple ([y], [x]); convert to homogeneous
    siz = iref.shape
    npoints = len(points[0])
    f = siz[1]  # focal length, FIXME
    Xref = np.vstack(((points[1] - siz[1]*0.5) / f,  # x
                      (siz[0]*0.5 - points[0]) / f,  # y (left->right hand)
                      np.ones(npoints)))             # z = 1
    # this is confusingly written -- I am broadcasting the translation T to
    # every column, but numpy broadcasting only works if it's rows, hence all
    # the transposes
    # print D * Xref
    Xnew = (np.dot(so3.exp(R), (D * Xref)).T + T).T
    # print Xnew
    # right -> left hand projection
    proj = Xnew[0:2] / Xnew[2]
    p = (-proj[1]*f + siz[0]*0.5, proj[0]*f + siz[1]*0.5)
    margin = 10  # int(siz[0] / 5)
    inwindow_mask = ((p[0] >= margin) & (p[0] < siz[0]-margin-1) &
                     (p[1] >= margin) & (p[1] < siz[1]-margin-1))
    npts_inw = sum(inwindow_mask)
    if npts_inw < 10:
        return 1e6, np.zeros(6 + npoints)
    # todo: filter points which are now out of the window
    oldpointidxs = (points[0][inwindow_mask],
                    points[1][inwindow_mask])
    newpointidxs = (p[0][inwindow_mask], p[1][inwindow_mask])
    origpointidxs = np.nonzero(inwindow_mask)[0]
    E = InterpolatedValues(inew, newpointidxs) - iref[oldpointidxs]
    # dE/dk ->
    # d/dk r_p^2 = d/dk (Inew(w(r, T, D, p)) - Iref(p))^2
    # = -2r_p dInew/dp dp/dw dw/dX dX/dk
    # = -2r_p * g(w(r, T, D, p)) * dw(r, T, D, p)
    # intensity gradients for each point
    Ig = InterpolatedGradients(inew, newpointidxs)
    # TODO: use tensors for this
    # gradients for R, T, and D
    gradient = np.zeros(6 + npoints)
    for i in range(npts_inw):
        # print 'newidx (y,x) = ', newpointidxs[0][i], newpointidxs[1][i]
        # Jacobian of w
        oi = origpointidxs[i]
        Jw = dw(Xref[0][oi], Xref[1][oi], D[oi], R, T)
        # scale back up into pixel space, right->left hand coords to get
        # Jacobian of p
        Jp = f * np.vstack((-Jw[1], Jw[0]))
        # print origpointidxs[i], 'Xref', Xref[:, i], 'Ig', Ig[:, i], \
        #     'dwdRz', Jw[:, 2], 'dpdRz', Jp[:, 2]
        # full Jacobian = 2*E + Ig * Jp
        J = np.sign(E[i]) * np.dot(Ig[:, i], Jp)
        # print '2 E[i]', 2*E[i], 'Ig*Jp', np.dot(Ig[:, i], Jp)
        gradient[:6] += J[:6]
        # print J[:6]
        gradient[6+origpointidxs[i]] += J[6]

    print R, T, np.sum(np.abs(E)), npts_inw
    # return ((0.2*(npoints - npts_inw) + np.dot(E, E)), gradient)
    return np.sum(np.abs(E)) / (npts_inw), gradient / (npts_inw)
Example #5
def compare_deltas(baseline=None, candidate=None, abs_tol=1e-5, rel_tol=0.01):
  # checks both absolute and relative tolerances; a mismatch must exceed both to fail
  epsilon = 1e-25  # guards against division by zero
  if baseline.shape != candidate.shape:
    return False
  diff_tensor = np.abs(baseline - candidate)
  rel_tensor1 = diff_tensor / (np.abs(baseline) + epsilon)
  rel_tensor2 = diff_tensor / (np.abs(candidate) + epsilon)
  max_error = np.max(diff_tensor)
  max_rel = max(np.max(rel_tensor1), np.max(rel_tensor2))
  if max_error > abs_tol and max_rel > rel_tol:
    return False
  else:
    return True
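A minimal usage sketch (not part of the original snippet; assumes numpy is imported as np): tiny absolute noise passes, while a 5% relative discrepancy fails.

import numpy as np

baseline = np.array([1.0, 2.0, 3.0])
noisy = baseline + 1e-7      # within abs_tol
shifted = baseline * 1.05    # 5% relative error

print(compare_deltas(baseline=baseline, candidate=noisy))    # True
print(compare_deltas(baseline=baseline, candidate=shifted))  # False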
Example #6
 def _add_penalty(self, loss, w):
     """Apply regularization to the loss."""
     if self.penalty == "l1":
         loss += self.C * np.abs(w[:-1]).sum()
     elif self.penalty == "l2":
         loss += (0.5 * self.C) * (w[:-1] ** 2).mean()
     return loss
Example #7
def plot_projection(P, feature_names):
    d = {}
    import pandas as pd
    for (i,v) in enumerate(P.T):
        idxs = np.argsort(-np.abs(v))
        print 'v_%d' % i
        print pd.Series(v, index=feature_names).iloc[idxs].iloc[0:10]
Example #8
	def loss_func(self, w):
		loss = 0.5 * np.mean((self.train_y_ - np.dot(self.train_x_, w))**2)
		if self.penalty_ == "l1": # Lasso
			loss += self.alpha_ * np.sum(np.abs(w[:-1]))
		elif self.penalty_ == "l2": # Ridge
			loss += 0.5 * self.alpha_ * np.mean(w[:-1]**2)
		return loss
Example #9
def SVGPathToTrackPoints(fname, spacing=TRACK_SPACING):
    path = svgpathtools.parse_path(open(fname).read())

    # input: svg based on a 100px / m image
    def next_t(path, t, dist):
        p = path.point(t)
        L = path.length()
        # t += 1.0 / np.abs(path.derivative(t))
        itr = 0
        while itr < 20:
            itr += 1
            p1 = path.point(t)
            err = np.abs(p1 - p) - dist
            d1 = path.derivative(t)
            if np.abs(err) < 1e-5:
                return t, p1, d1 / np.abs(d1)
            derr = np.abs(d1) * L
            # do a step in Newton's method (clipped because some of the
            # gradients in the curve are really small)
            t -= np.clip(err / derr, -1e-2, 1e-2)
            t = np.clip(t, 0, 1)
        return t, p, d1 / np.abs(d1)

    d0 = path.derivative(0)
    pts = [[path.point(0), d0 / np.abs(d0)]]
    t = 0
    while t < 1:
        t, p, d = next_t(path, t, spacing)
        pts.append([p, d])

    return pts
Example #10
def GetTimeSeries():
    TS = []
    ERR = []
    GYRO = []
    ACCEL = []
    lastim = None
    t0 = None
    for msg in parse.ParseLog(open('../rustlerlog-BMPauR')):
        if msg[0] == 'img':
            _, ts, im = msg
            if t0 is None:
                t0 = ts
            im = GammaCorrect(im)
            if lastim is not None:
                TS.append(ts - t0)
                ERR.append(np.sum(np.abs(lastim-im)) / (320*240))
                GYRO.append(GYRO[-1])
                ACCEL.append(ACCEL[-1])
            lastim = im
        elif msg[0] == 'imu':
            _, ts, gyro, mag, accel = msg
            if t0 is None:
                t0 = ts
            TS.append(ts - t0)
            GYRO.append(gyro)
            ACCEL.append(accel)
            if len(ERR):
                ERR.append(ERR[-1])
            else:
                ERR.append(0)
    return np.array(TS), np.array(ERR), np.array(GYRO), np.array(ACCEL)
Example #11
def soft_thr(x, lambdaPar, lower=None, upper=None):
    out = np.sign(x) * np.fmax(np.abs(x) - lambdaPar, 0)
    if lower is not None:
        out[out < lower] = 0.0
    if upper is not None:
        out[out > upper] = 0.0
    return out
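A minimal usage sketch (not part of the original snippet; assumes numpy is imported as np): values within the threshold are zeroed, the rest are shrunk toward zero.

import numpy as np

x = np.array([-3.0, -0.5, 0.2, 1.5, 4.0])
print(soft_thr(x, 1.0))  # approximately [-2., -0., 0., 0.5, 3.]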
Example #12
def TrackNormal(x):
    xx = np.concatenate([x[-1:], x, x[:1]])
    p0 = xx[:-2]
    p2 = xx[2:]
    T = p2 - p0  # track derivative
    uT = np.abs(T)
    return T / uT
Example #13
def initialize(deep_map, X,num_pseudo_params):
    smart_map = {}
    for layer,layer_map in deep_map.iteritems():
        smart_map[layer] = {}
        for unit,gp_map in layer_map.iteritems():
            smart_map[layer][unit] = {}
            cov_params = gp_map['cov_params']
            lengthscales = cov_params[1:]
            if layer == 0:
                pairs = itertools.combinations(X, 2)
                dists = np.array([np.abs(p1-p2) for p1,p2 in pairs])
                smart_lengthscales = np.array([np.log(np.median(dists[:,i])) for i in xrange(len(lengthscales))])
                kmeans = KMeans(n_clusters = num_pseudo_params, init = 'k-means++')
                fit = kmeans.fit(X)
                smart_x0 = fit.cluster_centers_
                #inds = npr.choice(len(X), num_pseudo_params, replace = False)
                #smart_x0 = np.array(X)[inds,:]
                smart_y0 = np.ndarray.flatten(smart_x0) 
                #smart_y0 = np.array(y)[inds]
                smart_noise_scale = np.log(np.var(smart_y0))
            else:
                smart_x0 = gp_map['x0']
                smart_y0 = np.ndarray.flatten(smart_x0[:,0])
                smart_lengthscales = np.array([np.log(1) for i in xrange(len(lengthscales))])
                smart_noise_scale = np.log(np.var(smart_y0))
            gp_map['cov_params'] = np.append(cov_params[0],smart_lengthscales)
            gp_map['x0'] = smart_x0
            gp_map['y0'] = smart_y0
            #gp_map['noise_scale'] = smart_noise_scale
            smart_map[layer][unit] = gp_map
    smart_params = pack_deep_params(smart_map)
    return smart_params
Example #14
def test_abs():
    fun = lambda x: 3.0 * np.abs(x)
    d_fun = grad(fun)
    check_grads(fun, 1.1)
    check_grads(fun, -1.1)
    check_grads(d_fun, 1.1)
    check_grads(d_fun, -1.1)
Example #15
def taylor_sine(x):
    ans = currterm = x
    i = 0
    while np.abs(currterm) > 0.001:
        currterm = -currterm * x ** 2 / ((2 * i + 3) * (2 * i + 2))
        ans = ans + currterm
        i += 1
    return ans
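A quick sanity check (not part of the original snippet; assumes numpy is imported as np): the truncated series should match np.sin to roughly the 1e-3 stopping tolerance.

import numpy as np

x = 1.2
print(taylor_sine(x), np.sin(x))  # both approximately 0.932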
Example #16
def fun(x):
    curr = x
    ans = curr
    for i in xrange(1000):
        curr = - curr * x**2 / ((2*i+3)*(2*i+2))
        ans = ans + curr
        if np.abs(curr) < 0.2: break
    return ans
Example #17
 def next_t(path, t, dist):
     p = path.point(t)
     L = path.length()
     # t += 1.0 / np.abs(path.derivative(t))
     itr = 0
     while itr < 20:
         itr += 1
         p1 = path.point(t)
         err = np.abs(p1 - p) - dist
         d1 = path.derivative(t)
         if np.abs(err) < 1e-5:
             return t, p1, d1 / np.abs(d1)
         derr = np.abs(d1) * L
         # do a step in Newton's method (clipped because some of the
         # gradients in the curve are really small)
         t -= np.clip(err / derr, -1e-2, 1e-2)
         t = np.clip(t, 0, 1)
     return t, p, d1 / np.abs(d1)
Example #18
def hard_thr(x, lambdaPar, lower=None, upper=None):
    out = np.copy(x)
    out[np.abs(x) < lambdaPar] = 0.0

    if lower is not None:
        out[out < lower] = 0.0
    if upper is not None:
        out[out > upper] = 0.0
    return out
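A minimal usage sketch (not part of the original snippet; assumes numpy is imported as np): unlike soft thresholding, the surviving values are kept unchanged.

import numpy as np

x = np.array([-3.0, -0.5, 0.2, 1.5, 4.0])
print(hard_thr(x, 1.0))  # approximately [-3., 0., 0., 1.5, 4.]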
Example #19
 def callback(weights, iter):
     if iter % 10 == 0:
         print "max of weights", np.max(np.abs(weights))
         train_preds = undo_norm(pred_fun(weights, train_smiles))
         cur_loss = loss_fun(weights, train_smiles, train_targets)
         training_curve.append(cur_loss)
         print "Iteration", iter, "loss", cur_loss, "train RMSE", rmse(train_preds, train_raw_targets),
         if validation_smiles is not None:
             validation_preds = undo_norm(pred_fun(weights, validation_smiles))
             print "Validation RMSE", iter, ":", rmse(validation_preds, validation_raw_targets),
Example #20
def test_pow():
    fun = lambda x, y : to_scalar(x ** y)
    d_fun_0 = lambda x, y : to_scalar(grad(fun, 0)(x, y))
    d_fun_1 = lambda x, y : to_scalar(grad(fun, 1)(x, y))
    make_positive = lambda x : np.abs(x) + 1.1 # Numeric derivatives fail near zero
    for arg1, arg2 in arg_pairs():
        arg1 = make_positive(arg1)
        check_grads(fun, arg1, arg2)
        check_grads(d_fun_0, arg1, arg2)
        check_grads(d_fun_1, arg1, arg2)
Example #21
def fun(x):
    currterm = x
    ans = currterm
    for i in range(1000):
        print(i, end=' ')
        currterm = - currterm * x ** 2 / ((2 * i + 3) * (2 * i + 2))
        ans = ans + currterm
        if np.abs(currterm) < 0.2: break # (Very generous tolerance!)

    return ans
Example #22
def magcal_residual(MAG, a, mb):
    """ residual from all observations given magnetometer eccentricity, bias,
    gyro bias, and gyro scale"""

    A = np.array([
        [a[0], a[1], a[2]],
        [0,    a[3], a[4]],
        [0,    0,    a[5]]
    ])

    mag = np.dot(MAG - mb, A)
    return np.mean(np.abs(1 - np.einsum('ji,ji->j', mag, mag)))
Example #23
 def _apply_nonlinearity(self, nonlin, res):
     if nonlin == 'none' or nonlin == 'linear':
         return res
     if nonlin == 'relu':
         return 0.5 * (res + np.abs(res))
     if nonlin == 'tanh':
         return np.tanh(res)
     if nonlin == 'softmax':
         res -= res.max()
         res = np.exp(res)
         return res / res.sum()
     raise Exception('unknown nonlinearity: "%s"' % nonlin)
Example #24
def TrackCurvature(x):
    # use quadratic b-splines at each point to estimate curvature
    # I get almost the same formula I had before, but it's off by a factor of 4!

    xx = np.concatenate([x[-1:], x, x[:1]])
    p0 = xx[:-2]
    p1 = xx[1:-1]
    p2 = xx[2:]
    T = p2 - p0  # track derivative
    uT = np.abs(T)
    TT = 4*(p0 - 2*p1 + p2)
    k = (np.real(T)*np.imag(TT) - np.imag(T)*np.real(TT)) / (uT**3)
    return k
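A quick check of the formula (not part of the original snippet; assumes numpy is imported as np): for points on a unit circle the curvature should come out close to 1/radius = 1.

import numpy as np

theta = np.linspace(0, 2 * np.pi, 100, endpoint=False)
x = np.exp(1j * theta)  # track points as complex numbers x + iy
print(TrackCurvature(x)[:3])  # each entry is approximately 1.0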
Example #25
 def callback(weights, iter):
     if iter % 10 == 0:
         print "max of weights", np.max(np.abs(weights))
         # import pdb; pdb.set_trace()
         train_preds = undo_norm(pred_fun(weights, train_smiles[:num_print_examples]))
         cur_loss = loss_fun(weights, train_smiles[:num_print_examples], train_targets[:num_print_examples]) # V: refers to line number #78 i.e.
                                                                                                             # def loss_fun(weights, smiles, targets) of build_vanilla_net.py
         training_curve.append(cur_loss)
         print "Iteration", iter, "loss", cur_loss,\
               "train RMSE", rmse(train_preds, train_raw_targets[:num_print_examples]),
         if validation_smiles is not None:
             validation_preds = undo_norm(pred_fun(weights, validation_smiles))
             print "Validation RMSE", iter, ":", rmse(validation_preds, validation_raw_targets),
Example #26
def gradient_descent(objFunc, w):
	dfunc = grad(objFunc)
	lrate = 0.000000001
	a = []
	for i in range(8000):
		a.append(objFunc(w))
		improv = lrate * dfunc(w)
		w = w - improv
		if i % 500 == 0:
			lrate = lrate/10.0
		if len(a) > 3 and (np.abs(a[-1] - a[-2]) < 0.00001):
			break
	return w,a
Example #27
def neural_net_train(features,labels,num_iter = 2000,opt_method = 'forward_backward') :
    layer_sizes = [2,5,5,1]
    l2_reg = 2.0
    param_scale = 0.5

    init_params = neural_net_init(param_scale,layer_sizes)

    '''def plain_objective(params) :
        return lms_loss(params,features,labels,l2_reg)'''
    plain_objective = gen_objective(features,labels,l2_reg)
    objective_grad = auto_grad(plain_objective)

    print("          Iteration|      Train accuracy")

    optimized_params = init_params
    gd_step = 0.2
    for i in range(num_iter) :
        if opt_method == 'forward_backward' :
            optimized_params_ori = optimized_params
            value_old = plain_objective(optimized_params)
            flattened_grad,unflatten,x = flatten_func(objective_grad,optimized_params)
            x -= flattened_grad(x) * gd_step
            optimized_params = unflatten(x)
            value_new = plain_objective(optimized_params)
            if value_new < value_old :
                gd_step *= 1.618
            else :
                gd_step *= 0.618
                optimized_params = optimized_params_ori
        elif opt_method == 'stepest' :
            value_old = plain_objective(optimized_params)
            flattened_grad,unflatten,x = flatten_func(objective_grad,optimized_params)
            local_gd_step = gd_step
            best_gd_step = 0.0
            for j in range(10) :
                x_test = x - flattened_grad(x) * local_gd_step
                last_optimized_params = unflatten(x_test)
                value_new = plain_objective(last_optimized_params)
                if value_new < value_old :
                    best_gd_step = local_gd_step
                    local_gd_step *= 1.618
                else :
                    local_gd_step *= 0.618
            if auto_np.abs(best_gd_step - 0.0) < 0.00000000001 :
                gd_step *= 0.618
            x -= flattened_grad(x) * best_gd_step
            optimized_params = unflatten(x)
        print_perf(optimized_params,i,features,labels)

    return optimized_params
Example #28
    def variational_log_density(params, samples):
        '''
        samples: [n_samples, D]
        params: (mean, log_std, norm_flow_params), each flow step a (z_0_mean, a, b) tuple
        Returns: [num_samples]
        '''
        n_samples = len(samples)
        d = len(samples[0])

        mean = params[0]
        log_std = params[1]
        norm_flow_params = params[2]

        # print (samples.shape)

        # samples = sample_diag_gaussian(mean, log_std, num_samples, rs)
        z_k, all_zs = normalizing_flows(samples, norm_flow_params)

        logp_zk = logprob(z_k)
        logp_zk = np.reshape(logp_zk, [n_samples, 1])

        logq_z0 = diag_gaussian_log_density(samples, mean, log_std)
        logq_z0 = np.reshape(logq_z0, [n_samples, 1])

        sum_nf = np.zeros([n_samples,1])
        for params_k in range(len(norm_flow_params)):
            z_0_mean = norm_flow_params[params_k][0]
            a = np.abs(norm_flow_params[params_k][1])
            b = norm_flow_params[params_k][2]

            # m_x = -1. + np.log(1.+np.exp(np.dot(w.T,u)))
            # u_k = u + (m_x - np.dot(w.T,u)) *  (w/np.linalg.norm(w))

            # [n_samples, D]
            # phi = np.dot((1.-np.tanh(np.dot(all_zs[params_k],w)+b)**2), w.T)
            # [n_samples, 1]
            current_z = all_zs[params_k]

            z_0_mean = np.reshape(z_0_mean, [len(current_z[0])])

            h = 1./(a + np.abs(current_z - z_0_mean))
            h_prime = -1*(a+np.abs(current_z-z_0_mean))**2 * (np.abs(current_z)/current_z)

            term1 = (1+b*h)**(d-1)
            term2 = 1+ b * h + b * h_prime * np.abs(current_z-z_0_mean)
            term3 = term1 * term2

            # accumulate the log-det-Jacobian contribution of this flow step
            sum_nf = sum_nf + np.log(np.abs(term3))

        # return logq_z0 - sum_nf
        print (logq_z0.shape)
        log_qz = np.reshape(logq_z0 - sum_nf, [n_samples])
        return log_qz
Example #29
def QuadFitCurvatureMap(x):
    curv = []

    for i in range(len(x)):
        # do a look-ahead quadratic fit, just like the car would do
        pts = x[(np.arange(6) + i) % len(x)] / 100  # convert to meters
        basis = (pts[1] - pts[0]) / np.abs(pts[1] - pts[0])

        # project onto forward direction
        pts = (np.conj(basis) * (pts - pts[0]))

        p = np.polyfit(np.real(pts), np.imag(pts), 2)
        curv.append(p[0] / 2)

    return np.float32(curv)
Example #30
	def loss_func(self, w):
		prob = self.prob_func(w, self.train_x_)
		loss = np.array([0.0] * self.train_x_.shape[0])
		for c in xrange(len(self.classes_)):
			loss += np.equal(self.train_y_, c) * prob[:,c]
		score = -np.sum(np.log(loss))
		if self.penalty_ == "l1": # Lasso
			score += self.alpha_ * np.sum(np.abs(w))
		elif self.penalty_ == "l2": # Ridge
			# ===FIXME===
			# It is unclear why the L2 penalty is scaled by 3.0:
			# by the definition of the L2 norm the scale should be 0.5,
			# but 3.0 reproduces the values computed by scikit-learn
			# (perhaps to reduce overfitting?)
			score += 3.0 * self.alpha_ * np.mean(w**2)
		return score
Example #31
    def eigdecomp(self, x, k):

        # self.hvp.update_x(x)
        # d, U = doublePass(self.hvp, self.Omega, k, s=1, check=False)

        J = self.J(x)
        if len(np.shape(J)) == 1:
            J = np.array([J])
        Gauss_Newton = np.dot(np.dot(J.T, self.Gamma_noise_inv), J)
        d, U = np.linalg.eigh(Gauss_Newton)

        sort_perm = np.abs(d).argsort()

        sort_perm = sort_perm[::-1]
        d = d[sort_perm[:k]]
        U = U[:, sort_perm[:k]]

        return d, U
Example #32
def _get_dx_wrt(dx, var, dx_scaling, dx_func=None):
    """Scale `dx` for a particular variable `var`."""
    assert dx_scaling in [
        "none",
        "median",
        "custom",
    ], "`dx_scaling` must be 'none' or 'median'."
    if dx_scaling == "none":
        dx_wrt = dx
    elif dx_scaling == "median":
        median_var = np.nanmedian(var)
        if median_var == 0:
            dx_wrt = dx
        else:
            dx_wrt = dx * np.abs(median_var)
    elif dx_scaling == "custom":
        dx_wrt = dx_func(var)
    return dx_wrt
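A minimal usage sketch (not part of the original snippet; assumes numpy is imported as np): with "median" scaling the step is proportional to the typical magnitude of the variable.

import numpy as np

var = np.array([10.0, 12.0, 14.0])
print(_get_dx_wrt(1e-6, var, "median"))  # 1e-6 * 12.0 = 1.2e-05
print(_get_dx_wrt(1e-6, var, "none"))    # 1e-06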
Example #33
def one_of_K_code(arr):
    """
    Make a one-of-K coding out of the numpy array.
    For example, if arr = ([0, 1, 0, 2]), then return a 2d array of the form 
     [[1, 0, 0], 
      [0, 1, 0],
      [1, 0, 0],
      [0, 0, 1]]
    """
    U = np.unique(arr)
    n = len(arr)
    nu = len(U)
    X = np.zeros((n, nu))
    for i, u in enumerate(U):
        Ii = np.where(np.abs(arr - u) < 1e-8)
        #ni = len(Ii)
        X[Ii[0], i] = 1
    return X
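A minimal usage sketch (not part of the original snippet; assumes numpy is imported as np), reproducing the example from the docstring:

import numpy as np

arr = np.array([0, 1, 0, 2])
print(one_of_K_code(arr))
# [[1. 0. 0.]
#  [0. 1. 0.]
#  [1. 0. 0.]
#  [0. 0. 1.]]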
Example #34
def nsp_ik(goal, q, nesterov=True):
    def cost(q):
        objective = (goal - fk(q[0:3], l1))
        constraint = np.pi - q[2]

        return objective.T @ objective + \
               constraint.T * constraint

    def g(q):
        return fk(q[0:2], l1) - fk(q[3:], l2)

    grad_cost = grad(cost)

    constraint_jac = jacobian(g)

    alpha = 0.01
    nu = 0.9
    max_itr = 100

    v, n_itr = 0, 0
    while np.any(np.abs(cost(q)) > EPS):

        # get constraint jacobian
        G = constraint_jac(q)

        Ginv = np.linalg.pinv(G)

        I = np.identity(Ginv.shape[0])

        # matrix to projects into the null space of the constraint jacobian
        nsp = (I - G.T @ Ginv.T).T

        if nesterov:
            v = nu * v + alpha * grad_cost(q - nu * v)
            q -= nsp @ v
        else:
            q -= nsp @ (alpha * grad_cost(q))

        draw_arm(q[0:3], l1, c1, swap=False)
        draw_arm(q[3:], l2, c2, clear=False)

        n_itr += 1
        if n_itr > max_itr:
            break
Example #35
def test_archaic_and_pairwisediffs():
    #logging.basicConfig(level=logging.DEBUG)
    theta = 1
    N_e = 1.0
    join_time = 1.0
    num_runs = 1000

    def logit(p):
        return np.log(p / (1. - p))

    def expit(x):
        return 1. / (1. + np.exp(-x))

    n_bases = 1000
    model = momi.DemographicModel(
        N_e, muts_per_gen=theta/4./N_e/n_bases)

    model.add_time_param(
        "sample_t", random.uniform(0.001, join_time-.001) / join_time,
        upper=join_time)
    model.add_size_param("N", 1.0)
    model.add_leaf("a", N="N")
    model.add_leaf("b", t="sample_t", N="N")
    model.move_lineages("a", "b", join_time)

    data = model.simulate_data(length=n_bases,
                               recoms_per_gen=0,
                               num_replicates=num_runs,
                               sampled_n_dict={"a": 2, "b": 2})

    model.set_data(data.extract_sfs(1),
                   use_pairwise_diffs=False,
                   mem_chunk_size=-1)

    true_params = np.array(list(model.get_params().values()))
    #model.set_x([logit(random.uniform(.001, join_time-.001) / join_time),
    model.set_params([
        logit(random.uniform(.001, join_time-.001) / join_time),
        random.uniform(-1, 1)],
                     scaled=True)
    res = model.optimize(method="trust-ncg", hessp=True)
    inferred_params = np.array(list(model.get_params().values()))

    assert np.max(np.abs(np.log(true_params / inferred_params))) < .2
Example #36
    def update_Hess(H, new_x, prev_x, new_g, prev_g):
        if np.allclose(new_x, prev_x):
            return H

        s = new_x - prev_x
        y = new_g - prev_g
        sy = np.dot(s, y)
        Bs = np.linalg.solve(H, s)

        y_Bs = y - Bs
        if np.abs(np.dot(
                s, y_Bs)) < 1e-8 * np.linalg.norm(s) * np.linalg.norm(y_Bs):
            # skip SR1 update
            return H

        Hy = np.dot(H, y)
        s_Hy = s - Hy
        H = H + np.outer(s_Hy, s_Hy) / np.dot(s_Hy, y)
        return H
Example #37
    def callback(weights, iter):
        if iter % 10 == 0:
            print("Iteration {}".format(iter))
            print("\tmax of weights: {}".format(np.max(np.abs(weights))))

            cur_loss = loss_fun(weights, train_smiles, train_targets)
            training_curve.append(cur_loss)
            print("\tloss {}".format(cur_loss))

            train_preds = undo_norm(pred_fun(weights, train_smiles))
            print("\ttrain {}: {}".format(
                nll_func_name, nll_func(train_preds, train_raw_targets)))

            if validation_smiles is not None:
                validation_preds = undo_norm(
                    pred_fun(weights, validation_smiles))
                print("\tvalidation {}: {}".format(
                    nll_func_name,
                    nll_func(validation_preds, validation_raw_targets)))
Example #38
    def test_z_update(self):
        # our manual update
        test_z_update = dp.z_update(mu, mu2, x, e_info_x, e_log_v, e_log_1mv)

        # autograd update
        get_auto_z_update = grad(dp.e_loglik_full, 6)
        auto_z_update = get_auto_z_update(
                x, mu, mu2, tau, e_log_v, e_log_1mv, e_z,
                e_info_x, e_logdet_info_x, prior_mu,
                prior_inv_wishart_scale, kappa, prior_dof, alpha)

        log_const = sp.misc.logsumexp(auto_z_update, axis = 1)
        auto_z_update = np.exp(auto_z_update - log_const[:, None])

        #print(auto_z_update[0:5, :])
        #print(test_z_update[0:5, :])

        self.assertTrue(\
                np.sum(np.abs(auto_z_update - test_z_update)) <= 10**(-8))
Example #39
def Fundamental(Y):
    # initial guess: use peak of FFT to get to within one sample period
    T = float(len(Y)) / np.argmax(np.abs(np.fft.fft(Y)[:2000]))

    # (not really necessary, but why not) refine
    def mag(T):
        w = np.arange(0, len(Y)) * 2 * np.pi / T
        x = Y * np.exp(1j * w)  # dx/dw = j Y exp(jw)
        # dx*/dw = -j Y exp(-jw)
        # d/dw = x* dx/dw + x dx*/dw
        #      = Y exp(1j*w)
        return np.abs(np.sum(x))

    # print T, mag(T)
    # print T+0.1, mag(T+0.1)
    # print T-0.1, mag(T-0.1)
    # print round(T), mag(round(T))

    return round(T)
Example #40
 def callback(weights, iter):
     if iter % 10 == 0:
         print("max of weights", np.max(np.abs(weights)))
         train_preds = undo_norm(
             pred_fun(weights, train_smiles[:num_print_examples]))
         cur_loss = loss_fun(weights, train_smiles[:num_print_examples],
                             train_targets[:num_print_examples])
         training_curve.append(cur_loss)
         print("Iteration", iter, "loss", cur_loss,\
               "train RMSE", rmse(train_preds, train_raw_targets[:num_print_examples]),)
         if validation_smiles is not None:
             validation_preds = undo_norm(
                 pred_fun(weights, validation_smiles))
             print(
                 "Validation RMSE",
                 iter,
                 ":",
                 rmse(validation_preds, validation_raw_targets),
             )
Example #41
def erf(x):
    # constants
    a1 = 0.254829592
    a2 = -0.284496736
    a3 = 1.421413741
    a4 = -1.453152027
    a5 = 1.061405429
    p = 0.3275911

    # Save the sign of x
    sign = np.sign(x)
    x = np.abs(x)

    # A&S formula 7.1.26
    t = 1.0 / (1.0 + p * x)
    y = 1.0 - ((((
        (a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * np.exp(-x**2)

    return sign * y
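A small check (not part of the original snippet; assumes numpy is imported as np): the Abramowitz & Stegun 7.1.26 approximation used above has a maximum absolute error of roughly 1.5e-7.

import numpy as np

for x in (-1.0, 0.0, 0.5, 2.0):
    print(x, erf(x))  # erf(0.5) is approximately 0.5205, erf(2.0) approximately 0.9953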
Example #42
 def variational_objective(params, t):
     """Provides a stochastic estimate of the variational lower bound."""
     W, U, B = unpack_params(params)
     z0 = np.random.multivariate_normal(np.zeros(dim_z), np.eye(dim_z),
                                        num_samples)
     z_prev = z0
     sum_log_det_jacob = 0.
     for k in range(K):
         w, u, b = W[k], U[k], B[k]
         u_hat = (m(np.dot(w, u)) - np.dot(w, u)) * (w /
                                                     np.linalg.norm(w)) + u
         affine = np.outer(h_prime(np.matmul(z_prev, w) + b), w)
         sum_log_det_jacob += np.log(eps + np.abs(1 + np.matmul(affine, u)))
         z_prev = z_prev + np.outer(h(np.matmul(z_prev, w) + b), u_hat)
     z_K = z_prev
     log_q_K = -0.5 * np.sum(np.log(2 * np.pi) + z0**2,
                             1) - sum_log_det_jacob
     log_p = np.log(eps + target(z_K))
     return np.mean(log_q_K - log_p)
Example #43
def propagate(
    co2dict,
    uncertainties_into,
    uncertainties_from,
    totals=None,
    equilibria_in=None,
    equilibria_out=None,
    dx=1e-6,
    dx_scaling="median",
    dx_func=None,
):
    """Propagate uncertainties from requested inputs to outputs."""
    co2derivs = forward(
        co2dict,
        uncertainties_into,
        uncertainties_from,
        totals=totals,
        equilibria_in=equilibria_in,
        equilibria_out=equilibria_out,
        dx=dx,
        dx_scaling=dx_scaling,
        dx_func=dx_func,
    )[0]
    npts = np.shape(co2dict["PAR1"])
    uncertainties_from = engine.condition(uncertainties_from, npts=npts)[0]
    components = {
        u_into: {
            u_from: np.abs(co2derivs[u_into][u_from]) * v_from
            for u_from, v_from in uncertainties_from.items()
        }
        for u_into in uncertainties_into
    }
    uncertainties = {
        u_into: np.sqrt(
            np.sum(
                np.array([
                    component for component in components[u_into].values()
                ])**2,
                axis=0,
            ))
        for u_into in uncertainties_into
    }
    return uncertainties, components
Example #44
def _rf_epg(alpha, phi):
    if (np.abs(alpha) > 2 * np.pi):
        warnings.warn("alpha should be in radians", UserWarning)

    a = np.power(np.cos(alpha / 2.), 2)
    b = np.exp(2 * 1j * phi) * np.power(np.sin(alpha / 2.), 2)
    c = -1j * np.exp(1j * phi) * np.sin(alpha)

    d = np.exp(-2j * phi) * np.power(np.sin(alpha / 2.), 2)
    e = np.power(np.cos(alpha / 2.), 2)
    f = 1j * np.exp(-1j * phi) * np.sin(alpha)

    g = -1j / 2. * np.exp(-1j * phi) * np.sin(alpha)
    h = 1j / 2 * np.exp(1j * phi) * np.sin(alpha)
    i = np.cos(alpha)

    R = np.array([[a, b, c], [d, e, f], [g, h, i]])

    return R
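A minimal check (not part of the original snippet; assumes numpy is imported as np): for a 90-degree pulse with zero phase, the matrix entries follow directly from the formulas above.

import numpy as np

R = _rf_epg(np.pi / 2, 0.0)
print(np.round(R, 3))  # real parts of the diagonal are [0.5, 0.5, 0.], since cos(pi/4)**2 = 0.5 and cos(pi/2) = 0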
Example #45
def DiederichOpper_II(N, patterns, weights, biases, sc, lr, tol):
    '''
    Rule II from Diederich and Opper (1987), "Learning of Correlated Patterns in Spin-Glass Networks by Local Learning Rules".
    '''
    for i in range(N):  # for each neuron independently
        for j in range(patterns.shape[0]):
            pattern = np.array(deepcopy(patterns[j].reshape(1, N))).squeeze()
            h_i = (weights[i, :] @ pattern.T + biases[i])
            # if the  new pattern is not already stable with margin 1
            while (np.abs(1 - h_i * pattern[i])) > tol:
                weights[i, :] = deepcopy(weights[i, :] +
                                         lr * pattern[i] * pattern *
                                         (1 - h_i * pattern[i]))
                biases[i] = deepcopy(biases[i] +
                                     lr * pattern[i] * (1 - h_i * pattern[i]))
                if sc == True:
                    weights[i, i] = 0
                h_i = (weights[i, :] @ pattern.T + biases[i])
    return weights, biases
Example #46
def ess_compute_Z(diagonal, num_peds, robot_mu_x, robot_mu_y, \
                  ped_mu_x, ped_mu_y, cov_robot_x, cov_robot_y, \
                  inv_cov_robot_x, inv_cov_robot_y, cov_ped_x, cov_ped_y, \
                  inv_cov_ped_x, inv_cov_ped_y, \
                  one_over_cov_sum_x, one_over_cov_sum_y, normalize):
    delta0 = [0. for _ in range(num_peds)]
    norm_delta0 = [0. for _ in range(num_peds)]
    norm_delta0_normalized = [0. for _ in range(num_peds)]
    T = np.size(robot_mu_x)

    # for var in range(np.size(var_x_ess)):
    for ped in range(num_peds):
        # if normalize == True:
        #   normalize_x = np.multiply(np.power(2*np.pi,-0.5), one_over_std_sum_x)
        #   normalize_y = np.multiply(np.power(2*np.pi,-0.5), one_over_std_sum_y)
        # else:
        normalize_x = 1.
        normalize_y = 1.

        vel_x = robot_mu_x - ped_mu_x[ped]
        vel_y = robot_mu_y - ped_mu_y[ped]
        vel_x_2 = np.power(vel_x, 2)
        vel_y_2 = np.power(vel_y, 2)

        one_over_var_sum_x = np.diag(one_over_cov_sum_x[ped])
        one_over_var_sum_y = np.diag(one_over_cov_sum_y[ped])

        quad_x = np.multiply(one_over_var_sum_x, vel_x_2)
        quad_y = np.multiply(one_over_var_sum_y, vel_y_2)

        Z_x = np.multiply(normalize_x, np.exp(-0.5 * quad_x))
        Z_y = np.multiply(normalize_y, np.exp(-0.5 * quad_y))

        Z = np.multiply(Z_x, Z_y)

        norm_delta0[ped] = np.abs(np.sum(np.log1p(-Z)))

    norm_delta0_normalized = norm_delta0 / (np.sum(norm_delta0))
    ess = 1. / np.sum(np.power(norm_delta0_normalized, 2))
    ess = int(ess)
    top_Z_indices = np.argsort(norm_delta0_normalized)[::-1]

    return ess, top_Z_indices
Example #47
    def __init__(self, means, variances, pmix=None):
        """
        means: a k x d 2d array specifying the means.
        variances: a one-dimensional length-k array of variances
        pmix: a one-dimensional length-k array of mixture weights. Sum to one.
        """
        k, d = means.shape
        if k != len(variances):
            raise ValueError('Number of components in means and variances do not match.')

        if pmix is None:
            pmix = old_div(np.ones(k),float(k))

        if np.abs(np.sum(pmix) - 1) > 1e-8:
            raise ValueError('Mixture weights do not sum to 1.')

        self.pmix = pmix
        self.means = means
        self.variances = variances
Example #48
    def compute_f_fprime_t_avg_12_(W1,W2,perturbation,max_dist=1,burn_in=0.5): # max dist added 10/14/20
        #Wmx,Wmy,Wsx,Wsy,s02,K,kappa,T,XX,XXp,Eta,Xi,h1,h2 = parse_W(W)
        Wmx,Wmy,Wsx,Wsy,s02,K,kappa,T,h1,h2,bl,amp = parse_W1(W1)
        XX,XXp,Eta,Xi = parse_W2(W2)
        fval = compute_f_(Eta,Xi,s02)
        fprimeval = compute_fprime_(Eta,Xi,s02)
        if share_residuals:
            resEta = Eta - u_fn(XX,fval,Wmx,Wmy,K,kappa,T)
            resXi  = Xi - u_fn(XX,fval,Wsx,Wsy,K,kappa,T)
            resEta12 = np.concatenate((resEta,resEta),axis=0)
            resXi12 = np.concatenate((resXi,resXi),axis=0)
        else:
            resEta12 = 0
            resXi12 = 0
        dHH = np.zeros((nN,nQ*nS*nT))
        dHH[:,np.arange(2,nQ*nS*nT,nQ)] = 1
        dHH = np.concatenate((dHH*h1,dHH*h2),axis=0)
        YY = fval + perturbation
        YYp = fprimeval
        XX12 = np.concatenate((XX,XX),axis=0)
        YY12 = np.concatenate((YY,YY),axis=0)
        YYp12 = np.concatenate((YYp,YYp),axis=0)
        YYmean = np.zeros_like(YY12)
        YYprimemean = np.zeros_like(YY12)
        def dYYdt(YY,Eta1,Xi1):
            return -YY + compute_f_(Eta1,Xi1,s02)
        def dYYpdt(YYp,Eta1,Xi1):
            return -YYp + compute_fprime_(Eta1,Xi1,s02)
        for t in range(niter):
            if np.mean(np.abs(YY-fval)) < max_dist:
                Eta121 = resEta12 + u_fn(XX12,YY12,Wmx,Wmy,K,kappa,T) + dHH
                Xi121 = resXi12 + u_fn(XX12,YY12,Wmx,Wmy,K,kappa,T)
                YY12 = YY12 + dt*dYYdt(YY12,Eta121,Xi121)
                YYp12 = YYp12 + dt*dYYpdt(YYp12,Eta121,Xi121)
            elif np.remainder(t,500)==0:
                print('unstable fixed point?')
            if t>niter*burn_in:
                YYmean = YYmean + 1/niter/burn_in*YY12
                YYprimemean = YYprimemean + 1/niter/burn_in*YYp12

        #YYmean = YYmean + np.tile(bl,nS*nT)[np.newaxis,:]
        
        return YYmean,YYprimemean
Example #49
    def calc_loss_wrt_parameter_dict(self, param_dict, data_tuple):
        ''' Compute loss at given parameters

        Args
        ----
        param_dict : dict
            Keys are string names of parameters
            Values are *numpy arrays* of parameter values

        Returns
        -------
        loss : float scalar
        '''
        # TODO compute loss
        y_N = data_tuple[2]
        yhat_N = self.predict(data_tuple[0], data_tuple[1], **param_dict)
        #loss_total = ag_np.sum(ag_np.square(yhat_N - y_N))
        loss_total = ag_np.sum(ag_np.abs(yhat_N - y_N))
        return loss_total
Example #50
def theta_sample(alpha, batch, lr=.2, EM_iter=EM_iter, SGD_iter=SGD_iter):
    """
    Returns mc random samples of the posterior mean estimator, for doing Monte Carlo approx.
    This uses EM algorithm with _EM_iter_ iterations to converge to approx posterior.
    The output is an matrix of _d_ sampled means, times _mc_ samples. Shape = (d, mc)
    lr must be maximum 0.25 otherwise explosion occurs with reparam
    """
    if model_name == "jaakkola":
        lv1 = np.abs(np.random.normal(0, 1, batch[1].shape))  # POSITIVE initial value of lambda(v) before maximization, same shape as Y
        for i in range(EM_iter):  # EM algorithm updating (mu_P,S_P) and v in turn.
            S_P1 = S_P(alpha, batch, lv1)  # Cov matrix (d x d)
            mu_P1 = mu_P(alpha, batch, S_P1)  # Mean vector (d x 1)
            lv1 = lambda_v(batch, mu_P1, S_P1)  # lambda(v) vector (n x 1)
        return np.random.multivariate_normal(mu_P1.reshape(-1), S_P1, mc).T

    if model_name == "SVI": # Prior MUST BE N(0,I)
        mu_P1 = np.copy(mu_0) # init posterior = prior N(0,I)
        rho_P1 = reparam_bwd(sigma_0) # init posterior = prior N(0,I)

        gradients_mu = np.empty((d,SGD_iter-1)) # init plotting

        for j in range(1, SGD_iter):
            epsilon = np.random.multivariate_normal(np.zeros(d), np.identity(d)).reshape(-1, 1) # generate noise ~ N(0,I)
            theta = np.nan_to_num( mu_P1 + reparam_fwd(rho_P1.reshape(-1,1)) * epsilon ) # nan to num is used to fix Autograd bug with sqrt(0)
            df_dthetha1 = np.nan_to_num( df_dtheta(theta, alpha, batch, mu_P1, rho_P1) )
            grad_mu_P = np.nan_to_num( df_dthetha1 + df_dmu_P(theta, alpha, batch, mu_P1, rho_P1) )
            grad_rho_P = np.nan_to_num( df_dthetha1 * (epsilon/(1 + np.exp(-rho_P1.reshape(-1,1)))) + df_drho_P(theta, alpha, batch, mu_P1, rho_P1).reshape(-1,1) )

            mu_P1  -= lr/np.sqrt(j) * grad_mu_P # gradient descent
            rho_P1 -= lr/np.sqrt(j) * np.squeeze(grad_rho_P)

            # Plotting the SGD
            gradients_mu[:,j-1] = np.squeeze(mu_P1)

        if(False and alpha==0):
            plt.plot(gradients_mu.T)
            plt.xlabel("iteration")
            plt.ylabel("mu_P's values")
            plt.savefig("../plots/SGD_SVI.png")
            plt.show()
            plt.clf()

        return np.random.multivariate_normal(mu_P1.reshape(-1), np.diag(reparam_fwd(rho_P1)**2), mc).T
Example #51
    def get_log_pc(self, v):
        logit_v = np.log(v) - np.log(1 - v)
        epsilon = self.epsilon_param
        if np.abs(epsilon) < 1e-8:
            return self.get_log_p0(v)

        if self.gustafson_style:
            log_epsilon = np.log(epsilon)
            return \
                self.get_log_p0(v) + \
                self.log_phi(logit_v) + \
                log_epsilon - \
                self.log_norm_pc
        else:
            # assert epsilon <= 1
            return \
                self.get_log_p0(v) + \
                epsilon * self.log_phi(logit_v) - \
                self.log_norm_pc
Example #52
    def plot_reverse_animation(self, fig, ax, x, y, z, threshold=1e-5, MAX=1000):
        frames = 48

        current_angle = self.get_angle()

        # Update using reverse kinematic
        self.update_reverse_kinematic(x, y, z, threshold, MAX)
        new_angle = self.get_angle()

        # Angle change by frame
        diff_angle = new_angle - current_angle
        for i in range(len(diff_angle)):
            if np.abs(diff_angle[i]) > np.pi:
                if diff_angle[i] > 0:
                    diff_angle[i] = diff_angle[i] - PI2
                else:
                    diff_angle[i] = PI2 + diff_angle[i]

        change_angle = diff_angle / frames

        lines = self.plot_arm(ax, current_angle)

        def get_plot(i):
            plot_angle = current_angle + i * change_angle
            first = self.get_arm_pos(1, *plot_angle)
            second = self.get_arm_pos(2, *plot_angle)
            third = self.get_arm_pos(3, *plot_angle)

            lines[0].set_data(*list(zip([0,0], first[:2])))
            lines[0].set_3d_properties([0, first[2]])

            lines[1].set_data(*list(zip(first[:2], second[:2])))
            lines[1].set_3d_properties([first[2], second[2]])

            lines[2].set_data(*list(zip(second[:2], third[:2])))
            lines[2].set_3d_properties([second[2], third[2]])

            return lines

        anim = animation.FuncAnimation(fig, get_plot, frames=48, interval=100, blit=True)

        return anim
Example #53
    def variational_log_density(params, samples):
        '''
        samples: [n_samples, D]
        u: [D,1]
        w: [D,1]
        b: [1]
        Returns: [num_samples]
        '''
        n_samples = len(samples)

        mean = params[0]
        log_std = params[1]
        norm_flow_params = params[2]

        z_k, all_zs = normalizing_flows(samples, norm_flow_params)

        logp_zk = logprob(z_k)
        logp_zk = np.reshape(logp_zk, [n_samples, 1])

        logq_z0 = diag_gaussian_log_density(samples, mean, log_std)
        logq_z0 = np.reshape(logq_z0, [n_samples, 1])

        sum_nf = np.zeros([n_samples,1])
        for params_k in range(len(norm_flow_params)):
            u = norm_flow_params[params_k][0]
            w = norm_flow_params[params_k][1]
            b = norm_flow_params[params_k][2]

            # Appendix equations
            m_x = -1. + np.log(1.+np.exp(np.dot(w.T,u)))
            u_k = u + (m_x - np.dot(w.T,u)) *  (w/np.linalg.norm(w))
            # u_k = u

            # [n_samples, D]
            phi = np.dot((1.-np.tanh(np.dot(all_zs[params_k],w)+b)**2), w.T)
            # [n_samples, 1]
            # accumulate the log-det-Jacobian contribution of this flow step
            sum_nf = sum_nf + np.log(np.abs(1+np.dot(phi, u_k)))

        # return logq_z0 - sum_nf
        log_qz = np.reshape(logq_z0 - sum_nf, [n_samples])
        return log_qz
Example #54
def print_wf_values(theta1=0.0, theta2=0.0,  use_j=False, B=0.0):
    wf = Wavefunction(use_jastrow=use_j)

    # Adjust numpy output so arrays are printed with higher precision
    float_formatter = "{:.15g}".format
    np.set_printoptions(formatter={'float_kind':float_formatter})

    if use_j:
        VP = np.array([theta1, theta2, B])
        print("Values for theta = ",theta1,theta2," and jastrow B = ",B)
    else:
        VP = np.array([theta1, theta2])
        print("Values for theta = ",theta1,theta2," and no jastrow")



    r1 = np.array([1.0, 2.0, 3.0])
    r2 = np.array([0.0, 1.1, 2.2])

    psi_val = wf.psi(r1, r2, VP)
    print("  wf = ",psi_val," log wf = ",np.log(np.abs(psi_val)))

    g0 = wf.grad0(r1, r2, VP)/psi_val
    print("  grad/psi for particle 0 = ",g0[0],g0[1],g0[2])

    # Using the laplacian of log psi to match internal QMCPACK values
    lap_0 = wf.lap0(r1, r2, VP)
    print(" laplacian of log psi for particle 0 = ",lap_0)

    lap_1 = wf.lap1(r1, r2, VP)
    print(" laplacian for log psi particle 1 = ",lap_1)

    eloc = wf.local_energy(r1, r2, VP)
    print("  local energy = ",eloc)

    dp = wf.dpsi(r1, r2, VP)
    print("  parameter derivative of log psi = ",dp / psi_val)

    deloc = wf.dlocal_energy(r1, r2, VP)
    print("  parameter derivative of local energy = ",deloc)

    print("")
Example #55
    def run_cavi(self, tau, nu, phi_mu, phi_var, max_iter=200, tol=1e-6):
        params = packing.flatten_params(tau, nu, phi_mu, phi_var)

        self.trace.reset()
        diff = np.inf
        while diff > tol and self.trace.stepnum < max_iter:
            self.cavi_updates(tau, nu, phi_mu, phi_var)
            new_params = packing.flatten_params(tau, nu, phi_mu, phi_var)
            diff = np.max(np.abs(new_params - params))
            self.trace.update(params, diff)
            if not np.isfinite(diff):
                print('Error: non-finite parameter difference.')
                break
            params = new_params

        if self.trace.stepnum >= max_iter:
            print('Warning: CAVI reached max_iter.')

        print('Done with CAVI.')
        return tau, nu, phi_mu, phi_var
Example #56
 def predict(self, test_x, is_diag=1):
     sn2 = np.exp(self.theta[0])
     hyp = self.theta[1:]
     tmp = self.kernel(test_x, self.train_x, hyp)
     py = np.dot(tmp, self.alpha)
     '''
     ps2 = sn2 + self.kernel(test_x, test_x, hyp) - np.dot(tmp, chol_inv(self.L, tmp.T))
     if is_diag:
         ps2 =np.diag(ps2)
     '''
     tmp1 = chol_inv(self.L, tmp.T)
     # ps2 = -np.dot(tmp, chol_inv(self.L, tmp.T))
     if is_diag:
         ps2 = self.for_diag + sn2 - (tmp * tmp1.T).sum(axis=1)
     else:
         ps2 = sn2 - np.dot(tmp, tmp1) + self.kernel(test_x, test_x, hyp)
     ps2 = np.abs(ps2)
     py = py * self.std + self.mean
     ps2 = ps2 * (self.std**2)
     return py, ps2
Example #57
    def __init__(self, n, penalty='huber', alpha=1.0):
        assert (alpha > 0.0)
        self.alpha = alpha
        self.alpha_sq = alpha ** 2
        self.penalty = penalty.lower()
        if (self.penalty == 'quadratic'):
            self.phi = lambda z: 0.5 * np.power(z, 2.0)
        elif (self.penalty == 'pseudo-huber'):
            self.phi = lambda z: self.alpha_sq * (np.sqrt(1.0 + np.power(z, 2.0) / self.alpha_sq) - 1.0)
        elif (self.penalty == 'huber'):
            self.phi = lambda z: np.where(np.abs(z) <= alpha, 0.5 * np.power(z, 2.0), alpha * np.abs(z) - 0.5 * self.alpha_sq)
        elif (self.penalty == 'welsch'):
            self.phi = lambda z: 1.0 - np.exp(-0.5 * np.power(z, 2.0) / self.alpha_sq)
        elif (self.penalty == 'trunc-quad'):
            self.phi = lambda z: np.minimum(0.5 * np.power(z, 2.0), 0.5 * self.alpha_sq)
        else:
            assert False, "unrecognized penalty function {}".format(penalty)

        super().__init__(n, 1) # make sure node is properly constructed
        self.eps = 1.0e-4 # relax tolerance on optimality test
Example #58
    def predict(self, test_x, is_diag=1):
        output_scale = np.exp(self.theta[0])
        sigma2_tag = np.exp(self.theta[self.dim+2])
        C = self.kernel(self.src_x, self.tag_x, self.theta)
        L_C = np.linalg.cholesky(C)
        alpha_C = chol_inv(L_C, self.train_y.T)
        k_star_s = self.kernel2(test_x, self.src_x, self.theta)
        k_star_t = self.kernel1(test_x, self.tag_x, self.theta)
        k_star = np.hstack((k_star_s, k_star_t))
        py = np.dot(k_star, alpha_C)

        Cvks = chol_inv(L_C, k_star.T)
        if is_diag:
            ps2 = output_scale + sigma2_tag - (k_star * Cvks.T).sum(axis=1)
        else:
            ps2 = self.kernel1(test_x, test_x, self.theta) + sigma2_tag - np.dot(k_star, Cvks)
        ps2 = np.abs(ps2)
        py = py * self.std + self.mean
        ps2 = ps2 * (self.std**2)
        return py, ps2
Example #59
def lanczos(dx, a=3):
    """Lanczos kernel

    Parameters
    ----------
    dx: float
        amount to shift image
    a: int
        Lanczos window size parameter

    Returns
    -------
    result: array-like
        1D Lanczos kernel
    """
    if np.abs(dx) > 1:
        raise ValueError("The fractional shift dx must be between -1 and 1")
    window = np.arange(-a + 1, a + 1) + np.floor(dx)
    y = np.sinc(dx - window) * np.sinc((dx - window) / a)
    return y, window.astype(int)
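A minimal usage sketch (not part of the original snippet; assumes numpy is imported as np):

import numpy as np

kernel, window = lanczos(0.3, a=3)
print(window)          # integer pixel offsets covered by the kernel
print(np.sum(kernel))  # close to 1, as expected for an interpolation kernel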
Example #60
    def forwardFilter( self, knownLatentStates=None ):

        if( knownLatentStates is not None ):

            assert np.abs( knownLatentStates - knownLatentStates.astype( int ) ).sum() == 0.0
            knownLatentStates = knownLatentStates.astype( int )

            if( knownLatentStates.size == 0 ):
                self.chainCuts = None
            else:

                # Assert that knownLatentStates is sorted
                assert np.any( np.diff( knownLatentStates[ :, 0 ] ) <= 0 ) == False

                # Mark that we are cutting the markov chain at these indices
                self.chainCuts = knownLatentStates
        else:
            self.chainCuts = None

        return super( CategoricalHMM, self ).forwardFilter()