Example #1
    def test_array1(self):
        """
        Tests functions with single numpy 1d array output
        """

        def f1(x):
            # single scalar input
            return np.array([i*x**i for i in range(5)])

        df = jacobian(f1)(3.0)
        for i, d in enumerate(df):
            self.assertAlmostEqual(i**2 * 3.0 ** (i - 1), d)

        def f2(params):
            # one list, one numpy array input
            x,y = params[0]
            A = params[1]
            return np.dot(np.sin(A), np.array([x,y**2]))

        A = np.array([[1.0, 2.0],[3.0, 4.0]])
        x,y = 2.0, np.pi
        params = [[x, y], A]
        df = jacobian(f2)(params)
        # df0_dx
        self.assertAlmostEqual(df[0][0][0],  np.sin(A)[0][0])
        # df1_dx
        self.assertAlmostEqual(df[1][0][0],  np.sin(A)[1][0])
        # df0_dy
        self.assertAlmostEqual(df[0][0][1],  2*np.sin(A)[0][1]*y)
        # df1_dy
        self.assertAlmostEqual(df[1][0][1],  2*np.sin(A)[1][1]*y)
        # df_dA
        assert np.linalg.norm(df[0][1][0] - (np.cos(A)*np.array([x,y**2]))[0]) < 1e-10
        assert np.linalg.norm(df[1][1][1] - (np.cos(A)*np.array([x,y**2]))[1]) < 1e-10
def test_third_derivative_other_args2():
    fun = lambda x, y : np.sin(np.sin(x) + np.sin(y))
    df = grad(fun, 1)
    ddf = grad(df, 0)
    dddf = grad(ddf, 1)
    check_grads(fun, npr.randn(), npr.randn())
    check_grads(df, npr.randn(), npr.randn())
    check_grads(ddf, npr.randn(), npr.randn())
    check_grads(dddf, npr.randn(), npr.randn())
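The second argument to grad selects which positional argument to differentiate with respect to. A minimal sketch of that argnum behaviour, assuming autograd's grad (the function and values below are illustrative, not part of the test above):

import autograd.numpy as np
from autograd import grad

fun = lambda x, y: np.sin(x) * y
dfun_dy = grad(fun, 1)                                 # derivative with respect to the second argument, y
assert np.isclose(dfun_dy(0.5, 2.0), np.sin(0.5))      # d/dy sin(x)*y = sin(x)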
Example #3
def test_jacobian_higher_order():
    fun = lambda x: np.sin(np.outer(x,x)) + np.cos(np.dot(x,x))

    assert jacobian(fun)(npr.randn(3)).shape == (3,3,3)
    assert jacobian(jacobian(fun))(npr.randn(3)).shape == (3,3,3,3)
    # assert jacobian(jacobian(jacobian(fun)))(npr.randn(3)).shape == (3,3,3,3,3)

    check_grads(lambda x: np.sum(np.sin(jacobian(fun)(x))), npr.randn(3))
    check_grads(lambda x: np.sum(np.sin(jacobian(jacobian(fun))(x))), npr.randn(3))
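The shape assertions follow from jacobian appending the input shape to the output shape: fun maps a length-3 vector to a (3, 3) array, so its Jacobian is (3, 3, 3), and each further jacobian appends another 3. A minimal sketch of the same rule on a simpler map, assuming autograd's jacobian:

import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import jacobian

outer_fun = lambda x: np.outer(x, x)        # maps a length-3 vector to a (3, 3) array
x = npr.randn(3)
assert jacobian(outer_fun)(x).shape == (3, 3, 3)   # output shape (3, 3) + input shape (3,)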
def test_third_derivative():
    fun = lambda x : np.sin(np.sin(x) + np.sin(x))
    df = grad(fun)
    ddf = grad(df)
    dddf = grad(ddf)
    check_grads(fun, npr.randn())
    check_grads(df, npr.rand())
    check_grads(ddf, npr.rand())
    check_grads(dddf, npr.rand())
Example #5
 def fun(input_dict):
     A = 0.
     B = 0.
     for i, (k, v) in enumerate(sorted(input_dict.items(), key=op.itemgetter(0))):
         A = A + np.sum(np.sin(v)) * (i + 1.0)
         B = B + np.sum(np.cos(v))
     for v in input_dict.values():
         A = A + np.sum(np.sin(v))
     for k in sorted(input_dict.keys()):
         A = A + np.sum(np.cos(input_dict[k]))
     return A + B
def score_sine(G):
    """
    Takes advantage of the non-linear "sine" function.
    Structure similar to score_nbr, but applies sine function at each addition.
    """
    sum_score = 0
    for n in G.nodes():
        sum_score += np.sin(n)
        for nbr in G.neighbors(n):
            sum_score += np.sin(nbr)

    return sum_score
Example #7
def make_pinwheel_data(radial_std, tangential_std, num_classes, num_per_class, rate):
    rads = np.linspace(0, 2*np.pi, num_classes, endpoint=False)

    features = npr.randn(num_classes*num_per_class, 2) \
        * np.array([radial_std, tangential_std])
    features[:,0] += 1.
    labels = np.repeat(np.arange(num_classes), num_per_class)

    angles = rads[labels] + rate * np.exp(features[:,0])
    rotations = np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)])
    rotations = np.reshape(rotations.T, (-1, 2, 2))

    return 10*npr.permutation(np.einsum('ti,tij->tj', features, rotations))
Example #8
def test_jacobian_against_stacked_grads():
    scalar_funs = [
        lambda x: np.sum(x ** 3),
        lambda x: np.prod(np.sin(x) + np.sin(x)),
        lambda x: grad(lambda y: np.exp(y) * np.tanh(x[0]))(x[1]),
    ]

    vector_fun = lambda x: np.array([f(x) for f in scalar_funs])

    x = npr.randn(5)
    jac = jacobian(vector_fun)(x)
    grads = [grad(f)(x) for f in scalar_funs]

    assert np.allclose(jac, np.vstack(grads))
Example #9
def make_pinwheel(radial_std, tangential_std, num_classes, num_per_class, rate,
                  rs=npr.RandomState(0)):
    """Based on code by Ryan P. Adams."""
    rads = np.linspace(0, 2*np.pi, num_classes, endpoint=False)

    features = rs.randn(num_classes*num_per_class, 2) \
        * np.array([radial_std, tangential_std])
    features[:, 0] += 1
    labels = np.repeat(np.arange(num_classes), num_per_class)

    angles = rads[labels] + rate * np.exp(features[:,0])
    rotations = np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)])
    rotations = np.reshape(rotations.T, (-1, 2, 2))

    return np.einsum('ti,tij->tj', features, rotations)
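A hedged usage sketch: the function returns one 2-D point per sample, num_classes * num_per_class rows in total (the argument values below are illustrative):

data = make_pinwheel(radial_std=0.3, tangential_std=0.05,
                     num_classes=5, num_per_class=100, rate=0.25)
print(data.shape)   # (500, 2): 5 spokes with 100 points each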
Example #10
def test_hvp():
    fun = lambda a: np.sum(np.sin(a))
    a = npr.randn(5)
    v = npr.randn(5)
    H = hessian(fun)(a)
    hvp = make_hvp(fun)(a)[0]
    check_equivalent(np.dot(H, v), hvp(v))
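The same product can be formed without materializing the Hessian: H v is the gradient of the scalar map x -> grad(fun)(x) . v, so a nested grad gives it directly. A minimal sketch, assuming autograd's grad and numpy wrapper:

import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad

fun = lambda a: np.sum(np.sin(a))
a, v = npr.randn(5), npr.randn(5)

# Hessian-vector product as the gradient of the directional derivative x -> grad(fun)(x) . v
hvp_v = grad(lambda x: np.dot(grad(fun)(x), v))(a)

# for this separable function the Hessian is diag(-sin(a)), so H v = -sin(a) * v
assert np.allclose(hvp_v, -np.sin(a) * v)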
Example #11
 def fun(input_dict):
     A = 0.
     B = 0.
     for i, k in enumerate(sorted(input_dict)):
         A = A + np.sum(np.sin(input_dict[k])) * (i + 1.0)
         B = B + np.sum(np.cos(input_dict[k]))
     return A + B
Example #12
def PyLQR_TrajCtrl_TrackingTest():
    n_pnts = 200
    x_coord = np.linspace(0.0, 2*np.pi, n_pnts)
    y_coord = np.sin(x_coord)
    #concatenate to have trajectory
    ref_traj = np.array([x_coord, y_coord]).T
    weight_mats = [ np.eye(ref_traj.shape[1])*100 ]

    #draw reference trajectory
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # ax.hold(True)  # Axes.hold was removed from matplotlib; axes hold by default
    ax.plot(ref_traj[:, 0], ref_traj[:, 1], '.-k', linewidth=3.5)
    ax.plot([ref_traj[0, 0]], [ref_traj[0, 1]], '*k', markersize=16)

    lqr_traj_ctrl = PyLQR_TrajCtrl(use_autograd=True)
    lqr_traj_ctrl.build_ilqr_tracking_solver(ref_traj, weight_mats)

    n_queries = 5

    for i in range(n_queries):
        #start from a perturbed point
        x0 = ref_traj[0, :] + np.random.rand(2) * 2 - 1
        syn_traj = lqr_traj_ctrl.synthesize_trajectory(x0)
        #plot it
        ax.plot(syn_traj[:, 0], syn_traj[:, 1], linewidth=3.5)

    plt.show()
    return
    def predict_expectation(self, X, ancillary_X=None):
        """
        Predict the expectation of lifetimes, :math:`E[T | x]`.

        Parameters
        ----------
        X: numpy array or DataFrame
            a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
            can be in any order. If a numpy array, columns must be in the
            same order as the training data.
        ancillary_X: numpy array or DataFrame, optional
            a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
            can be in any order. If a numpy array, columns must be in the
            same order as the training data.

        Returns
        -------
        expectations: DataFrame
            the expected lifetimes for the individuals. If the expectation does
            not exist (here, when beta_ <= 1), the result is NaN.


        See Also
        --------
        predict_median
        """
        alpha_, beta_ = self._prep_inputs_for_prediction_and_return_scores(X, ancillary_X)
        v = (alpha_ * np.pi / beta_) / np.sin(np.pi / beta_)
        v = np.where(beta_ > 1, v, np.nan)
        return pd.DataFrame(v, index=_get_index(X))
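The closed form computed above is the mean of a log-logistic distribution with scale alpha_ and shape beta_, E[T | x] = alpha_ * (pi / beta_) / sin(pi / beta_), which is finite only when beta_ > 1; the np.where guard returns NaN for the remaining rows.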
Example #14
    def test_multi_scalar(self):
        """
        Tests functions with multiple scalar output
        """

        def f1(x):
            # one scalar input, two scalar outputs
            return x**3, np.exp(3*x)

        df = jacobian(f1)(0.5)
        self.assertAlmostEqual(3*0.5**2, df[0])
        self.assertAlmostEqual(3*np.exp(3*0.5), df[1])

        def f2(params):
            # one list, one numpy array input
            x,y = params[0]
            A = params[1]
            return np.sum(A**2) + np.cos(x) + np.exp(0.5*y)

        df = jacobian(f2)
        A = np.array([[1.0, 2.0],[3.0, 4.0]])
        params = [[0.5, np.pi], A]
        diff = df(params)
        self.assertAlmostEqual(diff[0][0], -np.sin(0.5))
        self.assertAlmostEqual(diff[0][1], 0.5*np.exp(0.5*np.pi))
        self.assertTrue(np.linalg.norm(2*A - diff[1]) < 1e-10)
Example #15
 def plot_gmm(params, ax, num_points=100):
     angles = np.expand_dims(np.linspace(0, 2*np.pi, num_points), 1)
     xs, ys = np.cos(angles), np.sin(angles)
     circle_pts = np.concatenate([xs, ys], axis=1) * 2.0
     for log_proportion, mean, chol in zip(*unpack_params(params)):
         cur_pts = mean + np.dot(circle_pts, chol)
         ax.plot(cur_pts[:, 0], cur_pts[:, 1], '-')
Example #16
def test_value_and_grad():
    fun = lambda x: np.sum(np.sin(x)**2)
    dfun = grad(fun)
    dfun_both = value_and_grad(fun)
    x = npr.randn(5)
    check_equivalent(fun(x), dfun_both(x)[0])
    check_equivalent(dfun(x), dfun_both(x)[1])
Example #17
def test_vector_jacobian_product():
    # This function will have an asymmetric jacobian matrix.
    fun = lambda a: np.roll(np.sin(a), 1)
    a = npr.randn(5)
    V = npr.randn(5)
    J = jacobian(fun)(a)
    check_equivalent(np.dot(V.T, J), vector_jacobian_product(fun)(a, V))
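Equivalently, the product V^T J is the gradient of the scalar map a -> V . fun(a), so the same quantity can be formed without building J. A minimal sketch, assuming autograd's grad and jacobian:

import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad, jacobian

fun = lambda a: np.roll(np.sin(a), 1)
a, V = npr.randn(5), npr.randn(5)

vjp_via_grad = grad(lambda x: np.dot(V, fun(x)))(a)   # gradient of V . fun(x)
assert np.allclose(vjp_via_grad, np.dot(V, jacobian(fun)(a)))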
Example #18
def make_pinwheel_data(num_spokes=5, points_per_spoke=40, rate=1.0, noise_std=0.005):
    """Make synthetic data in the shape of a pinwheel."""
    spoke_angles = np.linspace(0, 2 * np.pi, num_spokes + 1)[:-1]
    rs = npr.RandomState(0)
    x = np.linspace(0.1, 1, points_per_spoke)
    xs = np.concatenate([x * np.cos(angle + x * rate) + noise_std * rs.randn(len(x)) for angle in spoke_angles])
    ys = np.concatenate([x * np.sin(angle + x * rate) + noise_std * rs.randn(len(x)) for angle in spoke_angles])
    return np.concatenate([np.expand_dims(xs, 1), np.expand_dims(ys, 1)], axis=1)
Example #19
 def plot_ellipse(ax, alpha, mean, cov, line=None):
     t = np.linspace(0, 2*np.pi, 100) % (2*np.pi)
     circle = np.vstack((np.sin(t), np.cos(t)))
     ellipse = 2.*np.dot(np.linalg.cholesky(cov), circle) + mean[:,None]
     if line:
         line.set_data(ellipse)
         line.set_alpha(alpha)
     else:
         ax.plot(ellipse[0], ellipse[1], alpha=alpha, linestyle='-', linewidth=2)
Example #20
def test_return_both():
    fun = lambda x : 3.0 * np.sin(x)
    d_fun = grad(fun)
    f_and_d_fun = grad(fun, return_function_value=True)

    test_x = npr.randn()
    f, d = f_and_d_fun(test_x)
    assert f == fun(test_x)
    assert d == d_fun(test_x)
Example #21
def make_pinwheel_data(num_classes, num_per_class, rate=2.0, noise_std=0.001):
    spoke_angles = np.linspace(0, 2*np.pi, num_classes+1)[:-1]

    rs = npr.RandomState(0)
    x = np.linspace(0.1, 1, num_per_class)
    xs = np.concatenate([rate *x * np.cos(angle + x * rate) + noise_std * rs.randn(num_per_class)
                         for angle in spoke_angles])
    ys = np.concatenate([rate *x * np.sin(angle + x * rate) + noise_std * rs.randn(num_per_class)
                         for angle in spoke_angles])
    return np.concatenate([np.expand_dims(xs, 1), np.expand_dims(ys,1)], axis=1)
Example #22
def exp(r):
    """ matrix exponential under the special orthogonal group SO(3) ->
    converts Rodrigues 3-vector r into 3x3 rotation matrix R"""
    theta = np.linalg.norm(r)
    if (theta == 0):
        return np.eye(3)
    K = hat(r / theta)
    # Compute w/ Rodrigues' formula
    return np.eye(3) + np.sin(theta) * K + \
        (1 - np.cos(theta)) * np.dot(K, K)
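A hedged check of the formula: for any r, the returned matrix should be a proper rotation (orthogonal, determinant 1) that leaves the axis r unchanged. The sketch below supplies its own skew-symmetric hat so it is self-contained; the hat used by the snippet above is assumed to behave the same way.

import numpy as np

def _hat(w):
    # skew-symmetric matrix such that np.dot(_hat(w), v) == np.cross(w, v)
    return np.array([[0.0, -w[2], w[1]],
                     [w[2], 0.0, -w[0]],
                     [-w[1], w[0], 0.0]])

def _exp_so3(r):
    theta = np.linalg.norm(r)
    if theta == 0:
        return np.eye(3)
    K = _hat(r / theta)
    return np.eye(3) + np.sin(theta) * K + (1 - np.cos(theta)) * np.dot(K, K)

r = np.array([0.1, -0.4, 0.2])
R = _exp_so3(r)
assert np.allclose(np.dot(R, R.T), np.eye(3))   # orthogonal
assert np.isclose(np.linalg.det(R), 1.0)        # proper rotation
assert np.allclose(np.dot(R, r), r)             # rotation axis is left unchanged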
Example #23
def tensorexp(r):
    """ returns a stack of rotation matrices as a tensor """
    """ r should be (3,n), n column vectors """
    theta = np.sqrt(np.sum(r*r, axis=0))  # shape = (n,)
    # note: the case where theta == 0 is not handled; we assume there is enough
    # noise and bias that this won't happen
    K = tensorhat(r / theta)  # shape = (3,3,n)
    KK = np.einsum('ijl,jkl->ikl', K, K)
    # Compute w/ Rodrigues' formula
    return np.eye(3)[:, :, np.newaxis] + np.sin(theta) * K + \
        (1 - np.cos(theta)) * KK
Example #24
def test_jacobian_against_wrapper():
    A = npr.randn(3,3,3)
    fun = lambda x: np.einsum(
        'ijk,jkl->il',
        A, np.sin(x[...,None] * np.tanh(x[None,...])))

    B = npr.randn(3,3)
    jac1 = jacobian(fun)(B)
    jac2 = old_jacobian(fun)(B)

    assert np.allclose(jac1, jac2)
Example #25
def test_value_and_grad():
    fun = lambda x: np.sum(np.sin(x)**2)
    dfun = grad(fun)
    dfun_both = value_and_grad(fun)
    x = npr.randn(5)
    assert not isbox(dfun_both(x)[0])
    check_equivalent(fun(x), dfun_both(x)[0])
    check_equivalent(dfun(x), dfun_both(x)[1])

    def fun2(x): return dfun_both(x)[0]
    check_grads(fun2)(x)
Example #26
def make_data_linreg_1d(N=21, linear=True):
    xtrain = np.linspace(0, 20, N)
    sigma2 = 2
    w_true = np.array([-1.5, 1/9.])
    if linear:
        fun = lambda x: w_true[0] + w_true[1]*x
    else:
        fun = lambda x: w_true[0] + w_true[1]*np.sin(x)
    noise = np.random.normal(0, 1, xtrain.shape) * np.sqrt(sigma2)
    ytrain = fun(xtrain) + noise    
    return xtrain, ytrain, w_true
Example #27
def rodrigues_rotate_point(rot,X):
    sqtheta = np.sum(np.square(rot))
    if sqtheta != 0.:
        theta = np.sqrt(sqtheta)
        costheta = np.cos(theta)
        sintheta = np.sin(theta)  
        theta_inverse = 1. / theta

        w = theta_inverse * rot
        w_cross_X = cross(w,X)
        tmp = np.dot(w,X) * (1. - costheta)

        return X*costheta + w_cross_X * sintheta + w * tmp
    else:
        return X + cross(rot,X)
Example #28
 def f(x, u):
     # x, y, th, v
     x, y, th, v = x
     a, phi = u
     return np.array(
         [v * np.cos(th), v * np.sin(th), v * np.tan(phi) / self.l, a])
Example #29
anp.negative.defjvp(lambda g, ans, gvs, vs, x: -g)
anp.abs.defjvp(lambda g, ans, gvs, vs, x: anp.real(g * npg.replace_zero(
    anp.conj(x), 0.)) / npg.replace_zero(ans, 1.))
anp.fabs.defjvp(lambda g, ans, gvs, vs, x: anp.sign(x) * g
                )  # fabs doesn't take complex numbers.
anp.absolute.defjvp(lambda g, ans, gvs, vs, x: anp.real(g * anp.conj(x)) / ans)
anp.reciprocal.defjvp(lambda g, ans, gvs, vs, x: -g / x**2)
anp.exp.defjvp(lambda g, ans, gvs, vs, x: ans * g)
anp.exp2.defjvp(lambda g, ans, gvs, vs, x: ans * anp.log(2) * g)
anp.expm1.defjvp(lambda g, ans, gvs, vs, x: (ans + 1) * g)
anp.log.defjvp(lambda g, ans, gvs, vs, x: g / x)
anp.log2.defjvp(lambda g, ans, gvs, vs, x: g / x / anp.log(2))
anp.log10.defjvp(lambda g, ans, gvs, vs, x: g / x / anp.log(10))
anp.log1p.defjvp(lambda g, ans, gvs, vs, x: g / (x + 1))
anp.sin.defjvp(lambda g, ans, gvs, vs, x: g * anp.cos(x))
anp.cos.defjvp(lambda g, ans, gvs, vs, x: -g * anp.sin(x))
anp.tan.defjvp(lambda g, ans, gvs, vs, x: g / anp.cos(x)**2)
anp.arcsin.defjvp(lambda g, ans, gvs, vs, x: g / anp.sqrt(1 - x**2))
anp.arccos.defjvp(lambda g, ans, gvs, vs, x: -g / anp.sqrt(1 - x**2))
anp.arctan.defjvp(lambda g, ans, gvs, vs, x: g / (1 + x**2))
anp.sinh.defjvp(lambda g, ans, gvs, vs, x: g * anp.cosh(x))
anp.cosh.defjvp(lambda g, ans, gvs, vs, x: g * anp.sinh(x))
anp.tanh.defjvp(lambda g, ans, gvs, vs, x: g / anp.cosh(x)**2)
anp.arcsinh.defjvp(lambda g, ans, gvs, vs, x: g / anp.sqrt(x**2 + 1))
anp.arccosh.defjvp(lambda g, ans, gvs, vs, x: g / anp.sqrt(x**2 - 1))
anp.arctanh.defjvp(lambda g, ans, gvs, vs, x: g / (1 - x**2))
anp.rad2deg.defjvp(lambda g, ans, gvs, vs, x: g / anp.pi * 180.0)
anp.degrees.defjvp(lambda g, ans, gvs, vs, x: g / anp.pi * 180.0)
anp.deg2rad.defjvp(lambda g, ans, gvs, vs, x: g * anp.pi / 180.0)
anp.radians.defjvp(lambda g, ans, gvs, vs, x: g * anp.pi / 180.0)
anp.square.defjvp(lambda g, ans, gvs, vs, x: g * 2 * x)
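Each rule maps an input tangent g to the output tangent of the primitive at x; the sin rule, for instance, says the forward-mode tangent of sin(x) is g * cos(x). A quick numpy-only sanity check of that rule against a central finite difference (no autograd API assumed):

import numpy as np

x, g, eps = 0.7, 1.3, 1e-6

jvp_rule = g * np.cos(x)                                           # tangent predicted by the sin rule
jvp_fd = (np.sin(x + eps * g) - np.sin(x - eps * g)) / (2 * eps)   # directional finite difference

assert abs(jvp_rule - jvp_fd) < 1e-6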
 def d_fun(input_dict):
     g = grad(fun)(input_dict)
     A = np.sum(g['item_1'])
     B = np.sum(np.sin(g['item_1']))
     C = np.sum(np.sin(g['item_2']))
     return A + B + C
Example #31
def to_scalar(x):
    if isinstance(getval(x), list) or isinstance(getval(x), tuple):
        return sum([to_scalar(item) for item in x])
    return np.sum(np.real(np.sin(x)))
Example #32
 def t1(self, th):
     return np.array([np.cos(th), np.sin(th)])
Example #33
 def f1(x):
     return anp.sin(x)
Example #34
## CREATE A SIMULATED CAMERA
cam = Camera()
cam.set_K(fx=800, fy=800, cx=640 / 2., cy=480 / 2.)
cam.set_width_heigth(640, 480)

## DEFINE A SET OF CAMERA POSES IN DIFFERENT POSITIONS BUT ALWAYS LOOKING
# TO THE CENTER OF THE PLANE MODEL

cam.set_R_axisAngle(1.0, 0.0, 0.0, np.deg2rad(180.0))
cam.set_t(0.0, -0.0, 0.5, frame='world')

r = 0.8
angle = 90
x = r * np.cos(np.deg2rad(angle))
z = r * np.sin(np.deg2rad(angle))
cam.set_t(0, x, z)
cam.set_R_mat(R_matrix_from_euler_t(0.0, 0, 0))
cam.look_at([0, 0, 0])

max_deviation = 0.06
deviation_range = np.arange(0, max_deviation + 0.01, 0.01)

#Now we define a distribution of cameras in space based on this plane
#An optional parameter is the possible deviation from uniform points
plane_size = (0.3, 0.3)
#cams = create_cam_distribution(cam, plane_size,
#                               theta_params = (0,360,2), phi_params =  (0,70,2),
#                               r_params = (0.5,2.0,2), plot=False)
cams = create_cam_distribution(cam,
                               plane_size,
Example #35
def f5(x, bifur=0):
    return -np.array([np.sin(x[2]), np.sin(x[0]), np.sin(x[1])])
Example #36
def f(x):
   return np.exp(-np.sqrt(x)) * np.sin(x * np.log(1 + (x ** 2)))
Example #37
def plot_ellipse(ax, mean, cov_sqrt, alpha, num_points=100):
    angles = np.linspace(0, 2 * np.pi, num_points)
    circle_pts = np.vstack([np.cos(angles), np.sin(angles)]).T * 2.0
    cur_pts = mean + np.dot(circle_pts, cov_sqrt)
    ax.plot(cur_pts[:, 0], cur_pts[:, 1], '-', alpha=alpha)
Example #38
def fun(x):
    y = np.sin(x + x)
    return avg(y, y)
Example #39
def main():
    """
    Simple unit-test for the gradient optimization algorithms in optimization.py
    """
    d = 2
    w = np.array([1.0, 2.0])
    f = lambda x: 0.1 * np.sum(x**2) + np.sin(np.dot(x, w))

    sigma = 1.0
    g = grad(f)
    grad_f = lambda x: g(x) + sigma * np.random.randn(d)

    x_0 = np.array([10.0, 8.0])
    """this just tests the functionality with clean gradients."""

    ## gd
    x_opt_reg = gd(grad_f,
                   copy(x_0),
                   callback=None,
                   num_iters=500,
                   step_size=0.1,
                   how='regular')
    x_opt_filt = gd(grad_f,
                    copy(x_0),
                    callback=None,
                    num_iters=500,
                    step_size=0.1,
                    how='filtered')

    print('Regular GD:')
    print('x_opt_reg=', x_opt_reg, 'f(x_opt_reg)=', f(x_opt_reg))
    print('x_opt_filt=', x_opt_filt, 'f(x_opt_filt)=', f(x_opt_filt))
    print()

    #gd + momentum
    x_opt_reg = momgd(grad_f,
                      copy(x_0),
                      callback=None,
                      num_iters=500,
                      step_size=0.1,
                      mass=0.95,
                      how='regular')
    x_opt_filt = momgd(grad_f,
                       copy(x_0),
                       callback=None,
                       num_iters=500,
                       step_size=0.1,
                       mass=0.95,
                       how='filtered')

    print('Momentum GD')
    print('x_opt_reg=', x_opt_reg, 'f(x_opt_reg)=', f(x_opt_reg))
    print('x_opt_filt=', x_opt_filt, 'f(x_opt_filt)=', f(x_opt_filt))
    print()

    # rmsprop
    x_opt_reg = rmsprop(grad_f,
                        copy(x_0),
                        callback=None,
                        num_iters=500,
                        step_size=0.1,
                        gamma=0.99,
                        how='regular')
    x_opt_filt = rmsprop(grad_f,
                         copy(x_0),
                         callback=None,
                         num_iters=500,
                         step_size=0.1,
                         gamma=0.99,
                         how='filtered')

    print('RMSProp')
    print('x_opt_reg=', x_opt_reg, 'f(x_opt_reg)=', f(x_opt_reg))
    print('x_opt_filt=', x_opt_filt, 'f(x_opt_filt)=', f(x_opt_filt))
    print()
def test_sin():
    fun = lambda x : 3.0 * np.sin(x)
    d_fun = grad(fun)
    check_grads(fun, npr.randn())
    check_grads(d_fun, npr.randn())
Example #41
 def d_fun(input_tuple):
     g = grad(fun)(input_tuple)
     A = np.sum(g[0])
     B = np.sum(np.sin(g[0]))
     C = np.sum(np.sin(g[1]))
     return A + B + C
Example #42
## CREATE A SIMULATED CAMERA
cam = Camera()
cam.set_K(fx=800, fy=800, cx=640 / 2., cy=480 / 2.)
cam.set_width_heigth(640, 480)

## DEFINE CAMERA POSE LOOKING STRAIGHT DOWN INTO THE PLANE MODEL
cam.set_R_axisAngle(1.0, 0.0, 0.0, np.deg2rad(180.0))
cam.set_t(0.0, -0.0, 0.5, frame='world')

#cam.set_R_axisAngle(1.0,  0.0,  0.0, np.deg2rad(140.0))
#cam.set_t(0.0,-1,1.0, frame='world')
#
r = 0.8
angle = 30
x = r * np.cos(np.deg2rad(angle))
z = r * np.sin(np.deg2rad(angle))
cam.set_t(0, x, z)
cam.set_R_mat(R_matrix_from_euler_t(0.0, 0, 0))
cam.look_at([0, 0, 0])

#cam.set_R_axisAngle(1.0,  0.0,  0.0, np.deg2rad(110.0))
#cam.set_t(0.0,-0.3,0.1, frame='world')

new_objectPoints = pl.get_points()
#GOOD SET OF POINTS FOR THE GRAPHIC
#new_objectPoints = np.array([[ 0.075, -0.06,   0.06,  -0.06 ],
# [ 0.105,  0.105,  0.105,  0.09 ],
# [ 0.,     0.,     0.,     0.,   ],
# [ 1.,     1.,     1.,     1.,   ]])

new_objectPoints = np.array([[0.12, 0.06, -0.105, 0.105],
Example #43
def f2(x, bifur=0):
    return np.array([-np.sin(x[1]), -5 * x[0]**2, -np.sin(x[2] - x[1])])
Example #44
def y(x, w):
    return (w[0] + w[1] * np.sin(2. * np.pi * x)) / (1 + w[2] * x)
def double_cartpole_dynamics(xu):
    """
    http://www.lirmm.fr/~chemori/Temp/Wafa/double%20pendule%20inverse.pdf
    """
    fs_hz = 125
    dt = 1 / fs_hz
    g = 9.81
    Mc = 0.37
    Mp1 = 0.127
    Mp2 = 0.127
    Mt = Mc + Mp1 + Mp2
    L1 = 0.3365
    L2 = 0.3365
    l1 = L1 / 2
    l2 = L2 / 2
    J1 = Mp1 * L1 / 12
    J2 = Mp2 * L2 / 12
    u_mx = 10.0
    input_amp = 3.0  # simulate gear ratio etc

    x, u = xu[:, :6], xu[:, 6:]
    N = x.shape[0]

    q = x[:, 0]
    th1 = x[:, 1]
    th2 = x[:, 2]
    q_dot = x[:, 3]
    th_dot1 = x[:, 4]
    th_dot2 = x[:, 5]

    sth1 = np.sin(th1)
    cth1 = np.cos(th1)
    sth2 = np.sin(th2)
    cth2 = np.cos(th2)
    sdth = np.sin(th1 - th2)
    cdth = np.cos(th1 - th2)

    # helpers
    l1_mp1_mp2 = Mp1 * l1 + Mp2 * L2
    l1_mp1_mp2_cth1 = l1_mp1_mp2 * cth1
    Mp2_l2 = Mp2 * l2
    Mp2_l2_cth2 = Mp2_l2 * cth2
    l1_l2_Mp2 = L1 * l2 * Mp2
    l1_l2_Mp2_cdth = l1_l2_Mp2 * cdth

    # inertia
    M11 = Mt * np.ones((N,))
    M12 = l1_mp1_mp2_cth1
    M13 = Mp2_l2_cth2
    M21 = l1_mp1_mp2_cth1
    M22 = ((l1 ** 2) * Mp1 + (L1 ** 2) * Mp2 + J1) * np.ones((N,))
    M23 = l1_l2_Mp2_cdth
    M31 = Mp2_l2_cth2
    M32 = l1_l2_Mp2_cdth
    M33 = ((l2 ** 2) * Mp2 + J2) * np.ones((N,))

    # coriolis
    C11 = np.zeros((N,))
    C12 = -l1_mp1_mp2 * th_dot1 * sth1
    C13 = -Mp2_l2 * th_dot2 * sth2
    C21 = np.zeros((N,))
    C22 = np.zeros((N,))
    C23 = l1_l2_Mp2 * th_dot2 * sdth
    C31 = np.zeros((N,))
    C32 = -l1_l2_Mp2 * th_dot1 * sdth
    C33 = np.zeros((N,))

    # gravity
    G11 = np.zeros((N,))
    G21 = -(Mp1 * l1 + Mp2 * L1) * g * sth1
    G31 = -Mp2 * l2 * g * sth2

    # make matrices
    M = np.stack(
        (
            np.stack((M11, M21, M31), axis=1),
            np.stack((M12, M22, M32), axis=1),
            np.stack((M13, M23, M33), axis=1),
        ),
        axis=2,
    )

    C = np.stack(
        (
            np.stack((C11, C21, C31), axis=1),
            np.stack((C12, C22, C32), axis=1),
            np.stack((C13, C23, C33), axis=1),
        ),
        axis=2,
    )

    G = np.stack((G11, G21, G31), axis=1)[:, :, None]

    u = input_amp * np.clip(u, -u_mx, u_mx)

    action = np.stack((u, np.zeros(u.shape), np.zeros(u.shape)), axis=1)

    M_inv = np.linalg.inv(M)

    C_x_dot = np.matmul(C, x[:, 3:, None])
    x_dot_dot = np.matmul(M_inv, action - C_x_dot - G).squeeze()

    x_dot = x[:, 3:] + x_dot_dot * dt
    x_pos = x[:, :3] + x_dot * dt

    x2 = np.hstack((x_pos, x_dot))

    return x2
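A hedged usage sketch: the function expects a batch of concatenated state/action rows of width 7 (six state components followed by one control input) and returns the next batch of states; the values below are illustrative only.

import autograd.numpy as np   # assuming the same numpy wrapper as the snippet above

# one sample: cart at rest, both poles at the origin angle, unit control input
xu = np.hstack([np.zeros((1, 6)), np.ones((1, 1))])   # shape (1, 7)
x_next = double_cartpole_dynamics(xu)
print(x_next.shape)   # (1, 6)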
Example #46
def fit_nn_reg(X,
               y,
               hidden_layer_sizes,
               batch_size,
               epochs,
               X_test,
               y_test,
               no_samples=20,
               mean_y_train=0.0,
               std_y_train=1.0,
               nonln='relu',
               weight_prior_std=1.0,
               noise_var=0.1,
               plot_toy=False,
               init_w=None):

    layer_sizes = np.array([X.shape[1]] + hidden_layer_sizes + [1])
    if nonln == 'tanh':
        nonlinearity = np.tanh
    elif nonln == 'relu':
        nonlinearity = lambda x: np.maximum(x, 0.0)
    elif nonln == 'rbf':
        nonlinearity = lambda x: norm.pdf(x, 0, 1)
    elif nonln == 'sin':
        nonlinearity = lambda x: np.sin(x)
    elif nonln == 'sigmoid':
        nonlinearity = lambda x: 1 / (1 + np.exp(-x))


    num_weights, elbo, predictions, get_error_and_ll, unpack_layers, prediction_test, unpack_params \
        = make_nn_funs(layer_sizes, nonlinearity=nonlinearity, weight_prior_std=weight_prior_std, noise_var=noise_var)
    elbo_grad = grad(elbo)
    prior_var = 1.0
    N_train = X.shape[0]

    print("    Epoch      |   train RMSE   |   test RMSE")

    if plot_toy:
        # Set up figure.
        fig = plt.figure(figsize=(12, 8), facecolor='white')
        ax = fig.add_subplot(111, frameon=True)
        plt.show(block=False)

    def print_perf(epoch, w):
        num_samples_test = 500
        pred_mean, pred_var, rmse_train, ll = get_error_and_ll(
            w, X, y, location=0.0, scale=1.0, num_samples=num_samples_test)
        pred_mean, pred_var, rmse_test, ll = get_error_and_ll(
            w,
            X_test,
            y_test,
            location=0.0,
            scale=1.0,
            num_samples=num_samples_test)
        print("{0:15}|{1:15}|{2:15}|".format(epoch, rmse_train, rmse_test))

        if plot_toy:
            # # Plot data and functions.
            # plt.cla()
            # ax.plot(X.ravel(), y.ravel(), 'bx')
            # plot_inputs = np.reshape(np.linspace(-7, 7, num=300), (300,1))
            # outputs_mean, outputs_var = prediction_test(w, plot_inputs, num_samples_test)
            # ax.plot(plot_inputs, outputs_mean, 'b-')
            # ax.plot(plot_inputs, outputs_mean + 2*np.sqrt(outputs_var), 'b-')
            # ax.plot(plot_inputs, outputs_mean - 2*np.sqrt(outputs_var), 'b-')
            # ax.set_ylim([-1, 1])
            # plt.draw()
            # plt.pause(1.0/60.0)

            # Sample functions from posterior.
            rs = npr.RandomState(0)
            mean, std = unpack_params(w)
            #rs = npr.RandomState(0)
            sample_weights = rs.randn(10, num_weights) * std + mean
            plot_inputs = np.linspace(-7, 7, num=400)
            outputs = predictions(sample_weights,
                                  np.expand_dims(plot_inputs, 1))

            # Plot data and functions.
            plt.cla()
            ax.plot(X.ravel(), y.ravel(), 'bx')
            ax.plot(plot_inputs, outputs[:, :, 0].T)
            ax.set_ylim([-2, 3])
            plt.draw()
            plt.pause(1.0 / 60.0)

    # Train with adam
    batch_idxs = make_batches(X.shape[0], batch_size)

    # Initialize parameters
    rs = npr.RandomState(0)
    if init_w is None:
        init_mean = 0.1 * rs.randn(num_weights)
    else:
        init_mean = init_w

    init_log_std = -2 * np.ones(num_weights)
    init_var_params = np.concatenate([init_mean, init_log_std])
    w = init_var_params

    m1 = 0
    m2 = 0
    beta1 = 0.9
    beta2 = 0.999
    epsilon = 1e-8
    alpha = 5e-3
    t = 0
    elbo_vec = []
    for epoch in range(epochs):
        permutation = np.random.choice(range(X.shape[0]),
                                       X.shape[0],
                                       replace=False)
        # print_perf(epoch, w)
        for idxs in batch_idxs:
            t += 1
            eb = elbo(w,
                      weight_prior_std**2,
                      X[permutation[idxs]],
                      y[permutation[idxs]],
                      N_train,
                      num_samples=no_samples)
            elbo_vec.append(eb)
            print(eb)
            grad_w = elbo_grad(w,
                               weight_prior_std**2,
                               X[permutation[idxs]],
                               y[permutation[idxs]],
                               N_train,
                               num_samples=no_samples)
            m1 = beta1 * m1 + (1 - beta1) * grad_w
            m2 = beta2 * m2 + (1 - beta2) * grad_w**2
            m1_hat = m1 / (1 - beta1**t)
            m2_hat = m2 / (1 - beta2**t)
            w -= alpha * m1_hat / (np.sqrt(m2_hat) + epsilon)
            t += 1

    print_perf(epochs - 1, w)
    return w, get_error_and_ll, prediction_test, unpack_params, elbo_vec
	def sample_sin_task(self, N, amp, phase):
		input_points = np.random.uniform(-5., 5., size = N)
		output = amp * np.sin(input_points + phase)
		return input_points, output
Example #48
def objective(x):
    return (np.sin(x * 7 / 4) + np.cos(x * 17 / 4))
 def fun(input_dict):
     A = np.sum(np.sin(input_dict['item_1']))
     B = np.sum(np.cos(input_dict['item_2']))
     return A + B
Example #50
 def f2(params):
     # one list, one numpy array input
     x,y = params[0]
     A = params[1]
     return np.dot(np.sin(A), np.array([x,y**2]))
 def example_function(x):
     return np.sum(x * np.sin(10.0*x) + x) - 1
Example #52
def prob7(N=200):
    """Let f(x) = (sin(x) + 1)^sin(cos(x)). Perform the following experiment N
    times:

        1. Choose a random value x0.
        2. Use prob1() to calculate the “exact” value of f′(x0). Time how long
            the entire process takes, including calling prob1() (each
            iteration).
        3. Time how long it takes to get an approximation of f'(x0) using
            cdq4(). Record the absolute error of the approximation.
        4. Time how long it takes to get an approximation of f'(x0) using
            Autograd (calling grad() every time). Record the absolute error of
            the approximation.

    Plot the computation times versus the absolute errors on a log-log plot
    with different colors for SymPy, the difference quotient, and Autograd.
    For SymPy, assume an absolute error of 1e-18.
    """
    # initialize the function
    f = lambda x: (anp.sin(x) + 1)**anp.sin(anp.cos(x))
    #initialize the list
    exact = []
    app1_ = []
    app2_ = []
    err = []
    err1 = []
    err2 = []

    for i in range(N):
        # choose a random value x0
        x0 = np.random.random()
        # calculate the “exact” value of f′(x0)
        #time it
        start1 = time.time()
        df = prob1()
        dff = df(x0)
        exact.append(time.time() - start1)
        err.append(1e-18)

        #using the fourth-order centered difference quotient
        #time it
        start2 = time.time()
        app1 = cdq4(f, x0, h=1e-5)
        app1_.append(time.time() - start2)
        err1.append(abs(dff - app1))

        #using Autograd
        #time it
        start3 = time.time()
        app2 = grad(f)(x0)
        app2_.append(time.time() - start3)
        err2.append(abs(dff - app2))

    # plotting the computation times versus the absolute errors
    plt.loglog(exact, err, "o", alpha=.5, label="Sympy")
    plt.loglog(app1_, err1, "o", alpha=.5, label="Difference Quotients")
    plt.loglog(app2_, err2, "o", alpha=.5, label="Autograd")
    plt.xlabel("Computation Time (seconds)")
    plt.ylabel("Absolute Error")
    plt.legend()
    plt.show()
def plot_ellipse(ax, alpha, mean, cov):
    t = np.linspace(0, 2 * np.pi, 100) % (2 * np.pi)
    circle = np.vstack((np.sin(t), np.cos(t)))
    ellipse = np.dot(np.linalg.cholesky(cov), circle) + mean[:, None]
    ax.plot(ellipse[0], ellipse[1], alpha=1., linestyle='-', linewidth=2)
Example #54
 def fun(input_tuple):
     A = np.sum(np.sin(input_tuple[0]))
     B = np.sum(np.cos(input_tuple[1]))
     return A + B
Example #55
def psy_analytic(x):
    return np.exp(-x/5.)*np.sin(x)
Example #56
def test_trigonometric():
    grad_test(lambda x: ti.tanh(x), lambda x: np.tanh(x))
    grad_test(lambda x: ti.sin(x), lambda x: np.sin(x))
    grad_test(lambda x: ti.cos(x), lambda x: np.cos(x))
    grad_test(lambda x: ti.acos(x), lambda x: np.arccos(x))
    grad_test(lambda x: ti.asin(x), lambda x: np.arcsin(x))
Example #57
def test_sin():
    fun = lambda x: 3.0 * np.sin(x)
    check_grads(fun)(npr.randn())
Example #58

time_series =[4,5,4,3,6,2,4,5,10,6,8,2,6,17,23,13,21,28,24,20,40,27,42,33,43,37,57,71,44,56,53,52,47,26,27,21,21,26,34,37,17,19,25,18,21,17,17,16,16,15,23,16,17,12,17,10,15,19,21,14,18,13,14,18,23,25,62,60,76,66,64,68,89,92,140,116,142,129,140,140,127,129,169,141,108,78,70,81,104,90,85,55,53,65,33,38,59,40,37,29,30,30,28,23,24,29,26,23,20,19,20,26,29,31,28,26,32,35,33,30,52,59,67,65,74,70,61,53,76,61,57,44,34,47,60,60,53,36,31,30,32,28,33,33,35,22,13,13,21,17,11,8,8,6,6,7,12,17,10,10,18,19,12,22,12,21,18,16,16,22,17,25,23,12,25,28,27,18,23,23,29,38,36,43,46,31,25,40,31,38,30,22,31,26,35,36,39,25,31,37,33,25,24,18,23,13,18,14,17,22,13,24,31,34,31,31,38,49,42,49,55,80,84,72,89,115,179,202,272,302,395,426,461,381,333,353,410,364,359,288,221,149,112,154,91,72,56,46,37,26,17,17,20,11,7,16,14,16,5,2,6,5,4,3,4,16,8,7,10,14,7,9,11,23,17,19,24,17,28,40,33,31,33,29,30,36,48,40,28,36,19,34,23,17,17,23,14,20,13,23,20,16,16,23,14,15,4,5,5,11,11,7,4,6,5,2,4,2,4,6,6,4,6,11,16,9,12,13,27,21,19,17,24,27,30,29,25,35,33,30,29,31,29,22,27,24,26,29,22,33,24,30,20,17,24,28,18,13,9,14,11,11,19,10,8,8,9,3,7,14,4,9,14,7,9,3,3,14,12,10,21,26,47,42,31,34,33,52,56,70,112,70,47,48,49,66,56,61,67,64,68,49,50,56,75,63,62,41,50,34,31,38,30,32,26,30,36,35,46,48,44,51,59,71,102,128,127,150,191,256,329,263,220,204,181,99,54,80,102,127,73,68,64,55,67,84,85,67,73,89,68,59,56,77,75,47,50,42,28,37,37,27,12,15,22,8,15,17,10,9,11,20,13,11,16,11,7,17,14,13,15,30,25,40,44,25,21,48,56,60,45,55,32,46,61,42,37,43,34,40,25,16,17,17,16,23,18,18,9,7,7,4,3,2,8,3,1,1,2,3,3,2,0,0,2,2,0,6,3,6,2,3,2,4,5,2,9,2,4,8,6,3,11,14,15,20,9,20,28,38,30,30,23,16,22,28,14,17,20,17,10,13,20,9,18,9,8,19,11,4,6,6,8,13,8,8,5,16,12,11,18,10,22,14,16,18,27,38,35,41,51,65,55,54,62,64,56,65,71,75,71,72,47,27,35,25,19,37,38,34,26,19,18,22,16,18,6,12,6,6,3,7,6,1,3,2,2,1,10,3,3,1,1,2,6,3,3,5,4,7,6,5,7,6,4,4,7,9,5,5,10,6,13,6,5,5,9,3,6,11,7,7,15,9,6,6,6,7,10,8,7,12,3,2,7,5,5,7,7,7,7,10,13,10,14,11,20,25,17,18,25,21,31,32,26,35,28,37,41,34,30,39,39,39,34,30,37,29,26,15,22,15,20,14,10,21,14,14,9,11,5,6,7,11,4,3,2,6,10,7,5,3,12,13,10,13,13,8,21,18,8,7,20,14,14,7,14,10,13,27,13,18,16,16,20,17,4,15,8,6,12,15,11,10,15,17,7,7,8,9,12,12,5,4,11,4,5,7,1,1,4,2,6,3,4,10,12,21,26,21,30,45,56,75,83,82,126,119,137,131,112,82,73,43,55,55,53,46,43,29,22,26,13,17,8,13,10,17,19,9,9,9,3,7,7,0,2,3,3,1,3,3,3,7,3,5,11,5,5,6,6,4,4,8,14,12,16,10,16,18,15,23,17,33,15,13,11,14,17,19,20,12,21,7,19,10,13,10,8,21,11,9,14,14,15,18,16,12,20,8,3,13,4,1,10,8,13,10,21,18,21,34,25,34,33,40,42,36,72,75,76,92,71,112,106,101,170,135,106,68,48,48,26,33,29,17,12,13,17,15,14,15,10,9,2,6,8,5,1,2,3,4,3,1,3,5,2,3,2,3,2,2,3,4,3,4,4,4,7,6,15,11,9,9,12,13,13,13,20,28,45,28,34,41,36,38,48,27,23,28,42,30,18,38,28,36,44,41,35,28,28,22,26,24,9,21,10,15]


num_particles = 50
#style.use('ggplot')

import matplotlib.pyplot as plt

#-(1.0/(2*observation_variance))*(theta_i  -  time_series[t])**2  + np.log(1.0/np.sqrt(np.pi*2*observation_variance))
observation_variance = .00000000001
transition_variance = 1000
seasonality = 4

G = np.matrix([[np.cos(2*np.pi/seasonality),np.sin(2*np.pi/seasonality)],[-np.sin(2*np.pi/seasonality),np.cos(2*np.pi/seasonality)]])

class StateSpaceModel:

    def lnprob_theta_i(self, theta_i, theta_t_minus_1, time_series,t):
        #ln poisson observations
            lnprob_theta_i = -np.exp(theta_i[0]) + time_series[t]*theta_i[0] - np.sum(np.log(np.arange(time_series[t])+1))
            transition_sum = 0
            for theta_t_minus_1_i in theta_t_minus_1:
                tmp = np.transpose(np.matmul(G,theta_t_minus_1_i.reshape((-1,1)))).tolist()[0]
              
                transition_sum += 1.0/(np.sqrt(2*np.pi*transition_variance))*np.exp(-.5*(1.0/transition_variance)*((theta_i - tmp )**2))
                
            return (lnprob_theta_i+np.log(transition_sum))
    
    def dlnprob(self, theta_i,theta_t_minus_1,time_series, t):
Example #59
def to_scalar(x):
    return np.sum(np.real(np.sin(x)))
def visualize3d(func, **kwargs):
    ### input arguments ###
    wmax = 1
    if 'wmax' in kwargs:
        wmax = kwargs['wmax'] + 0.5

    view = [20, -50]
    if 'view' in kwargs:
        view = kwargs['view']

    axes = False
    if 'axes' in kwargs:
        axes = kwargs['axes']

    plot_final = False
    if 'plot_final' in kwargs:
        plot_final = kwargs['plot_final']

    num_contours = 10
    if 'num_contours' in kwargs:
        num_contours = kwargs['num_contours']

    pt = [0, 0]
    if 'pt' in kwargs:
        pt = kwargs['pt']
    pt = np.asarray(pt)
    pt.shape = (2, 1)

    max_steps = 10
    if 'max_steps' in kwargs:
        max_steps = kwargs['max_steps']
    num_samples = 10
    if 'num_samples' in kwargs:
        num_samples = kwargs['num_samples']
    steplength = 1
    if 'steplength' in kwargs:
        steplength = kwargs['steplength']

    ##### construct figure with panels #####
    # construct figure
    fig = plt.figure(figsize=(9, 3))

    # remove whitespace from figure
    fig.subplots_adjust(left=0, right=1, bottom=0, top=1)  # remove whitespace

    # create subplot with 3 panels, plot input function in center plot
    gs = gridspec.GridSpec(1, 2, width_ratios=[1, 2])
    ax = plt.subplot(gs[0], projection='3d')
    ax2 = plt.subplot(gs[1], aspect='equal')

    #### define input space for function and evaluate ####
    w = np.linspace(-wmax, wmax, 200)
    w1_vals, w2_vals = np.meshgrid(w, w)
    w1_vals.shape = (len(w)**2, 1)
    w2_vals.shape = (len(w)**2, 1)
    h = np.concatenate((w1_vals, w2_vals), axis=1)
    func_vals = np.asarray([func(s) for s in h])
    w1_vals.shape = (len(w), len(w))
    w2_vals.shape = (len(w), len(w))
    func_vals.shape = (len(w), len(w))

    # plot function
    ax.plot_surface(w1_vals,
                    w2_vals,
                    func_vals,
                    alpha=0.1,
                    color='w',
                    rstride=25,
                    cstride=25,
                    linewidth=1,
                    edgecolor='k',
                    zorder=2)

    # plot z=0 plane
    ax.plot_surface(w1_vals,
                    w2_vals,
                    func_vals * 0,
                    alpha=0.1,
                    color='w',
                    zorder=1,
                    rstride=25,
                    cstride=25,
                    linewidth=0.3,
                    edgecolor='k')

    ### make contour right plot - as well as horizontal and vertical axes ###
    ax2.contour(w1_vals, w2_vals, func_vals, num_contours, colors='k')
    if axes == True:
        ax2.axhline(linestyle='--', color='k', linewidth=1)
        ax2.axvline(linestyle='--', color='k', linewidth=1)

    #### run local random search algorithm ####
    pt_history, eval_history = random_local_search(func, pt, max_steps,
                                                   num_samples, steplength)

    ### plot circle on which point lies, as well as step length circle - used only for simple quadratic
    if plot_final == True:
        # plot contour of quadratic on which final point was plotted
        f = pt_history[-1]
        val = np.linalg.norm(f)
        theta = np.linspace(0, 1, 400)
        x = val * np.cos(2 * np.pi * theta)
        y = val * np.sin(2 * np.pi * theta)
        ax2.plot(x, y, color='r', linestyle='--', linewidth=1)

        # plot direction sampling circle centered at final point
        x = steplength * np.cos(2 * np.pi * theta) + f[0]
        y = steplength * np.sin(2 * np.pi * theta) + f[1]
        ax2.plot(x, y, color='b', linewidth=1)

    # colors for points
    s = np.linspace(0, 1, len(eval_history[:round(len(eval_history) / 2)]))
    s.shape = (len(s), 1)
    t = np.ones(len(eval_history[round(len(eval_history) / 2):]))
    t.shape = (len(t), 1)
    s = np.vstack((s, t))
    colorspec = []
    colorspec = np.concatenate((s, np.flipud(s)), 1)
    colorspec = np.concatenate((colorspec, np.zeros((len(s), 1))), 1)

    #### scatter path points ####
    for k in range(len(eval_history)):
        ax.scatter(pt_history[k, 0],
                   pt_history[k, 1],
                   0,
                   s=60,
                   c=colorspec[k],
                   edgecolor='k',
                   linewidth=0.5 * math.sqrt((1 / (float(k) + 1))),
                   zorder=3)

        ax2.scatter(pt_history[k, 0],
                    pt_history[k, 1],
                    s=60,
                    c=colorspec[k],
                    edgecolor='k',
                    linewidth=1.5 * math.sqrt((1 / (float(k) + 1))),
                    zorder=3)

    #### connect points with arrows ####
    if len(eval_history) < 10:
        for i in range(len(eval_history) - 1):
            pt1 = pt_history[i]
            pt2 = pt_history[i + 1]

            # draw arrow in left plot
            a = Arrow3D([pt1[0], pt2[0]], [pt1[1], pt2[1]], [0, 0],
                        mutation_scale=10,
                        lw=2,
                        arrowstyle="-|>",
                        color="k")
            ax.add_artist(a)

            # draw 2d arrow in right plot
            ax2.arrow(pt1[0],
                      pt1[1], (pt2[0] - pt1[0]) * 0.78,
                      (pt2[1] - pt1[1]) * 0.78,
                      head_width=0.1,
                      head_length=0.1,
                      fc='k',
                      ec='k',
                      linewidth=3,
                      zorder=2,
                      length_includes_head=True)

    ### cleanup panels ###
    ax.set_xlabel('$w_1$', fontsize=12)
    ax.set_ylabel('$w_2$', fontsize=12, rotation=0)
    ax.set_title('$g(w_1,w_2)$', fontsize=12)
    ax.view_init(view[0], view[1])

    ax2.set_xlabel('$w_1$', fontsize=12)
    ax2.set_ylabel('$w_2$', fontsize=12, rotation=0)

    # clean up axis
    ax.xaxis.pane.fill = False
    ax.yaxis.pane.fill = False
    ax.zaxis.pane.fill = False

    ax.xaxis.pane.set_edgecolor('white')
    ax.yaxis.pane.set_edgecolor('white')
    ax.zaxis.pane.set_edgecolor('white')

    ax.xaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
    ax.yaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
    ax.zaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)

    # plot
    plt.show()