Example #1
def GetVideoTimeSeries():
    TS = []
    VIS_RY = []
    VIS_RX = []
    t0 = None
    lasthsum = None
    lastvsum = None
    hprior, vprior = 0, 0
    for msg in parse.ParseLog(open('../rustlerlog-BMPauR')):
        if msg[0] == 'img':
            _, ts, im = msg
            im = GammaCorrect(im)
            hsum = np.sum(im, axis=0)
            hsum -= np.mean(hsum)
            vsum = np.sum(im, axis=1)
            vsum -= np.mean(vsum)
            if t0 is None:
                t0 = ts
                lasthsum = hsum
                lastvsum = vsum
            hoffset = np.argmax(
                -2*np.arange(-80 - hprior, 81 - hprior)**2 +
                np.correlate(lasthsum, hsum[80:-80], mode='valid')) - 80
            voffset = np.argmax(
                -2*np.arange(-60 - vprior, 61 - vprior)**2 +
                np.correlate(lastvsum, vsum[60:-60], mode='valid')) - 60
            TS.append(ts - t0)
            VIS_RY.append(hoffset)
            VIS_RX.append(voffset)
            hprior, vprior = hoffset, voffset
            lasthsum = hsum
            lastvsum = vsum
    return TS, VIS_RY, VIS_RX
Example #2
    def _hmc_log_probability(self, L, b, A, W):
        """
        Compute the log probability as a function of L.
        This allows us to take the gradients wrt L using autograd.
        :param L:
        :param A:
        :return:
        """
        assert self.B == 1
        import autograd.numpy as anp

        # Compute pairwise distance
        L1 = anp.reshape(L,(self.N,1,self.dim))
        L2 = anp.reshape(L,(1,self.N,self.dim))
        # Mu = a * anp.sqrt(anp.sum((L1-L2)**2, axis=2)) + b
        Mu = -anp.sum((L1-L2)**2, axis=2) + b

        Aoff = A * (1-anp.eye(self.N))
        X = (W - Mu[:,:,None]) * Aoff[:,:,None]

        # Get the covariance and precision
        Sig = self.cov.sigma[0,0]
        Lmb = 1./Sig

        lp = anp.sum(-0.5 * X**2 * Lmb)

        # Log prior of L under spherical Gaussian prior
        lp += -0.5 * anp.sum(L * L / self.eta)

        # Log prior of b under standard Gaussian prior
        lp += -0.5 * b ** 2

        return lp
Example #3
def init_params(params, domain, init = None, seed = 0):
    # initialise the location of x and y
    if init is not None:
        re_init = False
        if init[0] <= domain[0, 0] or init[0] >= domain[0, 1]:
            re_init = True
        if init[1] <= domain[1, 0] or init[1] >= domain[1, 1]:
            re_init = True
        if re_init is True:
            print('invalid initialisation, do random init instead...')
            init = None
    
    if init is None:
        # random initialisation
        init = np.zeros(2)
        np.random.seed(seed)
        init[0] = np.random.random() - 0.5
        init[1] = np.random.random() - 0.5
        init[0] = init[0] * (domain[0, 1]-domain[0, 0]) + 0.5 * (np.sum(domain[0]))
        init[1] = init[1] * (domain[1, 1]-domain[1, 0]) + 0.5 * (np.sum(domain[1]))
              
    params['x'] = init[0]
    params['y'] = init[1]       
    print('initialise x =', init[0], 'y =', init[1])
        
    return params
Example #4
    def _hmc_log_probability(self, L, mu_0, mu_self, A):
        """
        Compute the log probability as a function of L.
        This allows us to take the gradients wrt L using autograd.
        :param L:
        :param A:
        :return:
        """
        import autograd.numpy as anp
        # Compute pairwise distance
        L1 = anp.reshape(L,(self.N,1,self.dim))
        L2 = anp.reshape(L,(1,self.N,self.dim))
        D = - anp.sum((L1-L2)**2, axis=2)

        # Compute the logit probability
        logit_P = D + mu_0 + mu_self * np.eye(self.N)

        # Take the logistic of the negative distance
        P = 1.0 / (1+anp.exp(-logit_P))

        # Compute the log likelihood
        ll = anp.sum(A * anp.log(P) + (1-A) * anp.log(1-P))

        # Log prior of L under spherical Gaussian prior
        lp = -0.5 * anp.sum(L * L / self.sigma)

        # Log prior of mu_0 under standard Gaussian prior
        lp += -0.5 * mu_0**2

        lp += -0.5 * mu_self**2

        return ll + lp
Example #5
    def sample_variational_density(params):
        mean = params[0]
        log_std = params[1]
        norm_flow_params = params[2]

        samples = sample_diag_gaussian(mean, log_std, num_samples, rs) 

        logq_zk = variational_log_density(params, samples)
        logq_zk = np.reshape(logq_zk, [num_samples])

        z_k, all_zs = normalizing_flows(samples, norm_flow_params)

        # print (z_k.shape)


        #Need to resample because q0(z) != qk(z)

        normalized_ws = logq_zk / np.sum(logq_zk)

        while np.sum(normalized_ws[:-1]) > 1.:
            print (np.sum(normalized_ws))
            normalized_ws = normalized_ws - .0001
        # normalized_ws = normalized_ws - .0001

        sampled = np.random.multinomial(30, normalized_ws)#, size=1)

        weighted_samples = []
        for i in range(len(sampled)):
            for j in range(sampled[i]):
                weighted_samples.append(z_k[i])

        weighted_samples = np.array(weighted_samples)
        # print (weighted_samples.shape)

        return weighted_samples
Example #6
    def calculate_acceptance_logprob(self, proposal, logprob_proposal,
            logprob_reverse, logdet, images):

        def image_like(src, img):
            # get biggest bounding box needed to consider for this image
            xlim, ylim = self.bounding_boxes[img]
            background_img = self.background_image_dict[img]
            data_img       = img.nelec[ylim[0]:ylim[1], xlim[0]:xlim[1]]
            mask_img       = img.invvar[ylim[0]:ylim[1], xlim[0]:xlim[1]]

            # model image for img, (xlim, ylim)
            model_img, _, _ = src.compute_model_patch(img, xlim=xlim, ylim=ylim)

            # compute current model loglike and proposed model loglike
            ll = poisson_loglike(data      = data_img,
                                 model_img = background_img+model_img,
                                 mask      = mask_img)
            return ll

        # compute current and proposal model likelihoods
        curr_like       = np.sum([image_like(self, img) for img in images])
        curr_logprior   = self.model.logprior(self.params)

        proposal_source = self.model._source_type(proposal, self.model)
        prop_like       = np.sum([image_like(proposal_source, img) for img in images])
        prop_logprior   = self.model.logprior(proposal_source.params)

        # compute acceptance ratio
        accept_ll = (prop_like + prop_logprior) - (curr_like + curr_logprior) + \
                    (logprob_reverse - logprob_proposal) + \
                    logdet
        return accept_ll
Example #7
def get_wine_data():
    print("Loading training data...")
    wine_data_file = open('./wines_data/wine.data', 'r')
    num_class_1, num_class_2, num_class_3 = 59, 71, 48
    wines_data = []

    for line in wine_data_file:
        entries = line.split(',')
        wine_type = int(entries[0]) # 1, 2, or 3
        wine_type_one_hot = [1., 0., 0.] if wine_type == 1 else [0., 1., 0.] if wine_type == 2 else [0., 0., 1.]
        wine_features = list(map(float, entries[1:-1]))

        wine_data = wine_features
        wine_data.extend(wine_type_one_hot)
        wines_data.append(wine_data)

    wines_data = np.array(wines_data)
    np.random.shuffle(wines_data)
    features, labels = wines_data[:, :-3], wines_data[:, -3:]

    num_data_pts = len(wines_data)
    train_set_size = int(0.8 * num_data_pts)
    train_data, train_labels = features[:train_set_size], labels[:train_set_size]
    test_data, test_labels = features[train_set_size:], labels[train_set_size:]

    assert np.sum(wines_data[:, -3]) == num_class_1
    assert np.sum(wines_data[:, -2]) == num_class_2
    assert np.sum(wines_data[:, -1]) == num_class_3
    return num_data_pts, train_data, train_labels, test_data, test_labels
Example #8
 def fun(input_dict):
     A = 0.
     B = 0.
     for i, k in enumerate(sorted(input_dict)):
         A = A + np.sum(np.sin(input_dict[k])) * (i + 1.0)
         B = B + np.sum(np.cos(input_dict[k]))
     return A + B
Example #9
def poisson_loglike(data, model_img, mask):
    assert model_img.shape == mask.shape
    assert data.shape == model_img.shape
    good_pix = (model_img > 0.) & (mask != 0)
    ll_img   = np.sum(np.log(model_img[good_pix]) * data[good_pix]) - \
               np.sum(model_img[good_pix])
    return ll_img
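This is the Poisson log-likelihood with the constant -log(data!) term dropped. A small sanity check against scipy.stats.poisson (a sketch, assuming plain NumPy/SciPy and hypothetical toy inputs):

import numpy as np
from scipy.stats import poisson
from scipy.special import gammaln

# hypothetical toy inputs: integer-valued counts, model intensities, all pixels unmasked
data  = np.array([[1., 3.], [0., 2.]])
model = np.array([[0.5, 2.5], [1.0, 1.5]])
mask  = np.ones_like(data)

ll = poisson_loglike(data, model, mask)
# poisson.logpmf = data*log(model) - model - gammaln(data+1); the function above
# omits the gammaln term, so add it back before comparing
assert np.isclose(ll, poisson.logpmf(data, model).sum() + gammaln(data + 1).sum())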
Example #10
    def setUp(self):
        self.X = None
        self.cost = lambda X: np.exp(np.sum(X**2))

        n1 = self.n1 = 3
        n2 = self.n2 = 4
        n3 = self.n3 = 5

        Y = self.Y = rnd.randn(n1, n2, n3)
        A = self.A = rnd.randn(n1, n2, n3)

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y ** 2))
        self.correct_grad = correct_grad = 2 * Y * np.exp(np.sum(Y ** 2))

        # ... and hess
        # First form hessian tensor H (6th order)
        Y1 = Y.reshape(n1, n2, n3, 1, 1, 1)
        Y2 = Y.reshape(1, 1, 1, n1, n2, n3)

        # Create an n1 x n2 x n3 x n1 x n2 x n3 diagonal tensor
        diag = np.eye(n1 * n2 * n3).reshape(n1, n2, n3, n1, n2, n3)

        H = np.exp(np.sum(Y ** 2)) * (4 * Y1 * Y2 + 2 * diag)

        # Then 'right multiply' H by A
        Atensor = A.reshape(1, 1, 1, n1, n2, n3)

        self.correct_hess = np.sum(H * Atensor, axis=(3, 4, 5))

        self.backend = AutogradBackend()
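For reference (not part of the test), the tensor built above matches the closed-form Hessian-vector product of f(X) = exp(sum(X**2)); a quick NumPy sketch of the identity being exercised:

import numpy as np
import numpy.random as rnd

n1, n2, n3 = 3, 4, 5
Y = rnd.randn(n1, n2, n3)
A = rnd.randn(n1, n2, n3)

# explicit 6th-order Hessian tensor contracted against A, as in setUp above
Y1 = Y.reshape(n1, n2, n3, 1, 1, 1)
Y2 = Y.reshape(1, 1, 1, n1, n2, n3)
diag = np.eye(n1 * n2 * n3).reshape(n1, n2, n3, n1, n2, n3)
H = np.exp(np.sum(Y ** 2)) * (4 * Y1 * Y2 + 2 * diag)
hess_tensor = np.sum(H * A.reshape(1, 1, 1, n1, n2, n3), axis=(3, 4, 5))

# closed form: Hess(f)[A] = exp(sum(Y**2)) * (4 * Y * <Y, A> + 2 * A)
hess_closed = np.exp(np.sum(Y ** 2)) * (4 * Y * np.sum(Y * A) + 2 * A)
assert np.allclose(hess_tensor, hess_closed)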
Example #11
    def setUp(self):
        self.X = None
        self.cost = lambda X: np.exp(np.sum(X**2))

        n = self.n = 15

        Y = self.Y = rnd.randn(1, n)
        A = self.A = rnd.randn(1, n)

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y ** 2))
        self.correct_grad = correct_grad = 2 * Y * np.exp(np.sum(Y ** 2))

        # ... and hess
        # First form hessian matrix H
        # Convert Y and A into matrices (row vectors)
        Ymat = np.matrix(Y)
        Amat = np.matrix(A)

        diag = np.eye(n)

        H = np.exp(np.sum(Y ** 2)) * (4 * Ymat.T.dot(Ymat) + 2 * diag)

        # Then 'left multiply' H by A
        self.correct_hess = np.array(Amat.dot(H))

        self.backend = AutogradBackend()
Example #12
    def setUp(self):
        self.X = None
        self.cost = lambda X: np.exp(np.sum(X**2))

        m = self.m = 10
        n = self.n = 15

        Y = self.Y = rnd.randn(m, n)
        A = self.A = rnd.randn(m, n)

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y ** 2))
        self.correct_grad = correct_grad = 2 * Y * np.exp(np.sum(Y ** 2))

        # ... and hess
        # First form hessian tensor H (4th order)
        Y1 = Y.reshape(m, n, 1, 1)
        Y2 = Y.reshape(1, 1, m, n)

        # Create an m x n x m x n array with diag[i,j,k,l] == 1 iff
        # (i == k and j == l), this is a 'diagonal' tensor.
        diag = np.eye(m * n).reshape(m, n, m, n)

        H = np.exp(np.sum(Y ** 2)) * (4 * Y1 * Y2 + 2 * diag)

        # Then 'right multiply' H by A
        Atensor = A.reshape(1, 1, m, n)

        self.correct_hess = np.sum(H * Atensor, axis=(2, 3))

        self.backend = AutogradBackend()
Example #13
def logZ(natparam):
    neghalfJ, h, a, b = unpack_dense(natparam)
    J = -2*neghalfJ
    L = np.linalg.cholesky(J)
    return 1./2 * np.sum(h * np.linalg.solve(J, h)) \
        - np.sum(np.log(np.diagonal(L, axis1=-1, axis2=-2))) \
        + np.sum(a + b)
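Assuming unpack_dense returns the natural parameters of a Gaussian with density proportional to \(\exp(-\tfrac{1}{2}x^\top J x + h^\top x)\), this is the log-normalizer

    \log Z = \tfrac{1}{2}\, h^\top J^{-1} h - \tfrac{1}{2}\log\det J + \tfrac{n}{2}\log 2\pi,
    \qquad \tfrac{1}{2}\log\det J = \sum_i \log L_{ii} \quad (J = LL^\top),

with the dimension-dependent constant \(\tfrac{n}{2}\log 2\pi\) dropped and the scalar natural-parameter offsets a and b added via np.sum(a + b).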
Example #14
def logZ(natparam):
    J, h = natparam[:2]
    J = -2*J
    L = np.linalg.cholesky(J)
    return 1./2 * np.sum(h * np.linalg.solve(J, h)) \
        - np.sum(np.log(np.diagonal(L, axis1=-1, axis2=-2))) \
        - sum(map(np.sum, natparam[2:]))
Example #15
 def logprob(weights, inputs, targets):
     eps = 1e-5
     log_prior = -L2_reg * np.sum(weights**2, axis=1)
     preds = sigmoid(predictions(weights, inputs))
     label_probabilities = targets[1,:] * np.log(preds + eps) + (1 - targets[1,:]) * np.log(1 - preds + eps)
     #log_lik = -np.sum((preds - targets)**2, axis=1)[:, 0] / noise_variance
     log_lik = -np.sum(label_probabilities, axis=(1,2))
     return log_prior + log_lik
Example #16
def median_distance(xs1, xs2):
    if len(xs1.shape) == 1:
        diff = xs1[:,np.newaxis] - xs2[np.newaxis,:]
        norms = np.sum(diff * diff, axis=1) ** 0.5
    elif len(xs1.shape) == 2:
        diff = xs1[:,np.newaxis,:] - xs2[np.newaxis,:,:]
        norms = np.sum(diff * diff, axis=2) ** 0.5
    return mat_median(norms)
Example #17
def fit_maxlike(data, r_guess):
    # follows Wikipedia's section on negative binomial max likelihood
    assert np.var(data) > np.mean(data), "Likelihood-maximizing parameters don't exist!"
    loglike = lambda r, p: np.sum(negbin_loglike(r, p, data))
    p = lambda r: np.sum(data) / np.sum(r+data)
    rprime = lambda r: grad(loglike)(r, p(r))
    r = newton(rprime, r_guess)
    return r, p(r)
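The helpers used above are not shown here; a minimal sketch of what they could look like (negative binomial log-pmf written with gammaln so autograd can differentiate it, Newton's method from SciPy), together with a call on hypothetical synthetic counts:

import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad
from autograd.scipy.special import gammaln
from scipy.optimize import newton

def negbin_loglike(r, p, x):
    # negative binomial log-pmf in the (r, p) parameterization, autograd-differentiable in r
    return gammaln(r + x) - gammaln(r) - gammaln(x + 1) \
        + x * np.log(p) + r * np.log(1 - p)

# hypothetical overdispersed count data (variance > mean, so the MLE exists)
npr.seed(0)
data = npr.poisson(npr.gamma(2.0, 3.0, size=1000))
r_hat, p_hat = fit_maxlike(data, r_guess=1.0)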
Example #18
def PhotometricError(iref, inew, R, T, points, D):
    # points is a tuple ([y], [x]); convert to homogeneous
    siz = iref.shape
    npoints = len(points[0])
    f = siz[1]  # focal length, FIXME
    Xref = np.vstack(((points[1] - siz[1]*0.5) / f,  # x
                      (siz[0]*0.5 - points[0]) / f,  # y (left->right hand)
                      np.ones(npoints)))             # z = 1
    # this is confusingly written -- i am broadcasting the translation T to
    # every column, but numpy broadcasting only works if it's rows, hence all
    # the transposes
    # print D * Xref
    Xnew = (np.dot(so3.exp(R), (D * Xref)).T + T).T
    # print Xnew
    # right -> left hand projection
    proj = Xnew[0:2] / Xnew[2]
    p = (-proj[1]*f + siz[0]*0.5, proj[0]*f + siz[1]*0.5)
    margin = 10  # int(siz[0] / 5)
    inwindow_mask = ((p[0] >= margin) & (p[0] < siz[0]-margin-1) &
                     (p[1] >= margin) & (p[1] < siz[1]-margin-1))
    npts_inw = sum(inwindow_mask)
    if npts_inw < 10:
        return 1e6, np.zeros(6 + npoints)
    # todo: filter points which are now out of the window
    oldpointidxs = (points[0][inwindow_mask],
                    points[1][inwindow_mask])
    newpointidxs = (p[0][inwindow_mask], p[1][inwindow_mask])
    origpointidxs = np.nonzero(inwindow_mask)[0]
    E = InterpolatedValues(inew, newpointidxs) - iref[oldpointidxs]
    # dE/dk ->
    # d/dk r_p^2 = d/dk (Inew(w(r, T, D, p)) - Iref(p))^2
    # = -2r_p dInew/dp dp/dw dw/dX dX/dk
    # = -2r_p * g(w(r, T, D, p)) * dw(r, T, D, p)
    # intensity gradients for each point
    Ig = InterpolatedGradients(inew, newpointidxs)
    # TODO: use tensors for this
    # gradients for R, T, and D
    gradient = np.zeros(6 + npoints)
    for i in range(npts_inw):
        # print 'newidx (y,x) = ', newpointidxs[0][i], newpointidxs[1][i]
        # Jacobian of w
        oi = origpointidxs[i]
        Jw = dw(Xref[0][oi], Xref[1][oi], D[oi], R, T)
        # scale back up into pixel space, right->left hand coords to get
        # Jacobian of p
        Jp = f * np.vstack((-Jw[1], Jw[0]))
        # print origpointidxs[i], 'Xref', Xref[:, i], 'Ig', Ig[:, i], \
        #     'dwdRz', Jw[:, 2], 'dpdRz', Jp[:, 2]
        # full Jacobian = 2*E + Ig * Jp
        J = np.sign(E[i]) * np.dot(Ig[:, i], Jp)
        # print '2 E[i]', 2*E[i], 'Ig*Jp', np.dot(Ig[:, i], Jp)
        gradient[:6] += J[:6]
        # print J[:6]
        gradient[6+origpointidxs[i]] += J[6]

    print(R, T, np.sum(np.abs(E)), npts_inw)
    # return ((0.2*(npoints - npts_inw) + np.dot(E, E)), gradient)
    return np.sum(np.abs(E)) / (npts_inw), gradient / (npts_inw)
Example #19
def grad_KL(params, samples, num_particles,LB):
    S = len(samples)
    #initialize KL to be this
    KL1 = gradient_log_variational(params,samples,0)*(LB-c_i(params,0,S,num_particles)/S)
    KL1 = np.sum(KL1)
    KL2 = gradient_log_variational(params,samples,1)*(LB-c_i(params,1,S,num_particles)/S)
    KL2 = np.sum(KL2)
    KL = np.array([KL1,KL2])
    return KL
Example #20
 def tmp_cost_func(x, u, t, aux):
     err = x[0:self.n_dims_] - self.ref_array[t]
     #autograd does not allow A.dot(B)
     cost = np.dot(np.dot(err, self.weight_array[t]), err) + np.sum(u**2) * self.R_
     if t > self.T_-1:
         #regularize velocity for the termination point
         #autograd does not allow self increment
         cost = cost + np.sum(x[self.n_dims_:]**2)  * self.R_ * self.Q_vel_ratio_
     return cost
Example #21
def logloss(ys, ys_hat, ws=None):
    #print 'ws',ws.shape, 'ys',ys.shape, 'xs',xs.shape, 'B',B.shape
    if ws is None:
        return np.sum(np.log(1 + np.exp(-ys * ys_hat))) / float(len(ys)) #+ (0.5 * reg * np.dot(B, B)) #/ float(len(ys))
    else:
        try:
            return np.sum(ws * np.log(1 + np.exp(-ys * ys_hat))) / float(len(ys)) #+ (0.5 * reg * np.dot(B, B)) #/ float(len(ys))
        except:
            pdb.set_trace()
Example #22
def test_jacobian_higher_order():
    fun = lambda x: np.sin(np.outer(x, x)) + np.cos(np.dot(x, x))

    assert jacobian(fun)(npr.randn(3)).shape == (3, 3, 3)
    assert jacobian(jacobian(fun))(npr.randn(3)).shape == (3, 3, 3, 3)
    assert jacobian(jacobian(jacobian(fun)))(npr.randn(3)).shape == (3, 3, 3, 3, 3)

    check_grads(lambda x: np.sum(jacobian(fun)(x)), npr.randn(3))
    check_grads(lambda x: np.sum(jacobian(jacobian(fun))(x)), npr.randn(3))
Example #23
def softmax_grads(Ks, beta, i, j):
  """
  return the grad of the ith element of weighting w.r.t. j-th element of Ks
  """
  if j == i:
    num = beta*np.exp(Ks[i]*beta) * (np.sum(np.exp(Ks*beta)) - np.exp(Ks[i]*beta))
  else:
    num = -beta*np.exp(Ks[i]*beta + Ks[j]*beta)
  den1 = np.sum(np.exp(Ks*beta))
  return num / (den1 * den1)
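Since this is just the analytic Jacobian of a softmax weighting w_i = exp(beta*Ks[i]) / sum_j exp(beta*Ks[j]), it can be sanity-checked against autograd (a sketch, assuming autograd is available):

import autograd.numpy as np
from autograd import jacobian

Ks = np.array([0.3, -1.2, 0.7, 0.1])
beta = 2.0

def weighting(Ks):
    return np.exp(beta * Ks) / np.sum(np.exp(beta * Ks))

J = jacobian(weighting)(Ks)   # J[i, j] = d w_i / d Ks[j]
for i in range(len(Ks)):
    for j in range(len(Ks)):
        assert np.isclose(J[i, j], softmax_grads(Ks, beta, i, j))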
Example #24
def beta_grads(Ks, beta, i):
  Karr = np.array(Ks)
  anum = Ks[i] * np.exp(Ks[i] * beta)
  aden = np.sum(np.exp(beta * Karr))
  a = anum / aden

  bnum = np.exp(Ks[i] * beta) * (np.sum(np.multiply(Karr, np.exp(Karr * beta))))
  bden = aden * aden
  b = bnum / bden
  return a - b
Example #25
 def compute_modfeat(self, w):
     mod_feat = self.conv_data_fea.copy()
     mod_dem = np.ones(mod_feat.shape)  ## need to change to accommodate non-binary features
     ws = np.array([w[k*self.F:(k+1)*self.F] for k in range(self.K)])
     w_tiled = np.array([ws for i in range(mod_feat.shape[0])])
     mod_feat = mod_feat * w_tiled
     mod_dem = mod_dem * w_tiled
     mod_feat = np.sum(np.exp(mod_feat), axis=2)
     mod_dem = np.sum(np.exp(mod_dem), axis=2)
     return mod_feat / mod_dem
Example #26
 def fun(input_dict):
     A = 0.
     B = 0.
     for i, (k, v) in enumerate(sorted(input_dict.items(), key=op.itemgetter(0))):
         A = A + np.sum(np.sin(v)) * (i + 1.0)
         B = B + np.sum(np.cos(v))
     for v in input_dict.values():
         A = A + np.sum(np.sin(v))
     for k in sorted(input_dict.keys()):
         A = A + np.sum(np.cos(input_dict[k]))
     return A + B
Example #27
def dist_matrix(X, Y):
    """
    Construct a pairwise Euclidean distance matrix of size X.shape[0] x Y.shape[0]
    """
    sx = np.sum(X**2, 1)
    sy = np.sum(Y**2, 1)
    D2 =  sx[:, np.newaxis] - 2.0*np.dot(X, Y.T) + sy[np.newaxis, :] 
    # to prevent numerical errors from taking sqrt of negative numbers
    D2[D2 < 0] = 0
    D = np.sqrt(D2)
    return D
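A quick check of the vectorized expansion ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2 against a brute-force loop (a sketch, plain NumPy assumed):

import numpy as np

X = np.random.randn(5, 3)
Y = np.random.randn(7, 3)
D = dist_matrix(X, Y)

# brute-force reference: D[i, j] should equal ||X[i] - Y[j]||
ref = np.array([[np.linalg.norm(x - y) for y in Y] for x in X])
assert D.shape == (5, 7)
assert np.allclose(D, ref)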
Example #28
def test_grad_and_aux():
    A = npr.randn(5, 4)
    x = npr.randn(4)

    f = lambda x: (np.sum(np.dot(A, x)), x**2)
    g = lambda x: np.sum(np.dot(A, x))

    assert len(grad_and_aux(f)(x)) == 2

    check_equivalent(grad_and_aux(f)(x)[0], grad(g)(x))
    check_equivalent(grad_and_aux(f)(x)[1], x**2)
Example #29
    def _obj(self, m, p, xn, gn, **kwargs):
        ll = self._ll(m, p, xn)

        mw = self.mu_weight_
        mp = self.mu_prior_
        pw = self.precision_weight_
        pp = self.precision_prior_
        prior = (pw-0.5) * np.log(p) - 0.5*p*(mw*(m-mp)**2 + 2*pp)

        res = -1*(np.sum(gn * ll) + np.sum(prior))
        return res
Example #30
def get_tight_constraints(A, b, x):
    LHS = np.dot(A, x)
    assert (LHS < b).all()
#    tight_eps = 1.1
    tight_eps = 0.0001
#    tight_eps = 0.0000000001
    tight = (b - LHS) < tight_eps
#    print 'num_tight:', np.sum(tight)
    A_tight = A[tight]
    b_tight = b[tight]
    print(np.sum(tight))
    return A_tight, b_tight
Example #31
def diag_gaussian_log_density(x, mu, log_std):
    return np.sum(normal.logpdf(x, mu, np.exp(log_std)), axis=-1)
Example #32
    def fit(self, X, Y, constraints=None, warm_start=None, initialize=True):
        """Learn parameters using cutting plane method.

        Parameters
        ----------
        X : iterable
            Training instances. Contains the structured input objects.
            No requirement on the particular form of entries of X is made.

        Y : iterable
            Training labels. Contains the structured labels for inputs in X.
            Needs to have the same length as X.

        constraints : iterable
            Known constraints for warm-starts. List of same length as X.
            Each entry is itself a list of constraints for a given instance x .
            Each constraint is of the form [y_hat, delta_joint_feature, loss], where
            y_hat is a labeling, ``delta_joint_feature = joint_feature(x, y) - joint_feature(x, y_hat)``
            and loss is the loss for predicting y_hat instead of the true label
            y.

        initialize : boolean, default=True
            Whether to initialize the model for the data.
            Leave this true except if you really know what you are doing.
        """
        if self.verbose:
            print("Training n-slack dual structural SVM")
        cvxopt.solvers.options['show_progress'] = self.verbose > 3
        if initialize:
            self.model.initialize(X, Y)

        self.w = np.zeros(self.model.size_joint_feature) + 1e-6
        # self.w = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
        # self.w = np.ones(self.model.size_joint_feature)
        print("initialize w {}".format(self.w))
        n_samples = len(X)
        stopping_criterion = False
        if constraints is None:
            # fresh start
            constraints = [[] for i in range(n_samples)]
            self.last_active = [[] for i in range(n_samples)]
            self.objective_curve_ = []
            self.primal_objective_curve_ = []
            self.timestamps_ = [time()]
        else:
            # warm start
            objective = self._solve_n_slack_qp(constraints, n_samples)
        try:
            # catch ctrl+c to stop training
            # we have to update at least once after going through the dataset
            for iteration in range(self.max_iter):
                # main loop
                self.timestamps_.append(time() - self.timestamps_[0])
                if self.verbose > 0:
                    print("iteration %d" % iteration)
                if self.verbose > 2:
                    print(self)
                new_constraints = 0

                # generate slices through dataset from batch_size
                if self.batch_size < 1 and not self.batch_size == -1:
                    raise ValueError("batch_size should be integer >= 1 or -1,"
                                     "got %s." % str(self.batch_size))
                batch_size = (self.batch_size
                              if self.batch_size != -1 else len(X))
                n_batches = int(np.ceil(float(len(X)) / batch_size))
                slices = gen_even_slices(n_samples, n_batches)
                indices = np.arange(n_samples)
                slack_sum = 0

                for batch in slices:
                    print("in the batch {}".format(batch))
                    new_constraints_batch = 0
                    verbose = max(0, self.verbose - 3)
                    X_b = X[batch]
                    Y_b = Y[batch]
                    indices_b = indices[batch]

                    # candidate_constraints is a list of tuples (y_hat, delta_joint_feature, slack, loss)
                    candidate_constraints = Parallel(
                        n_jobs=self.n_jobs, verbose=verbose)(
                            delayed(find_constraint)(self.model, x, y, self.w)
                            for x, y in zip(X_b, Y_b))

                    # for each batch, gather new constraints
                    for i, x, y, constraint in zip(indices_b, X_b, Y_b,
                                                   candidate_constraints):
                        # loop over samples in batch
                        y_hat, delta_joint_feature, slack, loss = constraint
                        print("found constraints: y_hat {}".format(y_hat))
                        print("delta joint feature {}".format(
                            delta_joint_feature))
                        print("slack {}".format(slack))
                        print("loss {}".format(loss))
                        slack_sum += slack

                        if self.verbose > 3:
                            print("current slack: %f" % slack)

                        if not loss > 0:
                            # can have y != y_hat but loss = 0 in latent svm.
                            # we need this here as djoint_feature is then != 0
                            continue

                        if self._check_bad_constraint(y_hat, slack,
                                                      constraints[i]):
                            print("----bad constraint----")
                            continue
                        else:
                            print(
                                "----good constraint----: index {}".format(i))
                            print("  ")

                        constraints[i].append(
                            [y_hat, delta_joint_feature, loss])
                        new_constraints_batch += 1

                        print(
                            "finish the finding, and new_constraints_batch {}".
                            format(new_constraints_batch))
                    # after processing the slice, solve the qp
                    if new_constraints_batch:
                        print("----------solving n slack qp------------")
                        objective = self._solve_n_slack_qp(
                            constraints, n_samples)
                        new_constraints += new_constraints_batch

                self.objective_curve_.append(objective)
                self._compute_training_loss(X, Y, iteration)

                primal_objective = (self.C * slack_sum + np.sum(self.w**2) / 2)
                self.primal_objective_curve_.append(primal_objective)

                if self.verbose > 0:
                    print("new constraints: %d, "
                          "cutting plane objective: %f primal objective: %f" %
                          (new_constraints, objective, primal_objective))

                if new_constraints == 0:
                    if self.verbose:
                        print("no additional constraints")
                    stopping_criterion = True

                if (iteration > 1 and self.objective_curve_[-1] -
                        self.objective_curve_[-2] < self.tol):
                    if self.verbose:
                        print("objective converged.")
                    stopping_criterion = True

                if stopping_criterion:
                    if (self.switch_to is not None
                            and self.model.inference_method != self.switch_to):
                        if self.verbose:
                            print("Switching to %s inference" %
                                  str(self.switch_to))
                        self.model.inference_method_ = self.model.inference_method
                        self.model.inference_method = self.switch_to
                        stopping_criterion = False
                        continue
                    else:
                        break

                if self.verbose > 5:
                    print(self.w)

                if self.logger is not None:
                    self.logger(self, iteration)
        except KeyboardInterrupt:
            pass

        print("current w {}".format(self.w))
        self.constraints_ = constraints
        if self.verbose and self.n_jobs == 1:
            print("calls to inference: %d" % self.model.inference_calls)

        if verbose:
            print("Computing final objective.")
        self.timestamps_.append(time() - self.timestamps_[0])
        self.primal_objective_curve_.append(self._objective(X, Y))
        self.objective_curve_.append(objective)
        if self.logger is not None:
            self.logger(self, 'final')
        return self
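This looks like a print-instrumented copy of pystruct's n-slack cutting-plane learner; if so, a typical usage sketch would be along these lines (class names assume the pystruct API; the data variables are hypothetical):

# usage sketch, assuming the pystruct API
from pystruct.models import ChainCRF
from pystruct.learners import NSlackSSVM

model = ChainCRF()                               # defines joint_feature, inference and loss
ssvm = NSlackSSVM(model, C=0.1, max_iter=50, verbose=1)
ssvm.fit(X_train, Y_train)                       # hypothetical lists of per-sequence feature/label arrays
Y_pred = ssvm.predict(X_test)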
Example #33
def lnlike_k(ws):
    ''' log likelihood w/ periodic boundary conditions (need for solving w/
    respect to fourier coefficients)
    '''
    return 0.5 * np.sum(
        (aSignal.convolve(ws, _psf) - data_p)**2) / sig_noise**2
Example #34
 def g2(self, X_M):
     return anp.sum(anp.square(X_M - 0.5), axis=1)
Example #35
 def loss(W_flat):
     W = np.reshape(W_flat, (K, D))
     scores = np.dot(X, W.T) + bias
     lp = np.sum(y_oh * scores) - np.sum(logsumexp(scores, axis=1))
     prior = np.sum(-0.5 * (W - mu0)**2 / sigmasq0)
     return -(lp + prior) / N
Example #36
def cost(pts, globalPts, startIdx, distObs, delta_t):
    # Endpoint cost
    posDiff = bspline(0, extractPts(pts,
                                    startIdx + 5)) - globalPts[startIdx + 5]
    velDiff = bsplineVel(0, extractPts(pts, startIdx + 5),
                         delta_t) - bsplineVel(
                             0, extractPts(globalPts, startIdx + 5), delta_t)
    E_ep = lambda_p * np.dot(posDiff, posDiff) + lambda_v * np.dot(
        velDiff, velDiff)

    # Collision cost
    u = np.linspace(0, 1, 5)
    samples = np.vstack((np.repeat(np.arange(6), len(u)), np.tile(u, 6)))

    def computeDist(sample):
        p = bspline(sample[1], extractPts(pts, sample[0] + startIdx))
        return distObs[np.clip(int(p[0]), 0, distObs.shape[0] - 1),
                       np.clip(int(p[1]), 0, distObs.shape[1] - 1)]

    distances = np.apply_along_axis(computeDist, 0, samples)
    mask = distances <= OBSTACLE_DISTANCE_THRESHOLD
    distances[mask] = np.square(distances[mask] - OBSTACLE_DISTANCE_THRESHOLD
                                ) / (2 * OBSTACLE_DISTANCE_THRESHOLD)
    distances[np.invert(mask)] = 0

    def computeVelocities(sample):
        p = bsplineVel(sample[1], extractPts(pts, sample[0] + startIdx),
                       delta_t)
        return norm(p)

    velocities = np.apply_along_axis(computeVelocities, 0, samples)
    E_c = lambda_c * np.sum(np.dot(distances, velocities)) / (len(u) * 6)

    # Squared derivative cost
    q2, q3, q4 = Q_2(delta_t), Q_3(delta_t), Q_4(delta_t)
    E_q = 0
    for i in range(6):
        A = np.dot(M_6, extractPts(pts, startIdx + i))
        B = A.T
        E_q = E_q + np.sum(lambda_q2 * np.dot(np.dot(B, q2), A) +
                           lambda_q3 * np.dot(np.dot(B, q3), A) +
                           lambda_q4 * np.dot(np.dot(B, q4), A))

    # Derivative limit cost
    max_vel, max_acc, max_jerk, max_snap = np.array([1000, 1000]), np.array(
        [1000, 1000]), np.array([1e10, 1e10]), np.array([1e10, 1e10])
    u = np.linspace(0, 1, 5)
    samples = np.vstack((np.repeat(np.arange(6), len(u)), np.tile(u, 6)))

    def derivativeCost(pFunc, max_p, delta_t):
        def f(sample):
            p = pFunc(sample[1], extractPts(pts, sample[0] + startIdx),
                      delta_t)
            norm_max = norm(max_p)
            norm_p = norm(p)
            return np.exp(norm_p - norm_max) - 1 if norm_p > norm_max else 0

        return f

    E_l = 0
    for sample in zip(samples[0], samples[1]):
        E_l = E_l + derivativeCost(bsplineVel, max_vel, delta_t)(sample)
        E_l = E_l + derivativeCost(bsplineAcc, max_acc, delta_t)(sample)
        E_l = E_l + derivativeCost(bsplineJerk, max_jerk, delta_t)(sample)
        E_l = E_l + derivativeCost(bsplineSnap, max_snap, delta_t)(sample)
    E_l = E_l / (len(u) * 6)

    # Total cost
    E = E_ep + E_c + E_q + E_l

    # if not isinstance(E_ep, autograd.numpy.numpy_boxes.ArrayBox):
    #     print('[{}] {} | {} | {} | {} => {}'.format(startIdx, E_ep, E_c, E_q, E_l, E))
    return E
Example #37
def Envelope(Y, T):
    N = len(Y) // T  # number of envelope points; one for each period of wave
    # get magnitude of fundamental only
    mag = Y[:N * T] * np.exp(2j * np.pi * np.arange(N * T) / T)
    return np.abs(np.sum(mag.reshape((N, T)), axis=1)) / T
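A usage sketch (plain NumPy assumed): for an amplitude-modulated carrier with period T samples, the returned envelope has one point per period and tracks half the instantaneous amplitude (projecting onto the fundamental halves it):

import numpy as np

T, N = 32, 100                                     # samples per period, number of periods
n = np.arange(N * T)
amp = 1.0 + 0.5 * np.sin(2 * np.pi * n / (N * T))  # slowly varying amplitude
Y = amp * np.cos(2 * np.pi * n / T)

env = Envelope(Y, T)
assert env.shape == (N,)
assert np.allclose(2 * env, amp.reshape(N, T).mean(axis=1), rtol=0.05)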
Example #38
dd = np.arange(max_duration, step=1)

plt.figure(figsize=(3 * K, 9))
for k in range(K):
    # Plot the durations of the true states
    plt.subplot(3, K, k + 1)
    plt.hist(durations[states == k] - 1, dd, density=True)
    plt.plot(dd,
             nbinom.pmf(dd, true_hsmm.transitions.rs[k],
                        1 - true_hsmm.transitions.ps[k]),
             '-k',
             lw=2,
             label='true')
    if k == K - 1:
        plt.legend(loc="lower right")
    plt.title("State {} (N={})".format(k + 1, np.sum(states == k)))

    # Plot the durations of the inferred states
    plt.subplot(3, K, K + k + 1)
    plt.hist(inf_durations[inf_states == k] - 1, dd, density=True)
    plt.plot(dd,
             nbinom.pmf(dd, hsmm.transitions.rs[k],
                        1 - hsmm.transitions.ps[k]),
             '-r',
             lw=2,
             label='hsmm inf.')
    if k == K - 1:
        plt.legend(loc="lower right")
    plt.title("State {} (N={})".format(k + 1, np.sum(inf_states == k)))

Example #39
 def log_likelihood(self, index=None):
     ll = np.sum([node.log_likelihood(index=index) for node in self.nodes])
     return ll
Example #40
 def fun(input_dict):
     for i, k in enumerate(input_dict):
         A = np.sum(np.sin(input_dict[k])) * (i + 1.0)
         B = np.sum(np.cos(input_dict[k]))
     return A + B
Example #41
 def d_fun(input_dict):
     g = grad(fun)(input_dict)
     A = np.sum(g['item_1'])
     B = np.sum(np.sin(g['item_1']))
     C = np.sum(np.sin(g['item_2']))
     return A + B + C
Example #42
 def fun(input_dict):
     A = np.sum(np.sin(input_dict['item_1']))
     B = np.sum(np.cos(input_dict['item_2']))
     return A + B
Example #43
 def average_variance(Q):
     d = np.sum(Q, axis=1)
     QtQ1 = np.linalg.inv(Q.T / d @ Q)
     return QtQ1.flatten() @ WtW.flatten() / n - ww
Example #44
epRewards = []
for e in range(episodes):
    state = env.reset()
    rewards = []
    while True:
        action = policy(env, w, state, epsilon)
        q_hat = approx(w, state, action)
        q_hat_grad = gradientApprox(w, state, action)
        next_state, reward, done, _ = env.step(action)
        rewards.append(reward)
        if done:
            w += alpha*(reward - q_hat) * q_hat_grad
            break
        else:
            next_action = policy(env, w, next_state, epsilon)
            q_hat_next = approx(w, next_state, next_action)
            w += alpha*(reward + discount*q_hat_next - q_hat)*q_hat_grad  # semi-gradient SARSA update
            state = next_state
    epRewards.append(np.sum(rewards))
    
for i, _ in enumerate(epRewards):
    if i + 100 >= len(epRewards):
        break
    else:
        mean = np.mean(epRewards[i:i+100])
        if mean >= 195:
            print("Episodes before solve", i+1)
            break

plt.plot(epRewards)

Example #45
def lnlike(ws):
    ''' log likelihood 
    '''
    #other = -0.5* np.sum((np.convolve(ws,psf)-data)**2 /sig_noise**2);
    return -0.5 * np.sum((Psi(ws) - data)**2 / sig_noise**2)
Example #46
 def _evaluate(self, x, out, *args, **kwargs):
     part1 = -1. * self.c1 * np.exp(-1. * self.c2 * np.sqrt((1. / self.n_var) * np.sum(x * x, axis=1)))
     part2 = -1. * np.exp((1. / self.n_var) * np.sum(np.cos(self.c3 * x), axis=1))
     out["F"] = part1 + part2 + self.c1 + np.exp(1)
Example #47
 def _evaluate(self, x, out, *args, **kwargs):
     z = anp.power(x, 2) - self.A * anp.cos(2 * anp.pi * x)
     out["F"] = self.A * self.n_var + anp.sum(z, axis=1)
Example #48
def psi_np(X, alpha, beta):
    coefs = (1, 1, beta)[:X.shape[1]]
    return np.exp(-alpha * np.sum(np.dot(X**2, coefs)))
Example #49
def test_slogdet_3d():
    fun = lambda x: np.sum(np.linalg.slogdet(x)[1])
    mat = np.concatenate([(rand_psd(5) + 5 * np.eye(5))[None, ...]
                          for _ in range(3)])
    check_grads(fun)(mat)
Example #50
def bce_loss(x, x_prime):
    temp1 = x * ag_np.log(x_prime + 1e-10)
    temp2 = (1 - x) * ag_np.log(1 - x_prime + 1e-10)
    bce = -ag_np.sum(temp1 + temp2)
    return bce
Example #51
 def log_q(samples_q, q):
     log_q = -0.5 * np.log(
         2 * math.pi * q['v']) - 0.5 * (samples_q - q['m'])**2 / q['v']
     return np.sum(log_q, 1)
Example #52
 def g1(self, X_M):
     return 100 * (self.k + anp.sum(anp.square(X_M - 0.5) - anp.cos(20 * anp.pi * (X_M - 0.5)), axis=1))
Example #53
def lnprior_k(ws, fdensity, alpha, sig):
    #ws = np.absolute(fft.ifft(ws));
    #ws = ws.reshape((n_grid,n_grid));
    pri = np.sum(
        [np.log(prior_i(w, fdensity, alpha, sig)) for w in ws.flatten()])
    return pri.flatten()
Example #54
    def _solve_n_slack_qp(self, constraints, n_samples):
        C = self.C
        joint_features = [c[1] for sample in constraints for c in sample]
        losses = [c[2] for sample in constraints for c in sample]

        joint_feature_matrix = np.vstack(joint_features).astype(float)
        n_constraints = len(joint_features)
        P = cvxopt.matrix(np.dot(joint_feature_matrix, joint_feature_matrix.T))
        # q contains loss from margin-rescaling
        q = cvxopt.matrix(-np.array(losses, dtype=float))
        # constraints are a bit tricky. first, all alpha must be >zero
        idy = np.identity(n_constraints)
        tmp1 = np.zeros(n_constraints)
        # box constraint: sum of all alpha for one example must be <= C
        blocks = np.zeros((n_samples, n_constraints))
        first = 0
        for i, sample in enumerate(constraints):
            blocks[i, first:first + len(sample)] = 1
            first += len(sample)
        # positivity constraints:
        if self.negativity_constraint is None:
            # empty constraints
            zero_constr = np.zeros(0)
            joint_features_constr = np.zeros((0, n_constraints))
        else:
            joint_features_constr = joint_feature_matrix.T[
                self.negativity_constraint]
            zero_constr = np.zeros(len(self.negativity_constraint))

        # put together
        G = cvxopt.sparse(
            cvxopt.matrix(np.vstack((-idy, blocks, joint_features_constr))))
        tmp2 = np.ones(n_samples) * C
        h = cvxopt.matrix(np.hstack((tmp1, tmp2, zero_constr)))

        # solve QP model
        cvxopt.solvers.options['feastol'] = 1e-5
        try:
            solution = cvxopt.solvers.qp(P, q, G, h)
        except ValueError:
            solution = {'status': 'error'}
        if solution['status'] != "optimal":
            print("regularizing QP!")
            P = cvxopt.matrix(
                np.dot(joint_feature_matrix, joint_feature_matrix.T) +
                1e-8 * np.eye(joint_feature_matrix.shape[0]))
            print("P {}".format(P))
            solution = cvxopt.solvers.qp(P, q, G, h)
            if solution['status'] != "optimal":
                raise ValueError("QP solver failed. Try regularizing your QP.")

        # Lagrange multipliers
        a = np.ravel(solution['x'])
        self.prune_constraints(constraints, a)
        self.old_solution = solution

        # Support vectors have non zero lagrange multipliers
        sv = a > self.inactive_threshold * C
        box = np.dot(blocks, a)
        if self.verbose > 1:
            print("%d support vectors out of %d points" %
                  (np.sum(sv), n_constraints))
            # calculate per example box constraint:
            print("Box constraints at C: %d" % np.sum(1 - box / C < 1e-3))
            print("dual objective: %f" % -solution['primal objective'])
        self.w = np.dot(a, joint_feature_matrix)
        return -solution['primal objective']
Example #55
 def nb_marginal_likelihood(r):
     # Compute the log likelihood of data with shape r and
     # MLE estimate p = sum(xs) / (N*r + sum(xs))
     ll = np.sum(gammaln(xs + r)) - np.sum(gammaln(xs + 1)) - N * gammaln(r)
     ll += np.sum(xs * np.log(p_star(r))) + N * r * np.log(1 - p_star(r))
     return ll
Example #56
 def log_prior(samples_q, v_prior):
     log_p0 = -0.5 * np.log(
         2 * math.pi * v_prior) - 0.5 * samples_q**2 / v_prior
     return np.sum(log_p0, 1)
Example #57
def fit_linear_regression(Xs,
                          ys,
                          weights=None,
                          mu0=0,
                          sigmasq0=1,
                          nu0=1,
                          Psi0=1,
                          fit_intercept=True):
    """
    Fit a linear regression y_i ~ N(Wx_i + b, diag(S)) for W, b, S.

    :param Xs: array or list of arrays
    :param ys: array or list of arrays
    :param fit_intercept:  if False drop b
    """
    Xs = Xs if isinstance(Xs, (list, tuple)) else [Xs]
    ys = ys if isinstance(ys, (list, tuple)) else [ys]
    assert len(Xs) == len(ys)

    D = Xs[0].shape[1]
    P = ys[0].shape[1]
    assert all([X.shape[1] == D for X in Xs])
    assert all([y.shape[1] == P for y in ys])
    assert all([X.shape[0] == y.shape[0] for X, y in zip(Xs, ys)])

    mu0 = mu0 * np.zeros((P, D))
    sigmasq0 = sigmasq0 * np.eye(D)

    # Make sure the weights are the weights
    if weights is not None:
        weights = weights if isinstance(weights, (list, tuple)) else [weights]
    else:
        weights = [np.ones(X.shape[0]) for X in Xs]

    # Add weak prior on intercept
    if fit_intercept:
        mu0 = np.column_stack((mu0, np.zeros(P)))
        sigmasq0 = block_diag(sigmasq0, np.eye(1))

    # Compute the posterior
    J = np.linalg.inv(sigmasq0)
    h = np.dot(J, mu0.T)

    for X, y, weight in zip(Xs, ys, weights):
        X = np.column_stack((X, np.ones(X.shape[0]))) if fit_intercept else X
        J += np.dot(X.T * weight, X)
        h += np.dot(X.T * weight, y)

    # Solve for the MAP estimate
    W = np.linalg.solve(J, h).T
    if fit_intercept:
        W, b = W[:, :-1], W[:, -1]
    else:
        b = 0

    # Compute the residual and the posterior variance
    nu = nu0
    Psi = Psi0 * np.eye(P)
    for X, y, weight in zip(Xs, ys, weights):
        yhat = np.dot(X, W.T) + b
        resid = y - yhat
        nu += np.sum(weight)
        tmp1 = np.einsum('t,ti,tj->ij', weight, resid, resid)
        tmp2 = np.sum(weight[:, None, None] * resid[:, :, None] *
                      resid[:, None, :],
                      axis=0)
        assert np.allclose(tmp1, tmp2)
        Psi += tmp1

    # Get MAP estimate of posterior covariance
    Sigma = Psi / (nu + P + 1)

    if fit_intercept:
        return W, b, Sigma
    else:
        return W, Sigma
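A usage sketch on synthetic data (assumes the module-level imports this function relies on, e.g. numpy/autograd.numpy as np and scipy.linalg.block_diag, are in place):

import numpy as np

D, P, T = 3, 2, 500                        # hypothetical sizes: inputs, outputs, samples
W_true = np.random.randn(P, D)
b_true = np.random.randn(P)
X = np.random.randn(T, D)
y = np.dot(X, W_true.T) + b_true + 0.1 * np.random.randn(T, P)

W_hat, b_hat, Sigma_hat = fit_linear_regression(X, y)
assert np.allclose(W_hat, W_true, atol=0.1)
assert np.allclose(b_hat, b_true, atol=0.1)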
Example #58
    def _evaluate(self, x, out, *args, **kwargs):
        f1 = x[:, 0]
        g = 1 + 9.0 / (self.n_var - 1) * anp.sum(x[:, 1:], axis=1)
        f2 = g * (1 - anp.power((f1 / g), 0.5))

        out["F"] = anp.column_stack([f1, f2])
Example #59
def evaluate(y_test, y_pred):
    error_rate = (np.sum(np.equal(y_test, y_pred).astype(float))
            / y_test.size)
    return error_rate
Example #60
 def _cost(self, weights, x, y):
     eta = x @ weights
     l2_term = 0.5 * self.C * np.sum(weights**2)
     cost = l2_term - np.sum(y*eta - np.log(1 + np.exp(eta)))
     return cost
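Costs like this are typically minimized with autograd; a sketch with hypothetical binary-classification data and plain gradient descent:

import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad

npr.seed(0)
X = npr.randn(200, 5)                       # hypothetical features
y = (np.dot(X, npr.randn(5)) > 0).astype(float)

C = 1.0
def cost(w):
    eta = np.dot(X, w)
    return 0.5 * C * np.sum(w**2) - np.sum(y * eta - np.log(1 + np.exp(eta)))

w = np.zeros(5)
for _ in range(500):
    w = w - 0.01 * grad(cost)(w)            # gradient descent on the regularized negative log-likelihood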