Example #1
def plot_field(dyn_field, coords, normfield=False, color=''):
    x = coords[0]
    y = coords[1]
    z = coords[2]
    # This plots the dynamics field first
    row_sums = dyn_field.sum(axis=0)
    if normfield:
        norm_dyn_field = 100 * dyn_field / row_sums
    else:
        norm_dyn_field = dyn_field

    dyn_field[np.isinf(dyn_field)] = np.nan
    norm_dyn_field[np.isinf(norm_dyn_field)] = np.nan

    if color == '':
        obj = quiver3d(x,
                       y,
                       z,
                       norm_dyn_field[0, :, :, :],
                       norm_dyn_field[1, :, :, :],
                       norm_dyn_field[2, :, :, :],
                       opacity=0.9)
    else:

        obj = quiver3d(x,
                       y,
                       z,
                       norm_dyn_field[0, :, :, :],
                       norm_dyn_field[1, :, :, :],
                       norm_dyn_field[2, :, :, :],
                       opacity=0.9,
                       color=color,
                       mode='2dhooked_arrow')
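# A minimal call sketch for plot_field, assuming the quiver3d above is
# mayavi's mlab.quiver3d; the grid and field built here are purely illustrative.
import numpy as np
from mayavi.mlab import quiver3d

x, y, z = np.mgrid[-1:1:8j, -1:1:8j, -1:1:8j]
field = np.stack([-y, x, np.zeros_like(z)])  # simple rotational field, shape (3, 8, 8, 8)
plot_field(field, (x, y, z), normfield=False, color=(1.0, 0.0, 0.0))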
Example #2
    def _parameter_initialiser(self, x, c=None, n=None, t=None, offset=False):
        log_x = np.log(x)
        log_x[np.isnan(log_x)] = -np.inf
        if (2 in c) or (-1 in c):
            heuristic = "Turnbull"
        else:
            heuristic = "Fleming-Harrington"

        data = {'x' : x, 'c' : c, 'n' : n, 't' : t}
        model = para.Parametric(self, 'MPP', data, offset, False, False)
        fitting_info = {}
        fitting_info['rr'] = 'x'
        fitting_info['heuristic'] = heuristic
        fitting_info['on_d_is_0'] = True
        fitting_info['turnbull_estimator'] = 'Fleming-Harrington'
        fitting_info['init'] = None

        model.fitting_info = fitting_info

        if offset:
            results = mpp(model)
            return (results['gamma'], *results['params'])
        else:
            gumb = para.Gumbel.fit(log_x, c, n, t, how='MLE')
            if not gumb.res.success:
                gumb = para.Gumbel.fit(log_x, c, n, t, how='MPP', heuristic=heuristic)
            mu, sigma = gumb.params
            alpha, beta = np.exp(mu), 1. / sigma
            if (np.isinf(alpha) | np.isnan(alpha)):
                alpha = np.median(x)
            if (np.isinf(beta) | np.isnan(beta)):
                beta = 1.
            return alpha, beta
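# Why fitting a Gumbel to log(x) initialises a Weibull: if X ~ Weibull(scale=alpha,
# shape=beta), then log X follows a smallest-extreme-value (Gumbel-min) law with
# loc = log(alpha) and scale = 1/beta -- exactly the inversion used above.
# A quick numerical check (sketch only; scipy's gumbel_l is the Gumbel-min law):
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
alpha, beta = 2.0, 1.5
x = alpha * rng.weibull(beta, size=20000)
loc, scale = stats.gumbel_l.fit(np.log(x))
print(np.exp(loc), 1.0 / scale)  # both should be close to (2.0, 1.5)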
Example #3
    def check_bad_values(self):
        # Check if x or f(x) is NaN or inf - symptoms that the algorithm reached
        # the constraint barrier
        if np.isnan(self.x[self.k]).any() or np.isinf(self.x[self.k]).any() or \
        np.isnan(self.costFunc(self.x[self.k])).any() or np.isinf(self.costFunc(self.x[self.k])).any():
            self.alpha[self.k] *= self.betaParam

        return self.alpha[self.k]
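# The same non-finite guard as a standalone helper; a sketch only, with
# illustrative names (the class above tracks per-iteration step sizes in alpha).
import numpy as np

def shrink_step_if_bad(x, fx, alpha, beta=0.5):
    """Scale the step size down by beta when x or f(x) went NaN/inf."""
    if not (np.all(np.isfinite(x)) and np.all(np.isfinite(fx))):
        alpha *= beta
    return alpha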
Example #4
def is_real_num(x):
    """Return True if x is a real, finite number."""
    try:
        val = float(x)
        return not (np.isnan(val) or np.isinf(val))
    except (ValueError, TypeError):
        return False
Example #5
    def train(self, scale=1.0):
        theta = self.rand_theta(scale)
        self.loss = np.inf
        theta0 = np.copy(theta)
        self.theta = np.copy(theta)

        def loss(theta):
            nlz = self.neg_likelihood(theta)
            return nlz

        gloss = grad(loss)
        
        try:
            fmin_l_bfgs_b(loss, theta0, gloss, maxiter=self.bfgs_iter, m=100, iprint=self.debug)
        except np.linalg.LinAlgError:
            print('Increase noise term and re-optimize')
            theta0 = np.copy(self.theta)
            theta0[1] += np.log(10)
            theta0[2] += np.log(10)
            try:
                fmin_l_bfgs_b(loss, theta0, gloss, maxiter=self.bfgs_iter, m=10, iprint=self.debug)
            except:
                print('Exception caught, L-BFGS early stopping...')
                if self.debug:
                    print(traceback.format_exc())
        except:
            print('Exception caught, L-BFGS early stopping...')
            if self.debug:
                print(traceback.format_exc())

        if (np.isnan(self.loss) or np.isinf(self.loss)):
            print('Failed to build GP model')
            sys.exit(1)

        self.alpha = chol_inv(self.L, self.y.T)
Example #6
def is_real_num(x):
    """Return True if x is a real, finite number."""
    try:
        val = float(x)
        return not (np.isnan(val) or np.isinf(val))
    except (ValueError, TypeError):
        return False
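# Quick usage sketch: NaN, infinities, and non-numeric strings are all rejected.
import numpy as np

print(is_real_num(3.2))      # True
print(is_real_num(np.nan))   # False
print(is_real_num(np.inf))   # False
print(is_real_num("abc"))    # False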
Example #7
def graph_from_smiles(smiles):
    graph = MolGraph()
    mol = MolFromSmiles(smiles)
    if not mol:
        raise ValueError("Could not parse SMILES string: %s" % smiles)
    atoms_by_rd_idx = {}

    rdPartialCharges.ComputeGasteigerCharges(mol)
    for atom in mol.GetAtoms():
        add_Gasteiger = float(atom.GetProp('_GasteigerCharge'))
        if np.isnan(add_Gasteiger) or np.isinf(add_Gasteiger):
            add_Gasteiger = 0.0
        new_atom_node = graph.new_node('atom',
                                       features=atom_features(
                                           atom, add_Gasteiger),
                                       rdkit_ix=atom.GetIdx())
        atoms_by_rd_idx[atom.GetIdx()] = new_atom_node

    for bond in mol.GetBonds():
        atom1_node = atoms_by_rd_idx[bond.GetBeginAtom().GetIdx()]
        atom2_node = atoms_by_rd_idx[bond.GetEndAtom().GetIdx()]
        new_bond_node = graph.new_node('bond', features=bond_features(bond))
        new_bond_node.add_neighbors((atom1_node, atom2_node))
        atom1_node.add_neighbors((atom2_node, ))

    mol_node = graph.new_node('molecule')
    mol_node.add_neighbors(graph.nodes['atom'])
    return graph
Example #8
    def _lop_p(self, theta):

        log_p = 0.0

        for property_type in self._property_types:
            reference_data = self._reference_data[property_type]
            precisions = self._reference_precisions[property_type]

            temperatures = reference_data[:, 0]

            reference_values = reference_data[:, 1]
            surrogate_values = self._surrogate_model.evaluate(
                property_type, theta, temperatures)

            precisions = precisions**-2.0

            if (any(numpy.isnan(surrogate_values))
                    or any(numpy.isinf(surrogate_values))
                    or any(surrogate_values > 1e10)):
                return -numpy.inf

            # Compute likelihood based on gaussian penalty function
            log_p += autograd.numpy.sum(
                distributions.Normal(surrogate_values,
                                     precisions).log_pdf(reference_values))

        return log_p
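# The Gaussian penalty above is a sum of normal log-densities. An equivalent
# standalone sketch using scipy's parameterization by standard deviation
# (the custom distributions.Normal above may parameterize uncertainty differently):
import numpy as np
from scipy import stats

surrogate = np.array([1.0, 2.0, 3.0])   # illustrative surrogate predictions
reference = np.array([1.1, 1.9, 3.2])   # illustrative reference data
sigmas = np.array([0.1, 0.1, 0.2])      # per-point uncertainties
log_p = np.sum(stats.norm(loc=surrogate, scale=sigmas).logpdf(reference))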
Example #9
    def _parameter_initialiser(self, x, c=None, n=None, offset=False):
        log_x = np.log(x)
        log_x[np.isnan(log_x)] = 0
        gumb = para.Gumbel.fit(log_x, c, n, how='MLE')
        if not gumb.res.success:
            gumb = para.Gumbel.fit(log_x, c, n, how='MPP')
        mu, sigma = gumb.params
        alpha, beta = np.exp(mu), 1. / sigma
        if (np.isinf(alpha) | np.isnan(alpha)):
            alpha = np.median(x)
        if (np.isinf(beta) | np.isnan(beta)):
            beta = 1.
        if offset:
            gamma = np.min(x) - (np.max(x) - np.min(x)) / 10.
            return gamma, alpha, beta, 1.
        else:
            return alpha, beta, 1.
Example #10
def stop(x, fx, dfdx, cost, it, step, dcost, dx, scales, updated):
    if dcost is None or dx is None or np.isnan(dcost) or np.isinf(dcost):
        done = False
    else:
        done = it >= maxIts
        if not done and updated:
            done = np.abs(dcost) < dcostTol or np.linalg.norm(dx) < dxTol
    return done
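# stop() reads maxIts, dcostTol and dxTol from an enclosing scope. One way to
# make that dependency explicit is a small factory; a sketch with the same logic:
import numpy as np

def make_stop(maxIts, dcostTol, dxTol):
    def stop(x, fx, dfdx, cost, it, step, dcost, dx, scales, updated):
        if dcost is None or dx is None or np.isnan(dcost) or np.isinf(dcost):
            return False
        done = it >= maxIts
        if not done and updated:
            done = np.abs(dcost) < dcostTol or np.linalg.norm(dx) < dxTol
        return done
    return stop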
Example #11
    def conditional_expected_number_of_purchases_up_to_time(
        self, 
        t, 
        frequency, 
        recency, 
        T
    ):
        """
        Conditional expected number of purchases up to time.

        Calculate the expected number of repeat purchases up to time t for a
        randomly chosen individual from the population, given they have
        purchase history (frequency, recency, T).

        This function uses equation (10) from [2]_.

        Parameters
        ----------
        t: array_like
            times to calculate the expectation for.
        frequency: array_like
            historical frequency of customer.
        recency: array_like
            historical recency of customer.
        T: array_like
            age of the customer.

        Returns
        -------
        array_like

        References
        ----------
        .. [2] Fader, Peter S., Bruce G.S. Hardie, and Ka Lok Lee (2005a),
        "Counting Your Customers the Easy Way: An Alternative to the
        Pareto/NBD Model," Marketing Science, 24 (2), 275-84.
        """

        x = frequency
        r, alpha, a, b = self._unload_params("r", "alpha", "a", "b")

        _a = r + x
        _b = b + x
        _c = a + b + x - 1
        _z = t / (alpha + T + t)
        ln_hyp_term = np.log(hyp2f1(_a, _b, _c, _z))

        # if the value is inf, we are using a different but equivalent
        # formula to compute the function evaluation.
        ln_hyp_term_alt = np.log(hyp2f1(_c - _a, _c - _b, _c, _z)) + (_c - _a - _b) * np.log(1 - _z)
        ln_hyp_term = np.where(np.isinf(ln_hyp_term), ln_hyp_term_alt, ln_hyp_term)
        first_term = (a + b + x - 1) / (a - 1)
        second_term = 1 - np.exp(ln_hyp_term + (r + x) * np.log((alpha + T) / (alpha + t + T)))

        numerator = first_term * second_term
        denominator = 1 + (x > 0) * (a / (b + x - 1)) * ((alpha + T) / (alpha + recency)) ** (r + x)

        return numerator / denominator
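# The fallback above relies on Euler's transformation of the Gauss hypergeometric
# function: 2F1(a, b; c; z) = (1 - z)^(c - a - b) * 2F1(c - a, c - b; c; z).
# A quick numerical check with scipy (parameter values are illustrative):
import numpy as np
from scipy.special import hyp2f1

a, b, c, z = 0.5, 1.2, 2.3, 0.4
lhs = hyp2f1(a, b, c, z)
rhs = (1 - z) ** (c - a - b) * hyp2f1(c - a, c - b, c, z)
assert np.isclose(lhs, rhs)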
Example #12
def _poisson_obj_single(structures,
                        counts,
                        alpha,
                        lengths,
                        bias=None,
                        multiscale_factor=1,
                        multiscale_variances=None,
                        mixture_coefs=None):
    """Computes the poisson objective function for each counts matrix.
    """

    if (bias is not None
            and bias.sum() == 0) or counts.nnz == 0 or counts.null:
        return 0.

    if mixture_coefs is not None and len(structures) != len(mixture_coefs):
        raise ValueError("The number of structures (%d) and of mixture"
                         " coefficents (%d) should be identical." %
                         (len(structures), len(mixture_coefs)))
    elif mixture_coefs is None:
        mixture_coefs = [1.]

    lengths_lowres = decrease_lengths_res(lengths, multiscale_factor)
    ploidy = int(structures[0].shape[0] / lengths_lowres.sum())

    if multiscale_variances is not None:
        if isinstance(multiscale_variances, np.ndarray):
            var_per_dis = multiscale_variances[
                counts.row3d] + multiscale_variances[counts.col3d]
        else:
            var_per_dis = multiscale_variances * 2
    else:
        var_per_dis = 0
    num_highres_per_lowres_bins = counts.count_fullres_per_lowres_bins(
        multiscale_factor)

    lambda_intensity = ag_np.zeros(counts.nnz)
    for struct, gamma in zip(structures, mixture_coefs):
        dis = ag_np.sqrt((ag_np.square(struct[counts.row3d] -
                                       struct[counts.col3d])).sum(axis=1))
        if multiscale_variances is None:
            tmp1 = ag_np.power(dis, alpha)
        else:
            tmp1 = ag_np.power(ag_np.square(dis) + var_per_dis, alpha / 2)
        tmp = tmp1.reshape(-1, counts.nnz).sum(axis=0)
        lambda_intensity = lambda_intensity + gamma * counts.bias_per_bin(
            bias, ploidy) * counts.beta * num_highres_per_lowres_bins * tmp

    # Sum main objective function
    obj = lambda_intensity.sum() - (counts.data *
                                    ag_np.log(lambda_intensity)).sum()

    if ag_np.isnan(obj):
        raise ValueError("Poisson component of objective function is nan")
    elif ag_np.isinf(obj):
        raise ValueError("Poisson component of objective function is infinite")

    return counts.weight * obj
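# At its core the objective is the Poisson negative log-likelihood with the
# log(k!) term dropped (it does not depend on the structure); a minimal sketch:
import numpy as np

lam = np.array([0.5, 2.0, 4.0])          # predicted intensities (lambda_intensity)
counts_data = np.array([1.0, 2.0, 3.0])  # observed counts (counts.data)
obj = lam.sum() - (counts_data * np.log(lam)).sum()
assert np.isfinite(obj)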
Example #13
    def test_optimize_locs_width(self):
        """
        Test the function optimize_locs_width(..). Make sure it does not return 
        unusual results.
        """
        # sample source
        n = 600
        dim = 2
        seed = 17

        ss = data.SSGaussMeanDiff(dim, my=1.0)
        #ss = data.SSGaussVarDiff(dim)
        #ss = data.SSSameGauss(dim)
        # ss = data.SSBlobs()
        dim = ss.dim()

        dat = ss.sample(n, seed=seed)
        tr, te = dat.split_tr_te(tr_proportion=0.5, seed=10)
        xy_tr = tr.stack_xy()

        # initialize test_locs by drawing from a Gaussian fitted to the data
        # number of test locations
        J = 3
        V0 = util.fit_gaussian_draw(xy_tr, J, seed=seed + 1)
        med = util.meddistance(xy_tr, subsample=1000)
        gwidth0 = med**2
        assert gwidth0 > 0

        # optimize
        V_opt, gw2_opt, opt_info = tst.GaussUMETest.optimize_locs_width(
            tr,
            V0,
            gwidth0,
            reg=1e-2,
            max_iter=100,
            tol_fun=1e-5,
            disp=False,
            locs_bounds_frac=100,
            gwidth_lb=None,
            gwidth_ub=None)

        # perform the test using the optimized parameters on the test set
        alpha = 0.01
        ume_opt = tst.GaussUMETest(V_opt,
                                   gw2_opt,
                                   n_simulate=2000,
                                   alpha=alpha)
        test_result = ume_opt.perform_test(te)

        assert test_result['h0_rejected']
        assert util.is_real_num(gw2_opt)
        assert gw2_opt > 0
        assert np.all(np.logical_not((np.isnan(V_opt))))
        assert np.all(np.logical_not((np.isinf(V_opt))))
Example #14
def Newton(f, x, y, n=1000, eps=np.sqrt(eps)):  # eps in the default refers to a module-level tolerance (e.g. machine epsilon)
    while True:
        fxy = f(x, y)
        if np.isinf(fxy).any() or np.isnan(fxy).any() or n == 0:
            return np.nan, np.nan
        else:
            xn, yn = Newton_step(f, x, y)
            # print("x, y / xn, yn:", x, y, "/", xn, yn)
            if (x - xn) * (x - xn) + (y - yn) * (y - yn) <= eps * eps:
                return xn, yn
            x, y = xn, yn
            n = n - 1
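# Newton_step is defined elsewhere; a plausible sketch for a 2-D system
# f(x, y) -> (f1, f2), using a finite-difference Jacobian (an assumption,
# not the original implementation):
import numpy as np

def Newton_step(f, x, y, h=1e-6):
    fxy = np.asarray(f(x, y), dtype=float)
    # column j of the Jacobian: d f / d (x, y)_j, estimated by forward differences
    J = np.column_stack([(np.asarray(f(x + h, y)) - fxy) / h,
                         (np.asarray(f(x, y + h)) - fxy) / h])
    dx, dy = np.linalg.solve(J, -fxy)
    return x + dx, y + dy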
Example #15
def fit_new_py(x, model):
    x0 = np.copy(x).reshape(-1)
    best_x = np.copy(x)
    best_loss = np.inf

    def loss(x0):
        nonlocal best_x
        nonlocal best_loss
        x0 = x0.reshape(model.dim, -1)
        py, ps2 = model.models[0].predict(x0)
        tmp_loss = py.sum()
        for i in range(1, model.outdim):
            py, ps2 = model.models[i].predict(x0)
            tmp_loss += np.maximum(0, py).sum()
        if tmp_loss < best_loss:
            best_loss = tmp_loss
            best_x = np.copy(x0)
        return tmp_loss

    gloss = grad(loss)

    try:
        fmin_l_bfgs_b(loss,
                      x0,
                      gloss,
                      bounds=[[-0.5, 0.5]] * x.size,
                      maxiter=2000,
                      m=100,
                      iprint=model.debug)
    except np.linalg.LinAlgError:
        print('Fit_new_py. Increase noise term and re-optimize')
        x0 = np.copy(best_x).reshape(-1)
        x0[0] += 0.01
        try:
            fmin_l_bfgs_b(loss,
                          x0,
                          gloss,
                          bounds=[[-0.5, 0.5]] * x.size,
                          maxiter=2000,
                          m=10,
                          iprint=model.debug)
        except:
            print('Fit_new_py. Exception caught, L-BFGS early stopping...')
            print(traceback.format_exc())
    except:
        print('Fit_new_py. Exception caught, L-BFGS early stopping...')
        print(traceback.format_exc())

    if (np.isnan(best_loss) or np.isinf(best_loss)):
        print('Fit_new_py. Failed to build GP model')
        sys.exit(1)

    return best_x
Example #16
def kl_tril(L, m, Lzz, u):
    """KL divergence of q(u) and p(u)"""
    M = L.shape[0]
    traceterm = np.sum(np.linalg.solve(Lzz, L)**2)
    mkmterm = np.sum(np.linalg.solve(Lzz,u-m)**2)
    logdetk = 2 * np.sum(np.log(np.abs(np.diag(Lzz))))
    logdets = 2 * np.sum(np.log(np.abs(np.diag(L))))
    kl = 0.5 * (traceterm + logdetk - logdets - M + mkmterm)
    if __verify:
        S = L @ L.T
        Kzz = Lzz @ Lzz.T
        traceterm2 = np.trace(np.linalg.solve(Kzz, S))
        mkmterm2 = np.dot((u-m).T,np.linalg.solve(Kzz, u-m))[0,0]
        logdetk2 = np.log(np.linalg.det(Kzz))
        logdets2 = np.log(np.linalg.det(S))
        wh.assert_close(traceterm, traceterm2)
        wh.assert_close(mkmterm, mkmterm2)
        if not np.isinf(logdetk2) and not np.isnan(logdetk2):
            wh.assert_close(logdetk, logdetk2, rtol=5e-2)
        if not np.isinf(logdets2) and not np.isnan(logdets2):
            wh.assert_close(logdets, logdets2, rtol=5e-2)
    return kl
Example #17
    def optimize(self, x, bounds):
        x0 = np.copy(x)
        self.x = np.copy(x)
        self.loss = np.inf

        def loss(x):
            x = x.reshape(self.dim, x.size // self.dim)
            py, ps2 = self.predict(x)
            py = py.sum()
            if py < self.loss:
                self.loss = py
                self.x = x.copy()
            return py

        gloss = grad(loss)

        try:
            fmin_l_bfgs_b(loss,
                          x0,
                          gloss,
                          bounds=bounds,
                          maxiter=200,
                          m=100,
                          iprint=1)
        except np.linalg.LinAlgError:
            print('Increase noise term and re-optimize')
            x0 = np.copy(self.x)
            x0[0] += 1.0
            try:
                fmin_l_bfgs_b(loss,
                              x0,
                              gloss,
                              bounds=bounds,
                              maxiter=200,
                              m=10,
                              iprint=1)
            except:
                print('Exception caught, L-BFGS early stopping...')
                print(traceback.format_exc())
        except:
            print('Exception caught, L-BFGS early stopping...')
            print(traceback.format_exc())

        print('Optimized loss is %g' % self.loss)
        if (np.isinf(self.loss) or np.isnan(self.loss)):
            print('Failed to build GP model')
            sys.exit(1)

        print('best_x', self.x)
        print('predict', self.predict(self.x))
        print('loss', self.loss)
Example #18
    def train(self, scale=1.0):
        theta = self.rand_theta(scale)
        self.loss = np.inf
        theta0 = np.copy(theta)
        self.theta = theta0.copy()

        def loss(theta):
            nlz = self.neg_likelihood(theta)
            return nlz

        gloss = grad(loss)

        try:
            fmin_l_bfgs_b(loss,
                          theta0,
                          gloss,
                          maxiter=self.bfgs_iter,
                          m=100,
                          iprint=self.debug)
        except np.linalg.LinAlgError:
            print('GP. Increase noise term and re-optimize')
            theta0 = np.copy(self.theta)
            theta0[0] += np.log(10)
            try:
                fmin_l_bfgs_b(loss,
                              theta0,
                              gloss,
                              maxiter=self.bfgs_iter,
                              m=10,
                              iprint=self.debug)
            except:
                print('GP. Exception caught, L-BFGS early stopping...')
                if self.debug:
                    print(traceback.format_exc())
        except:
            print('GP. Exception caught, L-BFGS early stopping...')
            if self.debug:
                print(traceback.format_exc())

        if (np.isinf(self.loss) or np.isnan(self.loss)):
            print('GP. Failed to build GP model')
            sys.exit(1)

        self.alpha = chol_inv(self.L, self.train_y.T)
        if self.k:
            self.for_diag = np.exp(self.theta[1]) * np.exp(
                self.theta[3]) + np.exp(self.theta[3 + self.dim])
        else:
            self.for_diag = np.exp(self.theta[1])
        print('GP. Finished training process')
Example #19
    def conditional_expected_number_of_purchases_up_to_time(
            self, t, frequency, recency, T):
        """
        Conditional expected number of purchases up to time.

        Calculate the expected number of repeat purchases up to time t for a
        randomly chosen individual from the population, given they have
        purchase history (frequency, recency, T).

        Parameters
        ----------
        t: array_like
            times to calculate the expectation for.
        frequency: array_like
            historical frequency of customer.
        recency: array_like
            historical recency of customer.
        T: array_like
            age of the customer.

        Returns
        -------
        array_like

        """
        x = frequency
        r, alpha, a, b = self._unload_params("r", "alpha", "a", "b")

        _a = r + x
        _b = b + x
        _c = a + b + x - 1
        _z = t / (alpha + T + t)
        ln_hyp_term = np.log(hyp2f1(_a, _b, _c, _z))

        # if the value is inf, we are using a different but equivalent
        # formula to compute the function evaluation.
        ln_hyp_term_alt = np.log(hyp2f1(_c - _a, _c - _b, _c,
                                        _z)) + (_c - _a - _b) * np.log(1 - _z)
        ln_hyp_term = np.where(np.isinf(ln_hyp_term), ln_hyp_term_alt,
                               ln_hyp_term)
        first_term = (a + b + x - 1) / (a - 1)
        second_term = 1 - np.exp(ln_hyp_term +
                                 (r + x) * np.log((alpha + T) /
                                                  (alpha + t + T)))

        numerator = first_term * second_term
        denominator = 1 + (x > 0) * (a / (b + x - 1)) * (
            (alpha + T) / (alpha + recency))**(r + x)

        return numerator / denominator
Example #20
def plot_fields(dyn_field, ctrl_field, coords, normfield=False):
    x = coords[0]
    y = coords[1]
    z = coords[2]
    # This plots the dynamics field first
    row_sums = dyn_field.sum(axis=0)
    if normfield:
        norm_dyn_field = 100 * dyn_field / row_sums
    else:
        norm_dyn_field = dyn_field

    dyn_field[np.isinf(dyn_field)] = np.nan
    norm_dyn_field[np.isinf(norm_dyn_field)] = np.nan

    obj = quiver3d(x, y, z, norm_dyn_field[0, :, :, :],
                   norm_dyn_field[1, :, :, :], norm_dyn_field[2, :, :, :])

    obj2 = quiver3d(x,
                    y,
                    z,
                    ctrl_field[0, :, :, :],
                    ctrl_field[1, :, :, :],
                    ctrl_field[2, :, :, :],
                    opacity=0.1)
Example #21
def pixel_likelihood(z, w, m, fluxes, fluxes_ivar, lam0, B):
    """ compute the likelihood of 5 bands given
        z    : (scalar) red-shift of observed source
        w    : (vector) K positive weights for positive rest-frame basis
        x    : (vector) 5 pixel values corresponding to UGRIZ
        lam0 : basis wavelength values
        B    : (matrix) K x P basis 
    """
    if np.isinf(m): 
        return -np.inf
    # at rest frame for lam0
    lam_obs = lam0 * (1. + z)
    spec    = np.dot(w, B)
    mu      = ru.project_to_bands(spec, lam_obs) * m / (1. + z)
    ll      = -0.5 * np.sum(fluxes_ivar * (fluxes-mu)*(fluxes-mu))
    return ll
Example #22
    def train(self):
        theta0 = self.get_default_theta()
        self.loss = np.inf
        self.theta = np.copy(theta0)

        nlz = self.neg_log_likelihood(theta0)

        def loss(theta):
            nlz = self.neg_log_likelihood(theta)
            return nlz

        def callback(theta):
            if self.nlz < self.loss:
                self.loss = self.nlz
                self.theta = np.copy(theta)

        gloss = value_and_grad(loss)

        try:
            fmin_l_bfgs_b(gloss, theta0, maxiter=self.bfgs_iter, m=100, iprint=self.debug, callback=callback)
        except np.linalg.LinAlgError:
            print('GP. Increase noise term and re-optimize')
            theta0 = np.copy(self.theta)
            theta0[0] += np.log(10)
            try:
                fmin_l_bfgs_b(gloss, theta0, maxiter=self.bfgs_iter, m=10, iprint=self.debug, callback=callback)
            except:
                print('GP. Exception caught, L-BFGS early stopping...')
                if self.debug:
                    print(traceback.format_exc())
        except:
            print('GP. Exception caught, L-BFGS early stopping...')
            if self.debug:
                print(traceback.format_exc())

        if (np.isinf(self.loss) or np.isnan(self.loss)):
            print('GP. Failed to build GP model')
            sys.exit(1)


        sn2 = np.exp(self.theta[0])
        K = self.kernel(self.train_x, self.train_x, self.theta) + sn2 * np.eye(self.num_train) + self.jitter*np.eye(self.num_train)
        self.L = np.linalg.cholesky(K)
        self.alpha = chol_inv(self.L, self.train_y.T)
        self.for_diag = np.exp(self.theta[1])
        print('GP. GP model training process finished')
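# The try/except ladder above is a "retry with inflated noise" heuristic: if
# L-BFGS dies on a Cholesky failure, bump the (log-space) noise term and try
# once more, otherwise keep the best hyperparameters seen so far.
# A compact generic sketch of the pattern (names illustrative):
import traceback
import numpy as np
from scipy.optimize import fmin_l_bfgs_b

def optimize_with_retry(loss_and_grad, theta0, maxiter=200):
    try:
        fmin_l_bfgs_b(loss_and_grad, theta0, maxiter=maxiter, m=100)
    except np.linalg.LinAlgError:
        theta0 = np.copy(theta0)
        theta0[0] += np.log(10)  # noise hyperparameter assumed at index 0
        try:
            fmin_l_bfgs_b(loss_and_grad, theta0, maxiter=maxiter, m=10)
        except Exception:
            print(traceback.format_exc())
    except Exception:
        print(traceback.format_exc())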
Example #23
    def fit(self, theta):
        self.loss = np.inf
        theta0 = np.copy(theta)
        self.theta = np.copy(theta)

        def loss(theta):
            nlz = self.log_likelihood(theta)
            return nlz

        gloss = grad(loss)

        try:
            fmin_l_bfgs_b(loss,
                          theta0,
                          gloss,
                          maxiter=self.bfgs_iter,
                          m=100,
                          iprint=0)
        except np.linalg.LinAlgError:
            print('Increase noise term and re-optimize')
            theta0 = np.copy(self.theta)
            theta0[0] += np.log(10)
            try:
                fmin_l_bfgs_b(loss,
                              theta0,
                              gloss,
                              maxiter=self.bfgs_iter,
                              m=10,
                              iprint=0)
            except:
                print('Exception caught, L-BFGS early stopping...')
                if self.debug:
                    print(traceback.format_exc())
        except:
            print('Exception caught, L-BFGS early stopping...')
            if self.debug:
                print(traceback.format_exc())

        # print('Optimized loss is %g' % self.loss)
        if (np.isinf(self.loss) or np.isnan(self.loss)):
            print('Failed to build GP model')
            sys.exit(1)

        sn2, sp2, log_lscale, w = self.split_theta(self.theta)
        Phi = self.calc_Phi(w, scale_x(log_lscale, self.train_x))
        self.alpha = chol_inv(self.LA, np.dot(Phi, self.train_y.T))
Example #24
    def fit(self, x):
        x0 = np.copy(x)
        self.x = np.copy(x)
        self.loss = np.inf

        def loss(x):
            loss = -self.wEI(x)
            if loss < self.loss:
                self.loss = loss
                self.x = np.copy(x)
            return loss

        gloss = grad(loss)

        try:
            fmin_l_bfgs_b(loss,
                          x0,
                          gloss,
                          maxiter=200,
                          m=100,
                          iprint=self.debug)
        except np.linalg.LinAlgError:
            print('Increase noise term and re-optimize')
            x0 = np.copy(self.x)
            x0[0] += 0.01
            try:
                fmin_l_bfgs_b(loss,
                              x0,
                              gloss,
                              maxiter=200,
                              m=10,
                              iprint=self.debug)
            except:
                print('Exception caught, L-BFGS early stopping...')
                print(traceback.format_exc())
        except:
            print('Exception caught, L-BFGS early stopping...')
            print(traceback.format_exc())

        if (np.isnan(self.loss) or np.isinf(self.loss)):
            print('Failed to build GP model')
            sys.exit(1)

        return self.x
Example #25
    def train(self):
        theta0 = self.get_default_theta()
        self.loss = np.inf
        self.theta = np.copy(theta0)
        hyp_bounds = [[None, None]] * (self.dim + 3)
        hyp_bounds.extend([[-1, 1]])

        nlz = self.neg_log_likelihood(theta0)

        def loss(theta):
            nlz = self.neg_log_likelihood(theta)
            return nlz

        def callback(theta):
            if self.nlz < self.loss:
                self.loss = self.nlz
                self.theta = np.copy(theta)

        gloss = value_and_grad(loss)

        try:
            fmin_l_bfgs_b(gloss, theta0, bounds=hyp_bounds, maxiter=self.bfgs_iter, m=100, iprint=self.debug, callback=callback)
        except np.linalg.LinAlgError:
            print('TGP. Increase noise term and re-optimize')
            theta0 = np.copy(self.theta)
            theta0[self.dim+1] += np.log(10)
            theta0[self.dim+2] += np.log(10)
            try:
                fmin_l_bfgs_b(gloss, theta0, bounds=hyp_bounds, maxiter=self.bfgs_iter, m=10, iprint=self.debug, callback=callback)
            except:
                print('TGP. Exception caught, L-BFGS early stopping...')
                if self.debug:
                    print(traceback.format_exc())
        except:
            print('TGP. Exception caught, L-BFGS early stopping...')
            if self.debug:
                print(traceback.format_exc())

        if (np.isinf(self.loss) or np.isnan(self.loss)):
            print('TGP. Failed to build TGP model')
            sys.exit(1)

        print('TGP. TGP model training process finished')
Example #26
    def test_optimize_locs_width(self):
        """
        Test the function optimize_locs_width(..). Make sure it does not return 
        unusual results.
        """
        # sample source 
        n = 600
        dim = 2
        seed = 17

        ss = data.SSGaussMeanDiff(dim, my=1.0)
        #ss = data.SSGaussVarDiff(dim)
        #ss = data.SSSameGauss(dim)
        # ss = data.SSBlobs()
        dim = ss.dim()

        dat = ss.sample(n, seed=seed)
        tr, te = dat.split_tr_te(tr_proportion=0.5, seed=10)
        xy_tr = tr.stack_xy()

        # initialize test_locs by drawing from a Gaussian fitted to the data
        # number of test locations
        J = 3
        V0 = util.fit_gaussian_draw(xy_tr, J, seed=seed+1)
        med = util.meddistance(xy_tr, subsample=1000)
        gwidth0 = med**2
        assert gwidth0 > 0

        # optimize
        V_opt, gw2_opt, opt_info = tst.GaussUMETest.optimize_locs_width(tr, V0, gwidth0, reg=1e-2,
            max_iter=100,  tol_fun=1e-5, disp=False, locs_bounds_frac=100,
            gwidth_lb=None, gwidth_ub=None)

        # perform the test using the optimized parameters on the test set
        alpha = 0.01
        ume_opt = tst.GaussUMETest(V_opt, gw2_opt, n_simulate=2000, alpha=alpha)
        test_result = ume_opt.perform_test(te)

        assert test_result['h0_rejected']
        assert util.is_real_num(gw2_opt)
        assert gw2_opt > 0
        assert np.all(np.logical_not((np.isnan(V_opt))))
        assert np.all(np.logical_not((np.isinf(V_opt))))
Example #27
    def fit(self, theta):
        theta0     = theta.copy()
        self.loss  = np.inf
        self.theta = theta0

        def loss(w):
            nlz = self.log_likelihood(w)
            return nlz

        gloss      = grad(loss)
        try:
            fmin_l_bfgs_b(loss, theta0, gloss, maxiter=self.bfgs_iter, m=100, iprint=1)
        except np.linalg.LinAlgError:
            print("Increase noise term and re-optimization")
            theta0     = np.copy(self.theta);
            theta0[0] += np.log(10);
            try:
                fmin_l_bfgs_b(loss, theta0, gloss, maxiter=self.bfgs_iter, m=10, iprint=1)
            except:
                print("Exception caught, L-BFGS early stopping...")
                if self.debug:
                    print(traceback.format_exc())
        except:
            print("Exception caught, L-BFGS early stopping...")
            if self.debug:
                print(traceback.format_exc())

        print("Optimized loss is %g" % self.loss)
        if np.isinf(self.loss) or np.isnan(self.loss):
            print("Failed to build GP model")
            sys.exit(1)

        # pre-computation
        log_sn      = self.theta[0]
        log_sp      = self.theta[1]
        log_lscales = self.theta[2:2 + self.dim]
        w           = self.theta[2 + self.dim:]
        sn2         = np.exp(2 * log_sn)
        sp          = np.exp(log_sp)
        sp2         = np.exp(2 * log_sp)
        Phi         = self.calc_Phi(w, scale_x(self.train_x, log_lscales))
        m           = self.m
        self.alpha  = chol_solve(self.LA, np.dot(Phi, self.train_y_zero.T))
Example #28
def next_t(path, t, dist):
    p = path.point(t)
    L = path.length()
    # t += 1.0 / np.abs(path.derivative(t))
    # curv_spacing is assumed to be a module-level constant
    dd = dist / (1 + curv_spacing * np.abs(path.curvature(t)))
    if np.isinf(dd) or dd == 0:
        dd = dist
    itr = 0
    while itr < 50:
        itr += 1
        p1 = path.point(t)
        err = np.abs(p1 - p) - dd
        d1 = path.derivative(t)
        if np.abs(err) < 1e-5:
            return t, p1, d1 / np.abs(d1)
        derr = np.abs(d1) * L
        # do a step in Newton's method (clipped because some of the
        # gradients in the curve are really small)
        t -= np.clip(err / derr, -1e-2, 1e-2)
        t = np.clip(t, 0, 1)
    return t, p, d1 / np.abs(d1)
Example #29
File: lols.py Project: hal3/aglols
def sgd(learner, numEpochs, mkTrainingData, devData, testData, weights, computeLosses=None, batchSize=1, outputFrequency=1, outputExpDelay=False, eta0=0.01, initial_t=0, power_t=0.5, extraObjective=None, adaptive=False, clipping=False, targetDict=None, senseIsMinimize=True):
    global globalEpoch, globalBestWeights
    globalEpoch,globalBestWeights = 0, None
    printUpdate  = makePrintUpdate(learner, mkTrainingData, devData, testData, computeLosses, targetDict=targetDict, senseIsMinimize=senseIsMinimize)
    globalEpoch = 0
    sum_grad_squared = None
    totalExamples = 0
    for epoch in range(1, numEpochs+1):
        trainingData = mkTrainingData()
        for start in range(0, len(trainingData), batchSize):
            data = trainingData[start:start+batchSize]
        
            learner.set_weights_copy(weights.copy())
            obj_and_grad = value_and_grad(learner, data, weights, extraObjective)
            _, gradient = obj_and_grad(weights)
            eta = eta0 / (1 if power_t == 0 else ((epoch + initial_t) ** power_t))
            gradient[np.isnan(gradient)] = 0
            gradient[np.isinf(gradient)] = 0
            gradient *= eta
            if clipping:
                numBig = sum(gradient < -1) + sum(gradient > 1)
                if numBig > 0:
                    print('clipping %d / %d gradient terms, avg|grad| %g' % (numBig, len(gradient), np.mean(np.abs(gradient))))
                    gradient[gradient > 1] = 1
                    gradient[gradient < -1] = -1
            if adaptive:
                if sum_grad_squared is None:
                    sum_grad_squared = 1e-4 + gradient * gradient
                else:
                    gradient /= np.sqrt(sum_grad_squared)
                    sum_grad_squared += gradient * gradient
            weights -= gradient
            if outputExpDelay and log2(totalExamples) != log2(totalExamples+len(data)):
                printUpdate(weights, totalExamples)
            totalExamples += len(data)
        if epoch % outputFrequency == 0:
            printUpdate(weights)
    return globalBestWeights
Example #30
def load_stamps_and_samps(gstamps):

    # gather all stamp files!
    print "loading available stamps"
    gstamps.sort()
    stamp_ids = extract_stamp_ids(gstamps)
    stamps    = stamps2array(gstamps)

    # gather all samps!
    print "loading MCMC sample files"
    gal_chain_template = 'samp_cache/run5/gal_samps_stamp_%s_chain_0.bin'
    gal_chain_files = [gal_chain_template%sid for sid in stamp_ids]
    chain_mask      = np.zeros(len(stamp_ids), dtype=bool)  # keep track of the ones that actually have samples
    Nselect = 500
    Nskip   = 5
    samps  = []
    for i,chain in enumerate(gal_chain_files):
        print "Galaxy", os.path.basename(chain)

        ## 0. load four chains from disk
        src_samp_chains, ll_samp_chains, eps_samp_chains = \
            io.load_mcmc_chains(chain, num_chains=4)

        if len(src_samp_chains) > 0:
            th            = rec2matrix( np.concatenate(src_samp_chains))
            # make sure there are no infinite samples
            if np.any(np.isinf(th)) or np.any(np.isnan(th)):
                continue
            chain_mask[i] = True
            samps.append(th[-Nselect*Nskip:-1:Nskip, :])

    print "There are %d chains with either missing, zeros, or otherwise unsuitable samples"%((~chain_mask).sum())

    # samps and stamps now aligned
    stamps = stamps[chain_mask, :, :, :]
    samps  = np.array(samps)
    return stamps, samps
Example #31
def vi_obj(theta, q, ln_q, ln_1_q, ln_s, mu, sigma, n_u, n_y, raw_sample_w):

    c_theta = theta[:8]

    u = theta[8:8 + 3 * n_u + 9 * n_u**2]

    mu_u = theta[8 + 3 * n_u + 9 * n_u**2:8 + 3 * n_u + 9 * n_u**2 +
                 n_u].reshape(-1, 1)

    sigma_u = theta[8 + 3 * n_u + 9 * n_u**2 + n_u:-6].reshape(-1, 1)

    C_u, C_g_u = kernel(c_theta, mu_u, sigma_u)

    C_wu, C_g_wu = kernel_test(c_theta, mu, sigma, mu_u, sigma_u)

    C_wu = C_wu[:3 * n_u, :].transpose()

    for i in range(0, 10):
        C_g_wu[i] = C_g_wu[i][:3 * n_u, :].transpose()

    C_diag_w, C_g_diag_w = kernel_diag(c_theta, mu, sigma)

    sample_w, A_u_g, L_u_g, C_u_g, C_diag_w_g, C_wu_g = \
        get_sample_w(u, C_u.ravel(), C_wu.ravel(), C_diag_w.ravel(), raw_sample_w, n_u, n_y)

    # mu_shift = match_prior(mu_shift_0=theta[-6:], C_u=C_u, sample_size=32)

    mu_shift = theta[-6:]

    link_ll = mc_link_lik(sample_w, mu_shift, q, ln_q, ln_1_q, ln_s)

    kl = get_noraml_kl(u, C_u.ravel(), n_u)

    link_g = -get_mc_link_g(sample_w, mu_shift, q, ln_q, ln_1_q, ln_s)

    mu_shift_g = -get_mu_shift_g(sample_w, mu_shift, q, ln_q, ln_1_q, ln_s)

    kl_g = get_kl_g(u, C_u.ravel(), n_u).ravel()

    kl_g_C_u = get_kl_g_C_u(u, C_u.ravel(), n_u).ravel()

    # zero out NaN/inf entries in every gradient before assembling the update
    for g_arr in (mu_shift_g, kl_g, A_u_g, L_u_g, C_u_g,
                  C_diag_w_g, C_wu_g, link_g, kl_g_C_u):
        g_arr[numpy.isnan(g_arr)] = 0
        g_arr[numpy.isinf(g_arr)] = 0

    obj = -link_ll + kl

    u_g = numpy.zeros_like(u)

    u_g[:3 * n_u] = numpy.matmul(link_g.ravel().reshape(1, -1), A_u_g)

    u_g[3 * n_u:] = numpy.matmul(link_g.ravel().reshape(1, -1), L_u_g)

    u_g = u_g + kl_g

    theta_g = numpy.zeros_like(c_theta)

    for i in range(0, len(theta_g)):

        theta_g[i] = numpy.matmul(numpy.matmul(link_g.ravel().reshape(1, -1), C_u_g),
                                  C_g_u[i].ravel().reshape(-1, 1))[0][0] + \
        numpy.matmul(numpy.matmul(link_g.ravel().reshape(1, -1), C_wu_g),
                     C_g_wu[i].ravel().reshape(-1, 1))[0][0] + \
        numpy.matmul(numpy.matmul(link_g.ravel().reshape(1, -1), C_diag_w_g),
                     C_g_diag_w[i].ravel().reshape(-1, 1))[0][0] + \
        numpy.matmul(kl_g_C_u.ravel().reshape(1, -1), C_g_u[i].ravel().reshape(-1, 1))[0][0]

    mu_C_u_g = numpy.matmul(link_g.ravel().reshape(1, -1), C_u_g).reshape(3*n_u, 3*n_u) * C_g_u[8] + \
        kl_g_C_u.reshape(3*n_u, 3*n_u) * C_g_u[8]

    sigma_C_u_g = numpy.matmul(link_g.ravel().reshape(1, -1), C_u_g).reshape(3*n_u, 3*n_u) * C_g_u[9] + \
        kl_g_C_u.reshape(3*n_u, 3*n_u) * C_g_u[9]

    mu_C_wu_g = numpy.matmul(link_g.ravel().reshape(1, -1), C_wu_g).reshape(
        -1, 3 * n_u) * C_g_wu[8]

    sigma_C_wu_g = numpy.matmul(link_g.ravel().reshape(1, -1), C_wu_g).reshape(
        -1, 3 * n_u) * C_g_wu[9]

    mu_g = numpy.sum(mu_C_u_g[numpy.arange(0, n_u)*3, :], axis=1).ravel()*2 + \
        numpy.sum(mu_C_u_g[numpy.arange(0, n_u)*3+1, :], axis=1).ravel()*2 + \
        numpy.sum(mu_C_u_g[numpy.arange(0, n_u)*3+2, :], axis=1).ravel()*2 + \
        numpy.sum(mu_C_wu_g[:, numpy.arange(0, n_u)*3], axis=0).ravel() + \
        numpy.sum(mu_C_wu_g[:, numpy.arange(0, n_u)*3+1], axis=0).ravel() + \
        numpy.sum(mu_C_wu_g[:, numpy.arange(0, n_u)*3+2], axis=0).ravel()

    sigma_g = numpy.sum(sigma_C_u_g[numpy.arange(0, n_u)*3, :], axis=1).ravel()*2 + \
        numpy.sum(sigma_C_u_g[numpy.arange(0, n_u)*3+1, :], axis=1).ravel()*2 + \
        numpy.sum(sigma_C_u_g[numpy.arange(0, n_u)*3+2, :], axis=1).ravel()*2 + \
        numpy.sum(sigma_C_wu_g[:, numpy.arange(0, n_u)*3], axis=0).ravel() + \
        numpy.sum(sigma_C_wu_g[:, numpy.arange(0, n_u)*3+1], axis=0).ravel() + \
        numpy.sum(sigma_C_wu_g[:, numpy.arange(0, n_u)*3+2], axis=0).ravel()

    obj_g = numpy.hstack([theta_g, u_g, mu_g, sigma_g])

    obj_g[numpy.isnan(obj_g)] = 0

    obj_g[numpy.isinf(obj_g)] = 0

    # print(numpy.array([numpy.sum(ln_s)/n_y, link_ll/n_y, -link_ll, kl, obj]))

    return obj, numpy.hstack([obj_g, mu_shift_g]), -link_ll, -kl
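# numpy has a built-in for the manual NaN/inf masking used throughout this
# example; a sketch (the posinf/neginf keywords require numpy >= 1.17).
# Note nan_to_num returns a cleaned copy rather than editing in place:
import numpy

def zero_nonfinite(a):
    return numpy.nan_to_num(a, nan=0.0, posinf=0.0, neginf=0.0)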
Example #32
def match_prior(mu_shift_0,
                C_u,
                sample_size=128,
                lr=1e-3,
                beta_1=0.9,
                beta_2=0.999,
                maxiter=1024,
                factr=1e-4):

    n_u = int(numpy.shape(C_u)[0] / 3)

    L = []

    m = numpy.zeros(6)

    v = numpy.zeros(6)

    mu_shift = mu_shift_0

    fin_mu_shift = mu_shift_0

    fin_L = None

    for i in range(0, maxiter):

        w = scipy.stats.multivariate_normal(mean=numpy.zeros(3 * n_u),
                                            cov=C_u).rvs(sample_size)

        L.append(prior_error(mu_shift, w, n_u))

        g = prior_error_grad(mu_shift, w, n_u)

        g[numpy.isnan(g)] = 0.0

        g[numpy.isinf(g)] = 0.0

        m = beta_1 * m + (1 - beta_1) * g

        v = beta_2 * v + (1 - beta_2) * g * g

        mu_shift = mu_shift - lr * m / (v**0.5 + eps)

        if len(L) >= 2:
            if L[-1] < numpy.min(L[:-1]):

                fin_L = L[-1].copy()

                fin_mu_shift = mu_shift.copy()

        if len(L) > 32:

            previous_opt = numpy.min(L.copy()[:-32])

            current_opt = numpy.min(L.copy()[-32:])

            if previous_opt - current_opt <= numpy.abs(previous_opt * factr):

                break

    print(
        '============================================================================='
    )

    print('Prior Matched: ')

    print('Total Iterations: ' + str(i),
          ', Loss: ' + str(fin_L) + ', Mu_Shift:' + str(fin_mu_shift))

    print(
        '============================================================================='
    )

    return fin_mu_shift
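# match_prior's update is Adam without bias correction (eps is assumed to be a
# small module-level constant). The same step as a self-contained sketch:
import numpy

def adam_step(param, g, m, v, lr=1e-3, beta_1=0.9, beta_2=0.999, eps=1e-8):
    # one (bias-uncorrected) Adam step, as used in match_prior above
    m = beta_1 * m + (1 - beta_1) * g
    v = beta_2 * v + (1 - beta_2) * g * g
    return param - lr * m / (v ** 0.5 + eps), m, v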
Example #33
def get_calibration(t_test, mu_test, sigma_test, mu_w, cov_w, mu_shift):

    n_y = numpy.shape(mu_test)[0]

    n_t = numpy.shape(t_test)[1]

    q_hat = numpy.zeros((n_y, n_t))

    s_hat = numpy.zeros((n_y, n_t))

    for i in range(0, n_y):

        ln_s = scipy.stats.norm.logpdf(x=t_test,
                                       loc=mu_test[i, :],
                                       scale=sigma_test[i, :]).reshape(-1, 1)

        feature_q = numpy.hstack([
            scipy.stats.norm.logcdf(x=t_test,
                                    loc=mu_test[i, :],
                                    scale=sigma_test[i, :]).reshape(-1, 1),
            scipy.stats.norm.logsf(x=t_test,
                                   loc=mu_test[i, :],
                                   scale=sigma_test[i, :]).reshape(-1, 1),
            numpy.ones((n_t, 1))
        ])

        w_sample = scipy.stats.multivariate_normal.rvs(size=1024,
                                                       mean=mu_w[i, :],
                                                       cov=cov_w[i, :, :])

        w_sample[:, 0] = -numpy.exp(w_sample[:, 0] / mu_shift[0] + mu_shift[1])

        w_sample[:, 1] = numpy.exp(w_sample[:, 1] / mu_shift[2] + mu_shift[3])

        w_sample[:, 2] = w_sample[:, 2] / mu_shift[4] + mu_shift[5]

        raw_prod = numpy.matmul(feature_q, w_sample.transpose())

        MAX = raw_prod.copy()

        MAX[MAX < 0] = 0

        q_hat[i, :] = numpy.mean(numpy.exp(-MAX) /
                                 (numpy.exp(-MAX) + numpy.exp(raw_prod - MAX)),
                                 axis=1).ravel()

        tmp_de = numpy.where(
            raw_prod <= 0, 2 * numpy.log(1 + numpy.exp(raw_prod)),
            2 * (raw_prod + numpy.log(1 + 1 / numpy.exp(raw_prod))))

        ln_s_hat = (raw_prod + numpy.log(
            (w_sample[:, 0] + w_sample[:, 1]) *
            numpy.exp(feature_q[:, 0].reshape(-1, 1)) - w_sample[:, 0]) -
                    feature_q[:, 0].reshape(-1, 1) -
                    feature_q[:, 1].reshape(-1, 1) - tmp_de) + ln_s

        mc_s_hat = numpy.exp(ln_s_hat)

        mc_s_hat[numpy.isnan(mc_s_hat)] = 0

        mc_s_hat[numpy.isinf(mc_s_hat)] = 0

        s_hat[i, :] = numpy.mean(mc_s_hat, axis=1).ravel()

    return s_hat, q_hat
Example #34
def polyinterp(points, doPlot=None, xminBound=None, xmaxBound=None):
    """ polynomial interpolation
    Parameters
    ----------
    points: shape(pointNum, 3), three columns represents x, f, g
    doPolot: set to 1 to plot, default 0
    xmin: min value that brackets minimum (default: min of points)
    xmax: max value that brackets maximum (default: max of points)
    
    set f or g to sqrt(-1)=1j if they are not known
    the order of the polynomial is the number of known f and g values minus 1

    Returns
    -------
    minPos:
    fmin:
    """
    
    if doPlot is None:
        doPlot = 0

    nPoints = points.shape[0]
    order = np.sum(np.imag(points[:, 1:3]) == 0) -1
    
    # code for most common case: cubic interpolation of 2 points
    if nPoints == 2 and order == 3 and doPlot == 0:
        [minVal, minPos] = [np.min(points[:,0]), np.argmin(points[:,0])]
        notMinPos = 1 - minPos
        d1 = points[minPos,2] + points[notMinPos,2] - 3*(points[minPos,1]-\
                points[notMinPos,1])/(points[minPos,0]-points[notMinPos,0])

        t_d2 = d1**2 - points[minPos,2]*points[notMinPos,2]
        if t_d2 > 0:
            d2 = np.sqrt(t_d2)
        else:
            d2 = np.sqrt(-t_d2) * 1j  # np.complex was removed from numpy
        if np.isreal(d2):
            t = points[notMinPos,0] - (points[notMinPos,0]-points[minPos,0])*\
                    ((points[notMinPos,2]+d2-d1)/(points[notMinPos,2]-\
                    points[minPos,2]+2*d2))
            minPos = np.min([np.max([t,points[minPos,0]]), points[notMinPos,0]])
        else:
            minPos = np.mean(points[:,0])
        fmin = minVal
        return (minPos, fmin)
    
    xmin = np.min(points[:,0])
    xmax = np.max(points[:,0])

    # compute bounds of interpolation area
    if xminBound is None:
        xminBound = xmin
    if xmaxBound is None:
        xmaxBound = xmax

    # constraints based on available function values
    A = np.zeros((0, order+1))
    b = np.zeros((0, 1))
    for i in range(nPoints):
        if np.imag(points[i,1]) == 0:
            constraint = np.zeros(order+1)
            for j in np.arange(order,-1,-1):
                constraint[order-j] = points[i,0]**j
            A = np.vstack((A, constraint))
            b = np.append(b, points[i,1])
    
    # constraints based on available derivatives
    for i in range(nPoints):
        if np.isreal(points[i,2]):
            constraint = np.zeros(order+1)
            for j in range(1,order+1):
                constraint[j-1] = (order-j+1)* points[i,0]**(order-j)
            A = np.vstack((A, constraint))
            b = np.append(b,points[i,2])
    
    # find interpolating polynomial
    params = np.linalg.solve(A, b)

    # compute critical points
    dParams = np.zeros(order)
    for i in range(params.size-1):
        dParams[i] = params[i] * (order-i)
    
    if np.any(np.isinf(dParams)):
        cp = np.concatenate((np.array([xminBound, xmaxBound]), points[:,0]))
    else:
        cp = np.concatenate((np.array([xminBound, xmaxBound]), points[:,0], \
                np.roots(dParams)))
    
    # test critical points
    fmin = np.inf
    minPos = (xminBound + xmaxBound)/2.
    for xCP in cp:
        if np.imag(xCP) == 0 and xCP >= xminBound and xCP <= xmaxBound:
            fCP = np.polyval(params, xCP)
            if np.imag(fCP) == 0 and fCP < fmin:
                minPos = np.double(np.real(xCP))
                fmin = np.double(np.real(fCP))
    
    # plot situation (omit this part for now since we are not going to use it
    # anyway)

    return (minPos, fmin)
Example #35
def isLegal(v):
    return np.sum(np.any(np.imag(v)))==0 and np.sum(np.isnan(v))==0 and \
            np.sum(np.isinf(v))==0
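# Quick checks: isLegal rejects vectors containing imaginary, NaN, or inf entries.
import numpy as np

assert isLegal(np.array([1.0, 2.0]))
assert not isLegal(np.array([1.0, np.nan]))
assert not isLegal(np.array([np.inf, 0.0]))
assert not isLegal(np.array([1.0 + 1j, 0.0]))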
Example #36
    #####################################################################
    # fit model to galaxy shape parameters
    # 
    #   re  - [0, infty], transformation log
    #   ab  - [0, 1], transformation log (ab / (1 - ab))
    #   phi - [0, 180], transformation log (phi / (180 - phi))
    #
    ######################################################################
    print "fitting galaxy shape"
    shape_df = np.row_stack([ coadd_df[['expRad_r', 'expAB_r', 'expPhi_r']].values,
                              coadd_df[['deVRad_r', 'deVAB_r', 'deVPhi_r']].values ])[::3,:]
    shape_df[:,0] = np.log(shape_df[:,0])
    shape_df[:,1] = np.log(shape_df[:,1]) - np.log(1.-shape_df[:,1])
    shape_df[:,2] = shape_df[:,2] * (np.pi / 180.)

    bad_idx = np.any(np.isinf(shape_df), axis=1)
    shape_df = shape_df[~bad_idx,:]
    gal_re_mog = fit_mog(shape_df[:,0], mog_class = GalRadiusMoG, max_comps=50)
    gal_ab_mog = fit_mog(shape_df[:,1], mog_class = GalAbMoG, max_comps=50)

    with open('gal_re_mog.pkl', 'wb') as f:
        pickle.dump(gal_re_mog, f)

    with open('gal_ab_mog.pkl', 'wb') as f:
        pickle.dump(gal_ab_mog, f)


    #####################################################################
    # fit star => galaxy proposal distributions
    #
    #   re  - [0, infty], transformation log
Example #37
def to_log_nanomaggies(mags):
    fluxes  = np.log(mags2nanomaggies(mags))
    bad_idx = np.any(np.isinf(fluxes), axis=1)
    return fluxes[~bad_idx, :]