Example #1
 def image_bbox(params, img):
     img_ymax, img_xmax = img.nelec.shape
     px, py = img.equa2pixel(params.u)
     xlim = (np.max([0,        int(np.floor(px - pixel_radius))]),
             np.min([img_xmax, int(np.ceil(px + pixel_radius))]))
     ylim = (np.max([0,        int(np.floor(py - pixel_radius))]),
             np.min([img_ymax, int(np.ceil(py + pixel_radius))]))
     return xlim, ylim
Example #2
 def get_bounding_box(params, img):
     if params.is_star():
         bound = img.R
     elif params.is_galaxy():
         bound = gal_funs.gen_galaxy_psf_image_bound(params, img)
     else:
         raise "source type unknown"
     px, py = img.equa2pixel(params.u)
     xlim = (np.max([0,                  np.floor(px - bound)]),
             np.min([img.nelec.shape[1], np.ceil(px + bound)]))
     ylim = (np.max([0,                  np.floor(py - bound)]),
             np.min([img.nelec.shape[0], np.ceil(py + bound)]))
     return xlim, ylim
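Both examples above follow the same pattern: take a floor/ceil window around a centre pixel and clamp it to the image bounds. A minimal standalone sketch of that pattern, with a plain centre point and radius in place of the catalogue objects (clamp_bbox is illustrative only):

import numpy as np

def clamp_bbox(px, py, radius, img_xmax, img_ymax):
    # clip a window of half-width `radius` around (px, py) to the image bounds
    xlim = (max(0, int(np.floor(px - radius))),
            min(img_xmax, int(np.ceil(px + radius))))
    ylim = (max(0, int(np.floor(py - radius))),
            min(img_ymax, int(np.ceil(py + radius))))
    return xlim, ylim

print(clamp_bbox(5.3, 2.1, 4, img_xmax=100, img_ymax=80))  # ((1, 10), (0, 7))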
Example #3
    def plot(self, name, plot_std=False):
        if self.X.shape[1] > 1:
            raise Exception('Dimension of X should be 1 for this method...')

        x = np.linspace(np.min(self.X), np.max(self.X), 100).reshape(-1, 1)
        self.optimize(restart=2)
        self.likelihood(self.params)
        mean, std = self.inference(x, return_std=True)
        plt.plot(x, mean, "--", label='GPR-' + str(name), color='deepskyblue')
        plt.scatter(self.X,
                    self.y,
                    label='GPR-Train' + str(name),
                    color='deepskyblue')
        if plot_std:
            plt.fill_between(x.ravel(),
                             mean.ravel() + 2. * std,
                             mean.ravel() - 2. * std,
                             alpha=0.2,
                             color='deepskyblue')
            plt.fill_between(x.ravel(),
                             mean.ravel() + 1. * std,
                             mean.ravel() - 1. * std,
                             alpha=0.3,
                             color='deepskyblue')
        plt.xlabel('$x$')
        plt.ylabel('$y$')
        plt.legend()
Example #4
    def plot_cost_history(self, ax, history, start):
        # plotting colors
        colors = ['k']

        # plot cost function history
        ax.plot(np.arange(start, len(history), 1),
                history[start:],
                linewidth=3,
                color='k')

        # clean up panel / axes labels
        xlabel = 'step $k$'
        ylabel = r'$g\left(\mathbf{w}^k\right)$'
        ax.set_xlabel(xlabel, fontsize=14)
        ax.set_ylabel(ylabel, fontsize=14, rotation=0, labelpad=25)
        title = 'cost history'
        ax.set_title(title, fontsize=18)

        # plotting limits
        xmin = 0
        xmax = len(history)
        xgap = xmax * 0.05

        xmin -= xgap
        xmax += xgap
        ymin = np.min(history)
        ymax = np.max(history)
        ygap = ymax * 0.05
        ymin -= ygap
        ymax += ygap

        ax.set_xlim([xmin, xmax])
        ax.set_ylim([ymin, ymax])
Example #5
    def geom(self, x, geom_ord=[0], k=100):

        loglik = None
        agrad = None
        HessApply = None
        eigs = None

        # get log-likelihood
        if any(s >= 0 for s in geom_ord):
            loglik = -self.cost(x)

        # get gradient
        if any(s >= 1 for s in geom_ord):
            g = np.zeros_like(x)
            agrad = -self.grad(x, g)

        # get Hessian Apply
        if any(s >= 1.5 for s in geom_ord):
            HessApply = None

        # get estimated eigen-decomposition for the Hessian (or Gauss-Newton)
        if any(s > 1 for s in geom_ord):
            # eigs = (np.array([1., 0.1]), np.array([np.ones_like(x),-np.ones_like(x)]))
            # eigs = (np.ones(1), np.ones_like(x))
            eigs = self.eigdecomp(x, k=np.min([self.dimension, k]))

        return loglik, agrad, HessApply, eigs
Example #6
def reparam_gradient(alpha,m,x,K,alphaz,corr=True,B=0):
    gradient = np.zeros((alpha.shape[0],2))
    if B == 0:
        assert np.min(alpha)>= 1.,"Needs alpha boost"
        lmbda = npr.gamma(alpha,1.)
        lmbda[lmbda < 1e-300] = 1e-300
        zw = m*lmbda/alpha
        epsilon = calc_epsilon(lmbda, alpha)
        h_val = gamma_h(epsilon, alpha)
        h_der = gamma_grad_h(epsilon, alpha)
        logp_der = grad_logp(zw, K, x, alphaz)
        gradient[:,0] = logp_der*m*(alpha*h_der-h_val)/alpha**2
        gradient[:,1] = logp_der*h_val/alpha
        gradient += grad_entropy(alpha,m)
        if corr:
            gradient[:,0] += logp(zw, K, x, alphaz)*gamma_correction(epsilon, alpha)
    else:
        lmbda = npr.gamma(alpha+B,1.)
        lmbda[lmbda < 1e-5] = 1e-5
        u = npr.rand(alpha.shape[0],B)
        epsilon = calc_epsilon(lmbda, alpha+B)
        h_val = gamma_h_boosted(epsilon,u,alpha)
        h_der = gamma_grad_h_boosted(epsilon,u,alpha)
        zw = h_val*m/alpha
        zw[zw < 1e-5] = 1e-5
        logp_der = grad_logp(zw, K, x, alphaz)
        gradient[:,0] = logp_der*m*(alpha*h_der-h_val)/alpha**2
        gradient[:,1] = logp_der*h_val/alpha
        gradient += grad_entropy(alpha,m)
        if corr:
            gradient[:,0] += logp(zw, K, x, alphaz)*gamma_correction(epsilon, alpha+B)
    return gradient
Example #7
def plot_images(images,
                ax,
                ims_per_row=5,
                padding=5,
                digit_dimensions=(28, 28),
                cmap=matplotlib.cm.binary,
                vmin=None,
                vmax=None):
    """Images should be a (N_images x pixels) matrix."""
    N_images = images.shape[0]
    N_rows = int(np.ceil(float(N_images) / ims_per_row))
    pad_value = np.min(images.ravel())
    concat_images = np.full(
        ((digit_dimensions[0] + padding) * N_rows + padding,
         (digit_dimensions[1] + padding) * ims_per_row + padding), pad_value)
    for i in range(N_images):
        cur_image = np.reshape(images[i, :], digit_dimensions)
        row_ix = i // ims_per_row
        col_ix = i % ims_per_row
        row_start = padding + (padding + digit_dimensions[0]) * row_ix
        col_start = padding + (padding + digit_dimensions[1]) * col_ix
        concat_images[row_start:row_start + digit_dimensions[0],
                      col_start:col_start + digit_dimensions[1]] = cur_image
    cax = ax.matshow(concat_images, cmap=cmap, vmin=vmin, vmax=vmax)
    plt.xticks(np.array([]))
    plt.yticks(np.array([]))
    return cax
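A quick smoke test for plot_images, assuming only numpy and matplotlib (the same libraries the function itself relies on):

import numpy as np
import matplotlib
import matplotlib.pyplot as plt

images = np.random.rand(12, 28 * 28)  # twelve random 28x28 "digits"
fig, ax = plt.subplots()
plot_images(images, ax, ims_per_row=4)
plt.show()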
Example #8
    def get_sharp_TE_airfoil(self):
        # Returns a version of the airfoil with a sharp trailing edge.

        upper_original_coors = self.upper_coordinates()  # Note: includes leading edge point, be careful about duplicates
        lower_original_coors = self.lower_coordinates()  # Note: includes leading edge point, be careful about duplicates

        # Find data about the TE

        # Get the scale factor
        x_mcl = self.mcl_coordinates[:, 0]
        x_max = np.max(x_mcl)
        x_min = np.min(x_mcl)
        scale_factor = (x_mcl - x_min) / (x_max - x_min)  # linear contraction

        # Do the contraction
        upper_minus_mcl_adjusted = self.upper_minus_mcl - self.upper_minus_mcl[
            -1, :] * np.expand_dims(scale_factor, 1)

        # Recreate coordinates
        upper_coordinates_adjusted = np.flipud(self.mcl_coordinates +
                                               upper_minus_mcl_adjusted)
        lower_coordinates_adjusted = self.mcl_coordinates - upper_minus_mcl_adjusted

        coordinates = np.vstack(
            (upper_coordinates_adjusted[:-1, :], lower_coordinates_adjusted))

        # Make a new airfoil with the coordinates
        name = self.name + ", with sharp TE"
        new_airfoil = Airfoil(name=name,
                              coordinates=coordinates,
                              repanel=False)

        return new_airfoil
Example #9
    def hmc(self, position_current, momentum_current):
        ### Refresh momentum
        momentum_current = self.sample_momentum(1)

        ### Simulate Hamiltonian dynamics using Leap Frog
        position_proposal, momentum_proposal = self.leap_frog(position_current, momentum_current)

        # compute total energy in current position and proposal position
        current_total_energy = self.total_energy(position_current, momentum_current)
        proposal_total_energy = self.total_energy(position_proposal, momentum_proposal)

        ### Output for diagnostic mode
        if self.params['diagnostic_mode']:
            print('potential energy change:',
                  self.potential_energy(position_current),
                  self.potential_energy(position_proposal))
            print('kinetic energy change:',
                  self.kinetic_energy(momentum_current),
                  self.kinetic_energy(momentum_proposal))
            print('total energy change:',
                  current_total_energy,
                  proposal_total_energy)
            print('\n\n')

        ### Metropolis Hastings Step
        # compute accept probability
        accept_prob = np.min([1, np.exp(current_total_energy - proposal_total_energy)])
        # accept proposal with accept probability
        if self.random.rand() < accept_prob:
            self.accepts += 1.
            position_current = np.copy(position_proposal)
            momentum_current = momentum_proposal

        return position_current, momentum_current
Example #10
    def get_camber_at_chord_fraction_legacy(self, chord_fraction):
        # Returns the (interpolated) camber at the given chord fraction(s),
        # measured from the leading edge. Camber is nondimensionalized by
        # chord (i.e. this returns camber/c at a given x/c).
        chord = np.max(self.coordinates[:, 0]) - np.min(
            self.coordinates[:, 0]
        )  # This should always be 1, but this is just coded for robustness.

        x = chord_fraction * chord + min(self.coordinates[:, 0])

        upperCoors = self.upper_coordinates()
        lowerCoors = self.lower_coordinates()

        y_upper_func = sp_interp.interp1d(x=upperCoors[:, 0],
                                          y=upperCoors[:, 1],
                                          copy=False,
                                          fill_value='extrapolate')
        y_lower_func = sp_interp.interp1d(x=lowerCoors[:, 0],
                                          y=lowerCoors[:, 1],
                                          copy=False,
                                          fill_value='extrapolate')

        y_upper = y_upper_func(x)
        y_lower = y_lower_func(x)

        camber = (y_upper + y_lower) / 2

        return camber
Example #11
    def variational_objective(params, t, num_samples, beta=1.):
        """Provides a stochastic estimate of the variational lower bound."""

        # 1. draw samples from the variational posterior, eps ~ N(0,I)
        zs, ldet_sums = draw_variational_samples(params, num_samples)

        # 1.5 negative entropy of z0 --- likely we need this for KL though
        # not needed for optimization

        # 2. compute expected value of the sum of jacobian terms
        E_ldet_sum = np.mean(ldet_sums)

        # 3. compute data term
        lls = logprob(zs, t)
        E_logprob = np.mean(lls)

        if debug_print:
            print "entropy term: ", E_ldet_sum
            print "data term   : ", E_logprob, " (+/- ", np.std(
                lls), ")", " min = ", np.min(lls)

        # return lower bound
        beta = 1. if t >= len(beta_schedule) else beta_schedule[t]
        lower_bound = beta * E_logprob + E_ldet_sum
        return -lower_bound
Example #12
def line_point_dist(lines, ps):
    """
    Closest distance of a point to a line segment defined by two points (a, b).
    The arguments can also be lists of lines and points, in that case the distance for
    each combination is returned, with shape lines.shape[:-2] + ps.shape[:-1].
    """

    assert(lines.shape[-2:] == (2, 2))
    assert(ps.shape[-1] == 2)
    a = lines[...,0,:]
    b = lines[...,1,:]
    for _ in range(max(len(ps.shape)-1, 1)):
        a = np.expand_dims(a, -2)
        b = np.expand_dims(b, -2)
    # ps = np.expand_dims(ps, 0)

    v_hat = (b - a) / np.expand_dims(norm(b - a), -1)
    
    # d_along.shape == (v_hat.shape[0], ps.shape[0])
    # i.e. one scalar product for each line-point combination
    d_along = np.sum(v_hat*(ps - a), axis=-1)
    d_normal = np.abs(cross(v_hat, ps - a))
    assert(d_along.shape == d_normal.shape)

    d_ends = np.min(np.array([norm(ps-a), norm(ps-b)]), axis=0)

    # if p lies along the sides of the line use the normal distance,
    # else the distance to one of the ends
    mask = (0 <= d_along) & (d_along <= norm(b - a))
    return np.where(mask, d_normal, d_ends)
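line_point_dist relies on norm and cross helpers that are not shown; a hedged usage sketch, assuming they are a last-axis Euclidean norm and the 2-D scalar cross product:

import numpy as np

def norm(v):
    return np.linalg.norm(v, axis=-1)

def cross(a, b):
    return a[..., 0] * b[..., 1] - a[..., 1] * b[..., 0]

lines = np.array([[[0., 0.], [1., 0.]]])  # one segment from (0, 0) to (1, 0)
ps = np.array([[0.5, 1.0], [2.0, 0.0]])   # above the middle, and past the end
print(line_point_dist(lines, ps))         # -> [[1. 1.]]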
Example #13
def expected_improvement(x, gaussian_process, evaluated_loss):
    """ expected_improvement

    Expected improvement acquisition function.

    Arguments:
    ----------
        x: array-like, shape = [n_samples, n_hyperparams]
            The point for which the expected improvement needs to be computed.
        gaussian_process: GaussianProcessRegressor object.
            Gaussian process trained on previously evaluated hyperparameters.
        evaluated_loss: Numpy array.
            Numpy array that contains the values of the loss function for the
            previously evaluated hyperparameters.
    """

    x_to_predict = x.reshape(1, -1)

    mu, sigma = gaussian_process.predict(x_to_predict, return_std=True)

    loss_optimum = np.min(evaluated_loss)

    # In case sigma equals zero
    with np.errstate(divide='ignore'):
        Z = (loss_optimum - mu) / sigma
        expected_improvement = (
            loss_optimum - mu) * norm.cdf(Z) + sigma * norm.pdf(Z)
        expected_improvement[sigma == 0.0] = 0.0

    return expected_improvement
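A usage sketch with scikit-learn's GaussianProcessRegressor; the toy observations below are illustrative only:

import numpy as np
from scipy.stats import norm
from sklearn.gaussian_process import GaussianProcessRegressor

X_obs = np.array([[0.0], [0.5], [1.0]])
loss_obs = np.array([1.0, 0.2, 0.7])
gp = GaussianProcessRegressor().fit(X_obs, loss_obs)
print(expected_improvement(np.array([0.25]), gp, loss_obs))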
Example #14
def random_eval_experiment():
    '''
    Experiment illustrating how quickly global random evaluation fails as an optimization method.  Output is the minimum value attained by uniformly sampling the cube [-1,1] x [-1,1] x ... x [-1,1] and evaluating a simple quadratic 100, 1000, or 10000 times.  The dimension is increased from 1 to 100 and the minimum is plotted for each dimension.
    '''
    # define symmetric quadratic N-dimensional
    g = lambda w: np.dot(w.T, w)

    # loop over dimensions, sample points, evaluate
    mean_evals = []
    big_dim = 100
    num_pts = 10000
    pt_stops = [100, 1000, 10000]
    for dim in range(big_dim):
        dim_eval = []
        m_eval = []
        for pt in range(num_pts):
            # generate random point using uniform
            r = 2 * np.random.rand(dim + 1) - 1
            e = g(r)
            dim_eval.append(e)

            # record the minimum attained after this many evaluations
            if (pt + 1) in pt_stops:
                m_eval.append(np.min(dim_eval))
        mean_evals.append(m_eval)

    # convert to array for easy access
    mean_evals_global = np.asarray(mean_evals)

    fig = plt.figure(figsize=(6, 3))

    # create a single-panel subplot
    gs = gridspec.GridSpec(1, 1, width_ratios=[1])
    fig.subplots_adjust(wspace=0.5, hspace=0.01)

    # plot input function
    ax = plt.subplot(gs[0])

    for k in range(len(pt_stops)):
        mean_evals = mean_evals_global[:, k]

        # plot minimum value attained for each dimension
        ax.plot(np.arange(big_dim) + 1, mean_evals)

        # clean up plot - label axes, etc.,
        ax.set_xlabel('dimension of input')
        ax.set_ylabel('function value')

    # draw legend
    t = [str(p) for p in pt_stops]
    ax.legend(t, bbox_to_anchor=(1, 0.5))

    # draw horizontal axis
    ax.plot(np.arange(big_dim) + 1,
            np.arange(big_dim) * 0,
            linewidth=1,
            linestyle='--',
            color='k')
    plt.show()
Example #15
 def _parameter_initialiser(self, x, c=None, n=None, offset=False):
     log_x = np.log(x)
     log_x[np.isnan(log_x)] = 0
     gumb = para.Gumbel.fit(log_x, c, n, how='MLE')
     if not gumb.res.success:
         gumb = para.Gumbel.fit(log_x, c, n, how='MPP')
     mu, sigma = gumb.params
     alpha, beta = np.exp(mu), 1. / sigma
     if (np.isinf(alpha) | np.isnan(alpha)):
         alpha = np.median(x)
     if (np.isinf(beta) | np.isnan(beta)):
         beta = 1.
     if offset:
         gamma = np.min(x) - (np.max(x) - np.min(x)) / 10.
         return gamma, alpha, beta, 1.
     else:
         return alpha, beta, 1.
Example #16
    def show_2d_classifier(self, ax, w_best, run, **kwargs):
        cost = run.cost
        predict = run.model
        feat = run.feature_transforms
        normalizer = run.normalizer

        ### create surface and boundary plot ###
        xmin1 = np.min(copy.deepcopy(self.x[:, 0]))
        xmax1 = np.max(copy.deepcopy(self.x[:, 0]))
        xgap1 = (xmax1 - xmin1) * 0.05
        xmin1 -= xgap1
        xmax1 += xgap1

        xmin2 = np.min(copy.deepcopy(self.x[:, 1]))
        xmax2 = np.max(copy.deepcopy(self.x[:, 1]))
        xgap2 = (xmax2 - xmin2) * 0.05
        xmin2 -= xgap2
        xmax2 += xgap2

        # plot boundary for 2d plot
        r1 = np.linspace(xmin1, xmax1, 500)
        r2 = np.linspace(xmin2, xmax2, 500)
        s, t = np.meshgrid(r1, r2)
        s = np.reshape(s, (np.size(s), 1))
        t = np.reshape(t, (np.size(t), 1))
        h = np.concatenate((s, t), axis=1)
        z = predict(normalizer(h.T), w_best)
        z = np.sign(z)

        # reshape it
        s.shape = (np.size(r1), np.size(r2))
        t.shape = (np.size(r1), np.size(r2))
        z.shape = (np.size(r1), np.size(r2))

        #### plot contour, color regions ####
        ax.contour(s, t, z, colors='k', linewidths=2.5, levels=[0], zorder=2)
        ax.contourf(s,
                    t,
                    z,
                    colors=[self.colors[1], self.colors[0]],
                    alpha=0.15,
                    levels=range(-1, 2))

        # cleanup panel
        ax.set_xlim([xmin1, xmax1])
        ax.set_ylim([xmin2, xmax2])
Example #17
def timing_error(bubble_position, piezo_timings):
    """Calculate the mean squared error of the expected timings for a certain bubble positions versus those actually observed"""
    # Get the expected overall times of flight to the piezos for this bubble
    times_of_flight = expected_times_of_flight(bubble_position)
    # Subtract the minimum time of flight from all of them, so that the first signal is at time 0
    expected_timings = times_of_flight - np.min(times_of_flight)
    # Return the Euclidean norm of the difference between the observed piezo timings and the timings expected for this bubble
    return np.linalg.norm(expected_timings - piezo_timings)
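A self-contained sketch with a stand-in expected_times_of_flight; the sensor geometry and sound speed below are hypothetical, for illustration only:

import numpy as np

def expected_times_of_flight(bubble_position):  # hypothetical stand-in
    piezos = np.array([[0., 0.], [1., 0.], [0., 1.]])  # assumed sensor positions, m
    return np.linalg.norm(piezos - bubble_position, axis=1) / 1500.0  # seconds

observed = np.array([0.0, 5.5e-4, 5.5e-4])
print(timing_error(np.array([0.1, 0.1]), observed))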
Example #18
def get_starlet_shape(shape, lvl=None):
    """ Get the pad shape for a starlet transform
    """
    #Number of levels for the Starlet decomposition
    lvl_max = int(np.log2(np.min(shape[-2:])))
    if (lvl is None) or lvl > lvl_max:
        lvl = lvl_max
    return int(lvl)
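For example (assuming numpy is imported as np):

print(get_starlet_shape((128, 64)))         # floor(log2(64)) = 6
print(get_starlet_shape((128, 64), lvl=3))  # 3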
Example #19
 def ab_cb(self, x, a, b, N, alpha=0.05):
     # Parameter confidence intervals from here:
     # https://mathoverflow.net/questions/278675/confidence-intervals-for-the-endpoints-of-the-uniform-distribution
     #
     sample_range = np.max(x) - np.min(x)
     fun = lambda c : self.p(c, N)
     c_hat = minimize(fun, 1.).x
     return a - c_hat*sample_range, b + c_hat*sample_range
Example #20
def get_standard_error_matrix(betahat, y, x, w, se_group=None):
    """Return the standard error matrix for the regression estimate betahat.

    If se_group is None, compute the ordinary regression standard error.
    Otherwise, compute the robust standard errors using the grouping given by
    se_group, which is assumed to be integers 0:(num_groups - 1).

    Note that se_group must be zero-indexed, and the number of groups is taken
    to be the largest index plus one.  (This behavior is implicitly assumed in
    group_sum.)

    With the se_group option, no finite-sample bias adjustment is applied.
    For example, the resulting ses should be equivalent to calling the
    R function

    sandwich::vcovCL(..., cluster=se_group, type="HC0", cadjust=FALSE)
    """

    resid = y - x @ betahat

    # For now, I am taking the weights to parameterize a change to the
    # objective function rather than a change to the empirical distribution.
    # See email from me to Rachael and Tamara on Jan 31, 2020, 2:50 PM
    # for more discussion of this subtle point.
    if se_group is None:
        # I am using num_obs instead of np.sum(w) because w does not
        # parameterize the empirical distribution.
        num_obs = len(y)
        xtx_bar = np.einsum('ni,nj,n->ij', x, x, w) / num_obs
        sigma2hat = np.sum(w * (resid ** 2)) / (num_obs - len(betahat))
        xtx_inv = np.linalg.inv(xtx_bar)
        se2 = sigma2hat * xtx_inv / num_obs
        return se2
    else:

        if len(se_group) != len(y):
            raise ValueError("se_group must be the same length as the data.")
        #resid =  y - x @ betahat

        if np.min(se_group) != 0:
            raise ValueError('se_group must be zero-indexed ' +
                             '(its minimum must be zero)')

        # Calculate the sample variance of the gradient where each group
        # is treated as a single observation.
        grad = w[:, None] * resid[:, None] * x
        grad_grouped = grouped_sum(grad, se_group)
        num_groups = grad_grouped.shape[0]
        grad2_mean = np.einsum('gi,gj->ij',
                               grad_grouped, grad_grouped) / num_groups
        grad_mean = np.einsum('gi->i', grad_grouped) / num_groups
        grad_cov = grad2_mean - np.outer(grad_mean, grad_mean)

        # Weight by the Hessian.
        xtx_bar = np.einsum('ni,nj,n->ij', x, x, w) / num_groups
        hinv_grad_cov = np.linalg.solve(xtx_bar, grad_cov)
        se2 = np.linalg.solve(xtx_bar, hinv_grad_cov.T) / num_groups
        return se2
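A small check of the ordinary (se_group=None) path on synthetic data; the grouped path additionally needs the grouped_sum helper, which is not shown here:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(200, 2))
betahat = np.array([1.0, -2.0])
y = x @ betahat + 0.1 * rng.normal(size=200)
w = np.ones(200)

se2 = get_standard_error_matrix(betahat, y, x, w)
print(np.sqrt(np.diag(se2)))  # standard errors for the two coefficients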
Example #21
def plot_multiple_sequences(seq1, seq2, seq3):
    # initialize figure
    fig = plt.figure(figsize=(10, 5))

    # create subplot with two stacked panels
    gs = gridspec.GridSpec(2, 1)
    ax1 = plt.subplot(gs[1])
    ax2 = plt.subplot(gs[0])

    ax1.plot(np.arange(np.size(seq1)), seq1.flatten(), c='k', linewidth=2.5)
    ax2.plot(np.arange(np.size(seq2)),
             seq2.flatten(),
             c='lime',
             linewidth=2.5,
             label='sequence 1',
             zorder=2)
    ax2.plot(np.arange(np.size(seq3)),
             seq3.flatten(),
             c='m',
             linewidth=2.5,
             label='sequence 2',
             zorder=1)

    # label axes and title
    ax1.set_title('input sequence')
    ax1.set_xlabel('step')
    ax2.set_title('output sequences')
    ax2.set_xlabel('step')

    # set viewing limits
    s1min = np.min(copy.deepcopy(seq1))
    s1max = np.max(copy.deepcopy(seq1))
    s1gap = (s1max - s1min) * 0.1
    s1min -= s1gap
    s1max += s1gap
    ax1.set_ylim([s1min, s1max])

    s2min = np.min(copy.deepcopy(seq2))
    s2max = np.max(copy.deepcopy(seq2))
    s2gap = (s2max - s2min) * 0.1
    s2min -= s2gap
    s2max += s2gap
    ax2.set_ylim([s2min, s2max])
    ax2.legend(loc=1)

    plt.show()
Example #22
def prox_sdss_symmetry(X, step):
    """SDSS/HSC symmetry operator

    This function uses the *minimum* of the two
    symmetric pixels in the update.
    """
    Xs = np.fliplr(np.flipud(X))
    X[:] = np.min([X, Xs], axis=0)
    return X
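For example, on a 3x3 array each pixel is paired with its 180-degree rotation about the centre and the smaller value wins (numpy assumed as np; the step argument is ignored by this operator):

X = np.arange(9.0).reshape(3, 3)
print(prox_sdss_symmetry(X, step=0))
# [[0. 1. 2.]
#  [3. 4. 3.]
#  [2. 1. 0.]]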
Example #23
    def render(self, model):
        """Resample and convolve a model in the observation frame for display only!
        Parameters
        ----------
        model: array
            The model in some other data frame.
        Returns
        -------
        model_: array
            The convolved and resampled `model` in the observation frame.
        """
        img = np.zeros(self.frame.shape)

        y0 = np.min(self._coord_lr[0]).astype(int)
        y1 = np.max(self._coord_lr[0]).astype(int) + 1
        x0 = np.min(self._coord_lr[1]).astype(int)
        x1 = np.max(self._coord_lr[1]).astype(int) + 1
        img[:, y0:y1, x0:x1] = self._render(model)

        return img
Example #24
def add_jitter(kernel, jitter=1e-5):

    # Add the jitter
    diag_indices = np.diag_indices(np.min(kernel.shape[:2]))
    to_add = np.zeros_like(kernel)
    to_add[diag_indices] += jitter
    kernel = kernel + to_add

    return kernel
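A typical use is to make a nearly singular kernel matrix Cholesky-factorizable (numpy assumed as np):

K = np.ones((3, 3))  # rank-1, so np.linalg.cholesky(K) would fail
L = np.linalg.cholesky(add_jitter(K, jitter=1e-5))
print(L @ L.T)       # ~K with 1e-5 added on the diagonal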
Example #25
def to_lane_dist(lanes_col1, lanes_col2):
    # use the middle point to calculate dist_y?
    sample_num = lanes_col1.shape[0]
    dist_y = np.min(abs(np.hstack((lanes_col1[:, 1].reshape(sample_num, 1), lanes_col2[:, 1].reshape(sample_num, 1)))), axis=1)  # nearest y in each lane

    theta = np.arctan((lanes_col1[:, 0] - lanes_col2[:, 0]) / (lanes_col2[:, 1] - lanes_col1[:, 1]))
    rho = abs(lanes_col1[:, 0] * np.cos(theta) + lanes_col1[:, 1] * np.sin(theta))  # offset to lane
    dist = dist_y + rho
    return dist
Example #26
    def __init__(self,
                 latent_dim,
                 noise_dim,
                 model_directory,
                 latent=None,
                 full=False,
                 config_fname='op_conditions.ini'):

        self.latent = latent
        self.latent_dim = latent_dim
        self.noise_dim = noise_dim
        if noise_dim == 0:
            full = False
        self.full = full

        if (not full) and (self.latent is None):
            self.dim = self.latent_dim
            self.bounds = np.array([[0., 1.]])
            self.bounds = np.tile(self.bounds, [self.dim, 1])
        else:
            self.dim = self.latent_dim + self.noise_dim
            if self.latent is not None:
                assert len(self.latent) == self.latent_dim
                latent_bounds = np.vstack((latent - 0.1, latent + 0.1)).T
            else:
                latent_bounds = np.array([0., 1.])
                latent_bounds = np.tile(latent_bounds, [self.latent_dim, 1])
            noise_bounds = np.array([-0.5, 0.5])
            noise_bounds = np.tile(noise_bounds, [self.noise_dim, 1])
            self.bounds = np.vstack((latent_bounds, noise_bounds))

        # Expand bounds by 20%
        b = self.bounds
        r = np.max(b, axis=1) - np.min(b, axis=1)
        self.bounds = np.zeros_like(b)
        self.bounds[:, 0] = b[:, 0] - 0.2 * r
        self.bounds[:, 1] = b[:, 1] + 0.2 * r

        self.y = None
        self.config_fname = config_fname

        self.gan = GAN(self.latent_dim, self.noise_dim, 192, 31, (0., 1.))
        self.gan.restore(model_directory)

        n_points = self.gan.X_shape[0]
        x_synth = self.gan.x_fake_test
        x_synth_ = tf.squeeze(x_synth)
        self.x_target = tf.placeholder(tf.float32, shape=[n_points, 2])
        self.e = tf.reduce_mean(
            tf.reduce_sum(tf.square(x_synth_ - self.x_target), axis=1))
        if self.full:
            self.grad_e = tf.concat(tf.gradients(self.e,
                                                 [self.gan.c, self.gan.z]),
                                    axis=1)
        else:
            self.grad_e = tf.gradients(self.e, self.gan.c)
Example #27
def adagrad_optimize(n_iters,
                     objective_and_grad,
                     init_param,
                     has_log_norm=False,
                     window=10,
                     learning_rate=.01,
                     epsilon=.1,
                     learning_rate_end=None):
    local_grad_history = []
    local_log_norm_history = []
    value_history = []
    log_norm_history = []
    variational_param = init_param.copy()
    variational_param_history = []
    with tqdm.trange(n_iters) as progress:
        try:
            schedule = learning_rate_schedule(n_iters, learning_rate,
                                              learning_rate_end)
            for i, curr_learning_rate in zip(progress, schedule):
                prev_variational_param = variational_param
                if has_log_norm:
                    obj_val, obj_grad, log_norm = objective_and_grad(
                        variational_param)
                else:
                    obj_val, obj_grad = objective_and_grad(variational_param)
                    log_norm = 0
                value_history.append(obj_val)
                local_grad_history.append(obj_grad)
                local_log_norm_history.append(log_norm)
                log_norm_history.append(log_norm)
                if len(local_grad_history) > window:
                    local_grad_history.pop(0)
                    local_log_norm_history.pop(0)
                grad_scale = np.exp(
                    np.min(local_log_norm_history) -
                    np.array(local_log_norm_history))
                scaled_grads = grad_scale[:, np.newaxis] * np.array(
                    local_grad_history)
                accum_sum = np.sum(scaled_grads**2, axis=0)
                variational_param = variational_param - curr_learning_rate * obj_grad / np.sqrt(
                    epsilon + accum_sum)
                if i >= 3 * n_iters // 4:
                    variational_param_history.append(variational_param.copy())
                if i % 10 == 0:
                    avg_loss = np.mean(value_history[max(0, i - 1000):i + 1])
                    progress.set_description(
                        'Average Loss = {:,.5g}'.format(avg_loss))
        except (KeyboardInterrupt, StopIteration) as e:  # pragma: no cover
            # do not print log on the same line
            progress.close()
        finally:
            progress.close()
    variational_param_history = np.array(variational_param_history)
    smoothed_opt_param = np.mean(variational_param_history, axis=0)
    return (smoothed_opt_param, variational_param_history,
            np.array(value_history), np.array(log_norm_history))
Example #28
    def __init__(self, mle_par, prior_par, x_mat, y_vec, y_g_vec):

        self.mle_par = copy.deepcopy(mle_par)
        self.prior_par = copy.deepcopy(prior_par)
        self.x_mat = np.array(x_mat)
        self.y_vec = np.array(y_vec)
        self.y_g_vec = np.array(y_g_vec)

        assert np.min(y_g_vec) == 0
        assert np.max(y_g_vec) == self.mle_par['u'].size() - 1
Example #29
def compute_khat_iterates(iterate_chains,
                          warmup=0.85,
                          param_idx=0,
                          increasing=True):
    """
    Compute the khat over iterates for a variational parameter after removing warmup.
    Parameters
    ----------
    iterate_chains : multi-dimensional array, shape=(n_chains, n_iters, n_var_params)

    warmup : float, fraction of the sorted iterates discarded before fitting the GPD

    param_idx : index of the variational parameter

    increasing : boolean, sort the array in increasing (True) or decreasing (False) order
    Returns
    -------
    maximum of khat over all chains for the variational parameter param_idx

    """
    chains = iterate_chains[:, :, param_idx]
    n_iters = chains.shape[1]
    n_chains = chains.shape[0]

    k_hat_values = np.zeros(n_chains)
    for i in range(n_chains):
        if increasing:
            sorted_chain = np.sort(chains[i, :])
        else:
            sorted_chain = np.sort(-chains[i, :])

        ind_last = int(n_iters * warmup)
        filtered_chain = sorted_chain[ind_last:]
        filtered_chain = filtered_chain - np.min(filtered_chain)
        k_post, _ = gpdfit(filtered_chain)
        k_hat_values[i] = k_post

    return np.nanmax(k_hat_values)
Example #30
def rand_psd(n, minew=0.1, maxew=1.):
    X = np.random.randn(n,n)
    S = np.dot(T_(X), X)
    S = sym(S)
    ew, ev = np.linalg.eigh(S)
    ew -= np.min(ew)
    ew /= np.max(ew)
    ew *= (maxew - minew)
    ew += minew
    return dot3(ev, np.diag(ew), T_(ev))
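rand_psd depends on T_, sym, and dot3 helpers that are not shown; a runnable sketch assuming they are transpose, symmetrize, and a triple matrix product:

import numpy as np

T_ = lambda X: X.T
sym = lambda S: (S + S.T) / 2.
dot3 = lambda A, B, C: A @ B @ C

S = rand_psd(4)
print(np.linalg.eigvalsh(S))  # eigenvalues rescaled into [0.1, 1.0]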
Example #31
    def _space_constraint(self, x_in, min_dist):
        x = np.nan_to_num(x_in[0:self.nturbs])
        y = np.nan_to_num(x_in[self.nturbs:])

        dist = [np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2) \
                for i in range(self.nturbs) \
                for j in range(self.nturbs) if i != j]

        return np.min(dist) - self._norm(min_dist, self.bndx_min,
                                         self.bndx_max)
Example #32
    def scatter_3d_points(self, x, ax):
        # set plotting limits
        xmax = copy.deepcopy(np.max(x[0, :]))
        xmin = copy.deepcopy(np.min(x[0, :]))
        xgap = (xmax - xmin) * 0.2
        xmin -= xgap
        xmax += xgap

        xmax1 = copy.deepcopy(np.max(x[1, :]))
        xmin1 = copy.deepcopy(np.min(x[1, :]))
        xgap1 = (xmax1 - xmin1) * 0.2
        xmin1 -= xgap1
        xmax1 += xgap1

        ymax = copy.deepcopy(np.max(self.y))
        ymin = copy.deepcopy(np.min(self.y))
        ygap = (ymax - ymin) * 0.2
        ymin -= ygap
        ymax += ygap

        # plot data
        ax.scatter(x[0, :].flatten(),
                   x[1, :].flatten(),
                   self.y.flatten(),
                   color='k',
                   edgecolor='w',
                   linewidth=0.9,
                   s=40)

        # clean up panel
        ax.xaxis.pane.fill = False
        ax.yaxis.pane.fill = False
        ax.zaxis.pane.fill = False

        ax.xaxis.pane.set_edgecolor('white')
        ax.yaxis.pane.set_edgecolor('white')
        ax.zaxis.pane.set_edgecolor('white')

        ax.xaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
        ax.yaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
        ax.zaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)

        return xmin, xmax, xmin1, xmax1, ymin, ymax
Example #33
def plot_data_and_pred(x, y, model, draw_verticals=True):
    x_range = np.linspace(np.min(x), np.max(x), 100)
    yhat_range = model.predict(x_range)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(x, y, 'o', label='observed')
    ax.plot(x_range, yhat_range, 'r-', label='predicted')
    if draw_verticals:  # draw a vertical from each observation to its prediction
        yhat_sparse = model.predict(x)
        for x0, y0, yhat0 in zip(x, y, yhat_sparse):
            ax.plot([x0, x0],[y0, yhat0],'k-')
    plt.legend() #[line_pred, line_true], ['predicted', 'true'])
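plot_data_and_pred expects a model with a predict() that accepts a 1-D array; a hypothetical stand-in built on np.polyfit:

import numpy as np
import matplotlib.pyplot as plt

class PolyModel:  # hypothetical stand-in for illustration
    def __init__(self, coeffs):
        self.coeffs = coeffs

    def predict(self, x):
        return np.polyval(self.coeffs, x)

x = np.linspace(0., 1., 20)
y = 2. * x + 0.1 * np.random.randn(20)
plot_data_and_pred(x, y, PolyModel(np.polyfit(x, y, 1)))
plt.show()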
Example #35
def plot_runtime(ex, fname, func_xvalues, xlabel, func_title=None):
    results = glo.ex_load_result(ex, fname)
    value_accessor = lambda job_results: job_results['time_secs']
    vf_pval = np.vectorize(value_accessor)
    # results['test_results'] is a (repeats x problem-sizes x n_methods)
    # object array of result dicts, each produced by perform_test(te)
    times = vf_pval(results['test_results'])
    repeats, _, n_methods = results['test_results'].shape
    time_avg = np.mean(times, axis=0)
    time_std = np.std(times, axis=0)

    xvalues = func_xvalues(results)

    #ns = np.array(results[xkey])
    #te_proportion = 1.0 - results['tr_proportion']
    #test_sizes = ns*te_proportion
    line_styles = exglo.func_plot_fmt_map()
    method_labels = exglo.get_func2label_map()
    
    func_names = [f.__name__ for f in results['method_job_funcs'] ]
    for i in range(n_methods):    
        te_proportion = 1.0 - results['tr_proportion']
        fmt = line_styles[func_names[i]]
        #plt.errorbar(ns*te_proportion, mean_rejs[:, i], std_pvals[:, i])
        method_label = method_labels[func_names[i]]
        plt.errorbar(xvalues, time_avg[:, i], yerr=time_std[:,i], fmt=fmt,
                label=method_label)
            
    ylabel = 'Time (s)'
    plt.ylabel(ylabel)
    plt.xlabel(xlabel)
    plt.gca().set_yscale('log')
    plt.xlim([np.min(xvalues), np.max(xvalues)])
    plt.xticks( xvalues, xvalues)
    plt.legend(loc='best')
    title = '%s. %d trials. '%( results['prob_label'],
            repeats ) if func_title is None else func_title(results)
    plt.title(title)
    #plt.grid()
    return results
Example #36
 def fun(x): return to_scalar(np.min(x))
 d_fun = lambda x : to_scalar(grad(fun)(x))
Example #37
 def fun(x): return to_scalar(np.min(x, axis=1, keepdims=True))
 d_fun = lambda x : to_scalar(grad(fun)(x))
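These two fragments read like autograd-style gradient checks, with to_scalar and grad coming from the surrounding test harness; a self-contained variant, assuming the autograd package is installed:

import autograd.numpy as np
from autograd import grad
from autograd.test_util import check_grads

fun = lambda x: np.min(x)
print(grad(fun)(np.array([3.0, 1.0, 2.0])))   # one-hot at the argmin: [0. 1. 0.]
check_grads(fun, modes=['rev'])(np.array([3.0, 1.0, 2.0]))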
Example #38
def polyinterp(points, doPlot=None, xminBound=None, xmaxBound=None):
    """ polynomial interpolation
    Parameters
    ----------
    points: shape(pointNum, 3), the three columns represent x, f, g
    doPlot: set to 1 to plot, default 0
    xminBound: min value that brackets the minimum (default: min of points)
    xmaxBound: max value that brackets the maximum (default: max of points)
    
    set f or g to sqrt(-1)=1j if they are not known
    the order of the polynomial is the number of known f and g values minus 1

    Returns
    -------
    minPos: estimated location of the minimizer
    fmin: estimated function value at minPos
    """
    
    if doPlot is None:
        doPlot = 0

    nPoints = points.shape[0]
    order = np.sum(np.imag(points[:, 1:3]) == 0) -1
    
    # code for most common case: cubic interpolation of 2 points
    if nPoints == 2 and order == 3 and doPlot == 0:
        [minVal, minPos] = [np.min(points[:,0]), np.argmin(points[:,0])]
        notMinPos = 1 - minPos
        d1 = points[minPos,2] + points[notMinPos,2] - 3*(points[minPos,1]-\
                points[notMinPos,1])/(points[minPos,0]-points[notMinPos,0])

        t_d2 =  d1**2 - points[minPos,2]*points[notMinPos,2]
        if t_d2 > 0:
            d2 = np.sqrt(t_d2)
        else:
            d2 = np.sqrt(-t_d2) * 1j
        if np.isreal(d2):
            t = points[notMinPos,0] - (points[notMinPos,0]-points[minPos,0])*\
                    ((points[notMinPos,2]+d2-d1)/(points[notMinPos,2]-\
                    points[minPos,2]+2*d2))
            minPos = np.min([np.max([t,points[minPos,0]]), points[notMinPos,0]])
        else:
            minPos = np.mean(points[:,0])
        fmin = minVal
        return (minPos, fmin)
    
    xmin = np.min(points[:,0])
    xmax = np.max(points[:,0])

    # compute bounds of interpolation area
    if xminBound is None:
        xminBound = xmin
    if xmaxBound is None:
        xmaxBound = xmax

    # constraints based on available function values
    A = np.zeros((0, order+1))
    b = np.zeros((0, 1))
    for i in range(nPoints):
        if np.imag(points[i,1]) == 0:
            constraint = np.zeros(order+1)
            for j in np.arange(order,-1,-1):
                constraint[order-j] = points[i,0]**j
            A = np.vstack((A, constraint))
            b = np.append(b, points[i,1])
    
    # constraints based on available derivatives
    for i in range(nPoints):
        if np.isreal(points[i,2]):
            constraint = np.zeros(order+1)
            for j in range(1,order+1):
                constraint[j-1] = (order-j+1)* points[i,0]**(order-j)
            A = np.vstack((A, constraint))
            b = np.append(b,points[i,2])
    
    # find interpolating polynomial
    params = np.linalg.solve(A, b)

    # compute critical points
    dParams = np.zeros(order)
    for i in range(params.size-1):
        dParams[i] = params[i] * (order-i)
    
    if np.any(np.isinf(dParams)):
        cp = np.concatenate((np.array([xminBound, xmaxBound]), points[:,0]))
    else:
        cp = np.concatenate((np.array([xminBound, xmaxBound]), points[:,0], \
                np.roots(dParams)))
    
    # test critical points
    fmin = np.inf
    minPos = (xminBound + xmaxBound)/2.
    for xCP in cp:
        if np.imag(xCP) == 0 and xCP >= xminBound and xCP <= xmaxBound:
            fCP = np.polyval(params, xCP)
            if np.imag(fCP) == 0 and fCP < fmin:
                minPos = np.double(np.real(xCP))
                fmin = np.double(np.real(fCP))
    
    # plot situation (omit this part for now since we are not going to use it
    # anyway)

    return (minPos, fmin)
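A worked call exercising the common cubic branch: two points with known f and g give four constraints, hence order 3. With f(0) = f(1) = 1 and slopes -1 and +1 the interpolating cubic is symmetric, so the estimated minimizer is x = 0.5:

import numpy as np

points = np.array([[0.0, 1.0, -1.0],   # x, f, g at the left point
                   [1.0, 1.0,  1.0]])  # x, f, g at the right point
print(polyinterp(points))  # -> (0.5, 1.0)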
Example #39
 def fun(x): return to_scalar(np.min(np.array([[x,     x,   x],
                                               [x,   0.5, 0.5],
                                               [0.5, 0.5, 0.5],
                                               [x,     x, 0.5]]), axis=0))
 d_fun = lambda x : to_scalar(grad(fun)(x))
Example #40
def _read_kurucz_spec(f):
    """
    Read Kurucz spectra that have been precomputed

    Args:
        f (string) : path to the file to be read
        
    Returns:
        new_vel (real array) : velocity axis in km/s
        wavelength (real array) : wavelength axis
        stokes (real array) : Stokes profiles for each velocity bin
    """
    f = open(f, "rb")
    res = f.read()
    
    n_chunk = struct.unpack('i',res[0:4])
    
    freq = []
    stokes = []
    cont = []
    
    left = 4
    
    for i in range(n_chunk[0]):
        
        right = left + 4
        n = struct.unpack('i',res[left:right])

        left = right
        right = left + 4
        nmus = struct.unpack('i',res[left:right])


        left = right
        right = left + 8*n[0]
        t1 = np.asarray(struct.unpack('d'*n[0],res[left:right]))
        freq.append(t1)        
                
        left = right
        right = left + 8*n[0]*nmus[0]

        t2 = np.asarray(struct.unpack('d'*n[0]*nmus[0],res[left:right])).reshape((n[0],nmus[0]))
        stokes.append(t2)

        left = right
        right = left + 8*n[0]*nmus[0]

        t2 = np.asarray(struct.unpack('d'*n[0]*nmus[0],res[left:right])).reshape((n[0],nmus[0]))
        cont.append(t2)
        
        left = right
        
    freq = np.concatenate(freq)
    stokes = np.concatenate(stokes)
    cont = np.concatenate(cont)

    ind = np.argsort(freq)
    freq = freq[ind]
    stokes = stokes[ind]
    cont = cont[ind]
    wavelength = const.c.to('cm/s').value / freq
    mean_wavelength = np.mean(wavelength)

    vel = (wavelength - mean_wavelength) / mean_wavelength * const.c.to('km/s').value

    nl, nmus = stokes.shape

    # Reinterpolate on an equidistant velocity axis
    new_vel = np.linspace(np.min(vel), np.max(vel), nl)
    for i in range(nmus):
        interpolator = scipy.interpolate.interp1d(vel, stokes[:,i], kind='linear')
        stokes[:,i] = interpolator(new_vel)

    return new_vel, wavelength, stokes
Example #41
def gen_test_data():
    ################################################################
    # generate a toy 2-class dataset using sklearn                 #
    ################################################################
    N = 500
    #features,labels = ds.make_classification(n_samples = N,n_features = 2,n_informative = 2,n_redundant = 0,n_clusters_per_class = 1,class_sep = 2,shift = 2.2)
    features,labels = ds.make_circles(n_samples = N)
    #features,labels = ds.make_moons(n_samples = N)
    labels[labels == 0] = -1
    features = auto_np.array(features) * 4.0
    labels = auto_np.array(labels).reshape(features.shape[0],1)
    return features,labels


if __name__ == '__main__' :
    features,labels = gen_test_data()
    optimized_params = neural_net_train(features,labels,num_iter = 2000)
    #optimized_params = neural_net_train(features,labels,num_iter = 1000,opt_method = 'stepest')

    min_x = auto_np.min(features[:,0])
    max_x = auto_np.max(features[:,0])
    min_y = auto_np.min(features[:,1])
    max_y = auto_np.max(features[:,1])
    xx,yy = np.meshgrid(np.linspace(min_x,max_x,200),np.linspace(min_y,max_y,200))
    predict_features = np.c_[xx.ravel(),yy.ravel()]
    predict_labels = neural_net_predict(optimized_params,predict_features).reshape(xx.shape)
    cs = plt.contour(xx,yy,predict_labels)
    plt.clabel(cs,inline = 1,fontsize = 10)
    plt.scatter(features[labels[:,0] == 1,0],features[labels[:,0] == 1,1],c = 'red')
    plt.scatter(features[labels[:,0] == -1,0],features[labels[:,0] == -1,1],c = 'cyan')
    plt.show()

Example #42
    #   phi - [0, 180], transformation log (phi / (180 - phi))
    #
    ######################################################################
    import CelestePy.util.data as du
    from sklearn.linear_model import LinearRegression
    coadd_df = du.load_celeste_dataframe("../../data/stripe_82_dataset/coadd_catalog_from_casjobs.fit")

    # make star => radial extent proposal
    star_res = coadd_df.gal_arcsec_scale[ coadd_df.is_star ].values
    star_res = np.clip(star_res, 1e-8, np.inf)
    star_res_proposal = fit_mog(np.log(star_res).reshape((-1,1)), max_comps = 20, mog_class = MixtureOfGaussians)
    with open('star_res_proposal.pkl', 'wb') as f:
        pickle.dump(star_res_proposal, f)

    if False:
        xgrid = np.linspace(np.min(np.log(star_res)), np.max(np.log(star_res)), 100)
        lpdf  = star_res_proposal.logpdf(xgrid.reshape((-1,1)))
        plt.plot(xgrid, np.exp(lpdf))
        plt.hist(np.log(star_res), 25, density=True, alpha=.24)
        plt.hist(star_res_proposal.rvs(684).flatten(), 25, density=True, alpha=.24)

    # make star fluxes => gal fluxes for stars
    colors    = ['ug', 'gr', 'ri', 'iz']
    star_mags = np.array([du.colors_to_mags(r, c) 
                  for r, c in zip(coadd_df.star_mag_r.values,
                      coadd_df[['star_color_%s'%c for c in colors]].values)])

    gal_mags  = np.array([du.colors_to_mags(r, c) 
                    for r, c in zip(coadd_df.gal_mag_r.values,
                        coadd_df[['gal_color_%s'%c for c in colors]].values)])