Example #1
def TrackCurvature(x):
    # Use quadratic B-splines at each point to estimate curvature.
    # I get almost the same formula I had before, but it's off by a factor of 4!

    xx = np.concatenate([x[-1:], x, x[:1]])
    p0 = xx[:-2]
    p1 = xx[1:-1]
    p2 = xx[2:]
    T = p2 - p0  # track derivative
    uT = np.abs(T)
    TT = 4*(p0 - 2*p1 + p2)
    k = (np.real(T)*np.imag(TT) - np.imag(T)*np.real(TT)) / (uT**3)
    return k
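
A quick sanity check (a sketch, assuming `np` is numpy and the track is a closed loop of complex points, as elsewhere in this file): on a circle of radius R the estimated curvature should come out near 1/R.

R = 10.0
theta = np.linspace(0, 2 * np.pi, 200, endpoint=False)
k = TrackCurvature(R * np.exp(1j * theta))
print(k.mean())  # ~0.1, i.e. 1/R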
Example #2
def test_unimplemented_falseyness():
    @contextmanager
    def remove_grad_definitions(fun):
        vjpmaker = primitive_vjps.pop(fun, None)
        yield
        if vjpmaker:
            primitive_vjps[fun] = vjpmaker

    with remove_grad_definitions(np.iscomplex):
        fun = lambda x: np.real(x**2 if np.iscomplex(x) else np.sum(x))
        check_grads(fun)(5.)
        check_grads(fun)(2. + 1j)
Example #3
def fit_gaussian_draw(X, J, seed=28, reg=1e-7, eig_pow=1.0):
    """
    Fit a multivariate normal to the data X (n x d) and draw J points 
    from the fit. 
    - reg: regularizer to use with the covariance matrix
    - eig_pow: raise eigenvalues of the covariance matrix to this power to construct 
        a new covariance matrix before drawing samples. Useful for shrinking the 
        spread of the samples.
    """
    with NumpySeedContext(seed=seed):
        d = X.shape[1]
        mean_x = np.mean(X, 0)
        cov_x = np.cov(X.T)
        if d == 1:
            cov_x = np.array([[cov_x]])
        evals, evecs = np.linalg.eig(cov_x)
        evals = np.maximum(0, np.real(evals))
        assert np.all(np.isfinite(evals))
        evecs = np.real(evecs)
        shrunk_cov = evecs.dot(np.diag(evals**eig_pow)).dot(evecs.T) + reg*np.eye(d)
        V = np.random.multivariate_normal(mean_x, shrunk_cov, J)
    return V
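
`NumpySeedContext` comes from the surrounding project; a minimal stand-in (an assumption, not the project's implementation) just saves and restores the global RNG state, after which the function can be exercised directly:

import contextlib
import numpy as np

@contextlib.contextmanager
def NumpySeedContext(seed):
    # save the global RNG state, seed it, and restore it on exit
    state = np.random.get_state()
    np.random.seed(seed)
    try:
        yield
    finally:
        np.random.set_state(state)

X = np.random.randn(100, 2)
V = fit_gaussian_draw(X, J=50)  # V.shape == (50, 2)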
Example #4
def as_scalar(x):
    vs = vspace(getval(x))
    if vs.iscomplex:
        x = np.real(x)
    if vs.shape == ():
        return x
    elif vs.size == 1:
        return x.reshape(())
    else:
        raise TypeError(
            "Output {} can't be cast to float. "
            "Function grad requires a scalar-valued function. "
            "Try jacobian or elementwise_grad.".format(getval(x)))
Example #5
def QuadFitCurvatureMap(x):
    curv = []

    for i in range(len(x)):
        # do a look-ahead quadratic fit, just like the car would do
        pts = x[(np.arange(6) + i) % len(x)] / 100  # convert to meters
        basis = (pts[1] - pts[0]) / np.abs(pts[1] - pts[0])

        # project onto forward direction
        pts = (np.conj(basis) * (pts - pts[0]))

        p = np.polyfit(np.real(pts), np.imag(pts), 2)
        curv.append(p[0] / 2)

    return np.float32(curv)
Example #6
def log_likelihood(Data,
                   frequencies,
                   DL,
                   t_c,
                   phi_c,
                   chirpm,
                   symmratio,
                   spin1,
                   spin2,
                   alpha_squared,
                   bppe,
                   NSflag,
                   N_detectors,
                   detector,
                   cosmology=cosmology.Planck15):
    #Z = Distance(DL/mpc,unit=u.Mpc).compute_z(cosmology = cosmology)
    #chirpme = chirpm/(1+Z)
    mass1 = utilities.calculate_mass1(chirpm, symmratio)
    mass2 = utilities.calculate_mass2(chirpm, symmratio)
    model = dcsimr_detector_frame(mass1=mass1,
                                  mass2=mass2,
                                  spin1=spin1,
                                  spin2=spin2,
                                  collision_time=t_c,
                                  collision_phase=phi_c,
                                  Luminosity_Distance=DL,
                                  phase_mod=alpha_squared,
                                  cosmo_model=cosmology,
                                  NSflag=NSflag,
                                  N_detectors=N_detectors)
    frequencies = np.asarray(frequencies)
    amp, phase, hreal = model.calculate_waveform_vector(frequencies)
    #h_complex = np.multiply(amp,np.add(np.cos(phase),-1j*np.sin(phase)))
    h_complex = amp * np.exp(-1j * phase)
    noise_temp, noise_func, freq = model.populate_noise(detector=detector,
                                                        int_scheme='quad')
    resid = np.subtract(Data, h_complex)
    #integrand_numerator = np.multiply(np.conjugate(Data), h_complex) + np.multiply(Data,np.conjugate( h_complex))
    integrand_numerator = np.multiply(resid, np.conjugate(resid))

    noise_root = noise_func(frequencies)
    noise = np.multiply(noise_root, noise_root)
    integrand = np.divide(integrand_numerator, noise)
    integral = np.real(simps(integrand, frequencies))
    return -2 * integral
Example #7
def grad_k(ws, fdensity, alpha, sig, psf_k):
    mo = np.exp(-4.)
    ws = real_to_complex(ws)
    ws = ws.reshape((n_grid, n_grid))

    # gradient of the squared-residual (likelihood) term w.r.t. the Fourier-space map
    l1 = -1 * fft.ifft2(
        (np.real(fft.ifft2(ws * psf_k)) - data) / sig_noise**2) * psf_k
    l1 = l1.flatten()
    l_tot = complex_to_real(l1)
    return l_tot
Example #8
    def list_simulate_spectral(cov, J, n_simulate=1000, seed=82):
        """
        Simulate the null distribution using the spectrum of the covariance
        matrix. This is intended to be used to approximate the null
        distribution.

        Return (a numpy array of simulated n*FSSD values, eigenvalues of cov)
        """
        # eigen decompose
        eigs, _ = np.linalg.eig(cov)
        eigs = np.real(eigs)
        # sort in decreasing order
        eigs = -np.sort(-eigs)
        sim_fssds = FSSD.simulate_null_dist(eigs,
                                            J,
                                            n_simulate=n_simulate,
                                            seed=seed)
        return sim_fssds, eigs
Example #9
def best_invec_phase(x, y, **kwargs):
    """Computes the l2-distance between `x` and `y` up to a global phasefactor.

    .. math::

        \min_\phi \Vert x - \mathrm{e}^{i \phi} y \Vert_2

    :param x, y: Input vectors of same length
    :param kwargs: Parameters passed to `scipy.optimize.minimize`
    :returns: Optimally phased vector `y_` and the minimal distance

    """
    norm_sq = lambda x: np.real(np.dot(np.conj(x.ravel()), x.ravel()))
    cost = lambda phi: norm_sq(x - np.exp(1.j * phi) * y)
    # Choose the initialization randomly to avoid the maximum on the opposite side
    result = minimize(cost, rand_angles(), jac=grad(cost), **kwargs)
    y_ = np.exp(1.j * result['x']) * y
    return y_, result['fun']
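
A usage sketch (assuming `np` is autograd.numpy so `grad(cost)` works, and substituting a hypothetical stand-in for the module's `rand_angles` helper): a vector that differs from `x` only by a global phase should give a distance near zero.

import autograd.numpy as np
from autograd import grad
from scipy.optimize import minimize

def rand_angles(*shape):  # stand-in for the project's helper
    return np.random.uniform(0, 2 * np.pi, shape if shape else None)

x = np.array([1.0, 1.0j, -1.0]) / np.sqrt(3)
y = np.exp(0.7j) * x              # same vector up to a global phase
y_opt, dist = best_invec_phase(x, y)
print(dist)                       # ~0: the phase factor is recovered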
Example #10
    def forward(self, x):
        x = torch.tensor(npa.real(npa.fft.fft(x.to('cpu').numpy(),
                                              axis=2))).to('cuda')
        x, edge_index = x, self.coos[0].to(x.device)
        x = self.conv1(x, edge_index)
        x = F.relu(x)
        x = gcn_pool_4(x)

        edge_index = self.coos[2].to(x.device)
        x = self.conv2(x, edge_index)
        x = F.relu(x)
        x = gcn_pool_4(x)

        x = x.view(x.shape[0], -1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
Example #11
def nn_predict_tgcn_cheb(params, x):

    L = graph.rescale_L(hyper['L'][0], lmax=2)
    w = np.fft.fft(x, axis=2)
    xc = chebyshev_time_vertex(L, w, hyper['filter_order'])
    y = np.einsum('knhq,kfh->fnq', xc, params['W1'])
    y += np.expand_dims(params['b1'], axis=2)

    # nonlinear layer
    # y = np.tanh(y)
    y = ReLU(y)

    # dense layer
    y = np.einsum('fnq,cfn->cq', y, params['W2'])
    y += np.expand_dims(params['b2'], axis=1)

    outputs = np.real(y.T)
    return outputs - logsumexp(outputs, axis=1, keepdims=True)
Example #12
    def grad_like(self, wsp, ws, ws_k, xi):
        # convolution of ws with the PSF
        conv = np.real(fft.ifft2(ws_k * self.psf_k))
        # the term that is squared in the likelihood (with N in the denominator)
        term1 = (conv - self.data) / self.n_grid**2 / self.sig_noise**2
        grad = np.zeros((self.n_grid, self.n_grid), dtype='complex')
        for i in range(0, self.n_grid):
            for j in range(0, self.n_grid):
                # modulate by hand
                ft1 = fft.fft2(1 / (1 + np.exp(-1 * wsp / xi)))
                ftp = np.roll(ft1, (i, j), axis=(0, 1))
                term2 = fft.ifft2(ftp * self.psf_k)
                grad[i, j] = np.sum(term1 * term2)
        # embed into 2R (real representation of the complex gradient)
        grad_real = self.complex_to_real(np.conj(grad.flatten()))
        return grad_real
Example #13
def rcwa_assembly(dofold,freq,theta,phi,planewave):
    '''
    planewave:{'p_amp',...}
    '''
    df = f_symmetry(dofold,Mx,My,xsym,ysym,Nlayer=Nlayer)
    dof = b_filter(df,bproj)

    obj = rcwa.RCWA_obj(nG,L1,L2,freq,theta,phi,verbose=0)
    obj.Add_LayerUniform(thick0,epsuniform)
    epsdiff = []
    for i in range(Nlayer):
        epsdiff.append(mstruct[i].epsilon(lam0 / np.real(freq), x_type='lambda') - epsbkg)
        obj.Add_LayerGrid(thick[i], epsdiff[i], epsbkg, Nx, Ny)
    obj.Add_LayerUniform(thickN, epsuniform)
    obj.Init_Setup(Gmethod=0)
    obj.MakeExcitationPlanewave(planewave['p_amp'], planewave['p_phase'],
                                planewave['s_amp'], planewave['s_phase'],
                                order=0)
    obj.GridLayer_getDOF(dof)

    return obj, dof, epsdiff
Example #14
    def cost(self, controls, states, system_eval_step):
        """
        Compute the penalty.

        Arguments:
        controls
        states
        system_eval_step

        Returns:
        cost
        """
        # The cost is the infidelity of each evolved state and its target state.
        inner_products = anp.matmul(self.target_states_dagger, states)[:, 0, 0]
        fidelities = anp.real(inner_products * anp.conjugate(inner_products))
        fidelity_normalized = anp.sum(fidelities) / self.state_count
        infidelity = 1 - fidelity_normalized

        return infidelity * self.cost_multiplier
Example #15
def my_spectral_clustering(sim_mat, n_clusters=2):

    N = sim_mat.shape[0]
    sim_mat = sim_mat - np.diag(np.diag(sim_mat))
    t1 = 1. / np.sqrt(np.sum(sim_mat, axis=1))
    t2 = np.dot(t1.reshape(N, 1), t1.reshape(1, N))

    lap_mat = np.eye(N) - sim_mat * t2
    eig_val, eig_vec = np.linalg.eig(lap_mat)
    idx = eig_val.argsort()
    eig_val = eig_val[idx]
    eig_vec = np.real(eig_vec[:, idx])

    t3 = np.diag(np.sqrt(1. / np.sum(eig_vec[:, 0:n_clusters]**2, axis=1)))
    embd = np.dot(t3, eig_vec[:, 0:n_clusters])

    clf = KMeans(n_clusters=n_clusters)  # n_jobs was removed from sklearn's KMeans in 1.0
    label_pred = clf.fit_predict(embd)

    return label_pred
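
A usage sketch with hypothetical data: a similarity matrix made of two disconnected blocks should be split into two clusters (up to label permutation).

import numpy as np

sim_mat = np.zeros((6, 6))
sim_mat[:3, :3] = 1.0  # items 0-2 are mutually similar
sim_mat[3:, 3:] = 1.0  # items 3-5 are mutually similar
labels = my_spectral_clustering(sim_mat, n_clusters=2)
print(labels)          # e.g. [0 0 0 1 1 1]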
Example #17
def pu2r(*tlist):
    """
    Returns real part of u2, and registers it as being primitive.

    Primitive means that its derivative will be provided via defvjp
    (definition of a vector-jacobian product), so autograd does not need to
    calculate it from the u2 definition.

    Parameters
    ----------
    tlist : list[float]
        len = 4

    Returns
    -------
    np.ndarray
        shape=(2,2)

    """
    return np.real(u2_alt(*tlist))
Example #18
File: layers.py Project: maka89/FIR
    def forward(self, x):
        # X - [n_ex, length, n_in]
        # Y - [n_ex, length, n_out]

        #Y=np.zeros((X.shape[0],X.shape[1],self.n_out))

        lf = self.fir_length
        lx = x.shape[1]

        x2 = np.concatenate((x, np.zeros((x.shape[0], lf, x.shape[2]))),
                            axis=1)
        f2 = np.concatenate((self.W, np.zeros((lx, self.n_inp, self.n_out))),
                            axis=0)
        X = np.fft.fft(x2, axis=1)
        F = np.fft.fft(f2, axis=0)

        tmp = np.sum(F[np.newaxis, ...] * X[..., np.newaxis], axis=2)
        Y = np.real(np.fft.ifft(tmp, axis=1))[:, 0:x.shape[1]]
        Y += self.b.reshape(1, 1, -1)

        return Y
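
The layer computes linear (not circular) convolution by zero-padding both the signal and the filter before the FFT. A self-contained 1-D sketch of the same identity, assuming plain numpy:

import numpy as np

x = np.random.randn(32)          # signal, length lx
w = np.random.randn(8)           # FIR taps, length lf
x2 = np.concatenate([x, np.zeros(len(w))])
w2 = np.concatenate([w, np.zeros(len(x))])
y = np.real(np.fft.ifft(np.fft.fft(x2) * np.fft.fft(w2)))[:len(x)]
assert np.allclose(y, np.convolve(x, w)[:len(x)])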
Example #19
    def loss(K_conj):
        """
            K is a tensor of CONJUGATE Kraus Operators of dim s x w x n x n
            s: output_dim
            w: ops_per_output
            n: state_dim
        """
        total_loss = 0.0

        # Iterate over each sequence in batch
        for i in range(batch.shape[0]):
            seq = batch[i]
            rho_new = rho.copy()
            # burn in
            for b in range(burn_in):
                temp_rho = np.zeros(
                    [K_conj.shape[1], K_conj.shape[2], K_conj.shape[3]],
                    dtype='complex128')
                for w in range(K_conj.shape[1]):
                    temp_rho[w, :, :] = np.dot(
                        np.dot(K[int(seq[b]) - 1, w, :, :], rho_new),
                        np.conjugate(K[int(seq[b]) - 1, w, :, :]).T)
                rho_new = np.sum(temp_rho, 0)
                rho_new = rho_new / np.trace(rho_new)

            # Compute likelihood for the sequence
            for s in seq[burn_in:]:
                rho_sum = np.zeros([K_conj.shape[2], K_conj.shape[2]],
                                   dtype='complex128')
                for w in range(K.shape[1]):
                    # subtract 1 to adjust for MATLAB indexing
                    rho_sum += np.dot(
                        np.dot(np.conjugate(K_conj[int(s) - 1, w, :, :]),
                               rho_new), K_conj[int(s) - 1, w, :, :].T)

                rho_new = rho_sum

            total_loss += np.log(np.real(np.trace(rho_new)))

        return -total_loss / batch.shape[0]
Example #20
def complex_as_matrix(z, n):
    """Represent a complex number as a matrix.
    
    Parameters
    ----------
    z : complex float
    n : int (even)
    
    Returns
    -------
    Z : ndarray (n,n)
        Real-valued n*n tri-diagonal matrix representing z in the ring of n*n matrices.
        
    """

    Z = np.zeros((n, n))
    ld = np.zeros(n - 1)
    ld[0::2] = np.imag(z)
    np.fill_diagonal(Z[1:], ld)
    Z = Z - Z.T
    np.fill_diagonal(Z, np.real(z))
    return Z
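
A quick check (assuming numpy as `np`) that this representation respects the ring operations: the matrix product and sum match the complex product and sum.

z1, z2 = 1.0 + 2.0j, 0.5 - 1.0j
Z1 = complex_as_matrix(z1, 4)
Z2 = complex_as_matrix(z2, 4)
assert np.allclose(Z1 @ Z2, complex_as_matrix(z1 * z2, 4))
assert np.allclose(Z1 + Z2, complex_as_matrix(z1 + z2, 4))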
Example #21
def ifft2_and_shift(var_real,
                    var_imag,
                    axes=(-2, -1),
                    override_backend=None,
                    normalize=False):
    bn = override_backend if override_backend is not None else global_settings.backend
    if bn == 'autograd':
        var = var_real + 1j * var_imag
        norm = None if not normalize else 'ortho'
        var = anp.fft.fftshift(anp.fft.ifft2(var, axes=axes, norm=norm),
                               axes=axes)
        return anp.real(var), anp.imag(var)
    elif bn == 'pytorch':
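        # Note: tc.ifft with signal_ndim (used below) is the legacy PyTorch
        # complex-FFT API, removed in PyTorch 1.8; newer code would use
        # torch.fft.ifft2 on complex tensors instead.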
        var = tc.stack([var_real, var_imag], dim=-1)
        var = tc.ifft(var, signal_ndim=2, normalized=normalize)
        var_real, var_imag = tc.split(var, 1, dim=-1)
        slicer = [slice(None)] * (var_real.ndim - 1) + [0]
        var_real = var_real[tuple(slicer)]
        var_imag = var_imag[tuple(slicer)]
        var_real = fftshift(var_real, axes=axes)
        var_imag = fftshift(var_imag, axes=axes)
        return var_real, var_imag
Example #22
def simple_2d_filter(x, h):
    """A simple 2d filter algorithm that is differentiable with autograd.
    Uses a 2D fft approach since it is typically faster and preserves the shape
    of the input and output arrays.

    The ffts pad the operation to prevent any circular convolution garbage.

    Parameters
    ----------
    x : array_like (2D)
        Input array to be filtered. Must be 2D.
    h : array_like (2D)
        Filter kernel (before the DFT). Must be same size as `x`

    Returns
    -------
    array_like (2D)
        The output of the 2d convolution.
    """
    (kx, ky) = x.shape
    x = _edge_pad(x, ((kx, kx), (ky, ky)))
    return _centered(npa.real(npa.fft.ifft2(npa.fft.fft2(x) * npa.fft.fft2(h))),
                     (kx, ky))
Example #23
def best_tmat_phases(A, B, cols=True, rows=True, **kwargs):
    """Finds the angles `phi` and `psi` that minimize the Frobenius distance
    between A and B', where

    .. math::

        B' = \mathrm{diag}(\mathrm{exp}(i \phi)) B \mathrm{diag}(\mathrm{exp}(i \psi))

    :returns: Optimal value `B'` as well as minimal Frobenius distance
    """
    d = len(A)
    diagp = lambda phi: np.diag(np.exp(1.j * phi))
    B_ = lambda phi, psi: np.dot(diagp(phi), np.dot(B, diagp(psi)))
    norm_sq = lambda x: np.real(np.dot(np.conj(x.ravel()), x.ravel()))

    if cols and rows:
        cost = lambda x: norm_sq(A - B_(x[:d], x[d:]))
        init_angles = rand_angles(2 * d)
    elif rows:
        cost = lambda x: norm_sq(A - B_(x, np.zeros(d)))
        init_angles = rand_angles(d)
    elif cols:
        cost = lambda x: norm_sq(A - B_(np.zeros(d), x))
        init_angles = rand_angles(d)
    else:
        raise ValueError('Either rows or cols should be true')

    result = minimize(cost, init_angles, jac=grad(cost), **kwargs)

    if cols and rows:
        phi, psi = result['x'][:d], result['x'][d:]
    elif rows:
        phi, psi = result['x'], np.zeros(d)
    elif cols:
        phi, psi = np.zeros(d), result['x']

    # Normalization 1 / np.sqrt(2) due to real embedding
    return B_(phi, psi), result['fun'] / np.sqrt(2)
Example #24
    def match(self, scene):

        # 1) determine shape of scene in obs, set mask

        # 2) compute the interpolation kernel between scene and obs

        # 3) compute obs.psf in the frame of scene, store in Fourier space
        # A few notes on this procedure:
        # a) This assumes that scene.psfs and self.psfs have the same spatial shape,
        #    which will need to be modified for multi-resolution datasets
        if self._psfs is not None:
            ipad, ppad = interpolation.get_common_padding(self.images,
                                                          self._psfs,
                                                          padding=self.padding)
            self.image_padding, self.psf_padding = ipad, ppad
            _psfs = np.pad(self._psfs, ((0, 0), *self.psf_padding), 'constant')
            _target = np.pad(scene._psfs, self.psf_padding, 'constant')

            new_kernel_fft = []
            # Deconvolve the target PSF
            target_fft = np.fft.fft2(np.fft.ifftshift(_target))

            for _psf in _psfs:
                observed_fft = np.fft.fft2(np.fft.ifftshift(_psf))
                # Create the matching kernel
                kernel_fft = observed_fft / target_fft
                # Take the inverse Fourier transform to normalize the result
                # Trials without this operation are slow to converge, but in the future
                # we may be able to come up with a method to normalize in the Fourier Transform
                # and avoid this step.
                kernel = np.fft.ifft2(kernel_fft)
                kernel = np.fft.fftshift(np.real(kernel))
                kernel /= kernel.sum()
                # Store the Fourier transform of the matching kernel
                new_kernel_fft.append(np.fft.fft2(np.fft.ifftshift(kernel)))
            self.psfs_fft = np.array(new_kernel_fft)

        return self
Example #25
    def g_out_antihess(y):
        lp = snp_log_probs(y)
        ret = 0.0
        for l in seg_sites._get_likelihood_sequences(lp):
            L = len(l)
            lc = make_constant(l)

            fft = np.fft.fft(l)
            # (assumes l is REAL)
            assert np.all(np.imag(l) == 0.0)
            fft_rev = np.conj(fft) * np.exp(
                2 * np.pi * 1j * np.arange(L) / float(L))

            curr = 0.5 * (fft * fft_rev - fft * make_constant(fft_rev) -
                          make_constant(fft) * fft_rev)
            curr = np.fft.ifft(curr)[(L - 1)::-1]

            # make real
            assert np.allclose(np.imag(curr / L), 0.0)
            curr = np.real(curr)
            curr = curr[0] + 2.0 * np.sum(curr[1:int(np.sqrt(L))])
            ret = ret + curr
        return ret
Example #26
def fft_convolve(*images):
    """Use FFT's to convove an image with a kernel

    Parameters
    ----------
    images: list of array-like
        A list of images to convolve.

    Returns
    -------
    result: array
        The convolution in pixel space of the input images.
    """
    from autograd.numpy.numpy_boxes import ArrayBox
    Images = [np.fft.fft2(np.fft.ifftshift(img)) for img in images]
    if np.any([isinstance(img, ArrayBox) for img in images]):
        Convolved = Images[0]
        for img in Images[1:]:
            Convolved = Convolved * img
    else:
        Convolved = np.prod(Images, 0)
    convolved = np.fft.ifft2(Convolved)
    return np.fft.fftshift(np.real(convolved))
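
A sanity-check sketch (assuming autograd is installed, since the function imports `ArrayBox`): convolving with a centered delta kernel is the identity, because the delta's FFT is all ones.

import numpy as np

img = np.random.randn(8, 8)
delta = np.zeros((8, 8))
delta[4, 4] = 1.0  # center pixel of an even-sized grid
assert np.allclose(fft_convolve(img, delta), img)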
Example #27
def realify(Y):
    """Convert data in k-dimensional complex space to 2k-dimensional
    real space.

    Parameters
    ----------
    Y : ndarray (k,n)
        Complex-valued array of data.

    Returns
    -------
    Yreal : ndarray (2k,n)
        Real-valued array with the real and imaginary parts of `Y` interleaved
        along the first axis.

    """

    if Y.ndim == 1:
        Yreal = np.zeros(2 * Y.shape[0])
    else:
        Yreal = np.zeros((2 * Y.shape[0], Y.shape[1]))
    Yreal[0::2] = np.real(Y)
    Yreal[1::2] = np.imag(Y)
    return Yreal
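
An interleaving demo (assuming numpy as `np`): even rows carry the real parts, odd rows the imaginary parts.

Y = np.array([1.0 + 2.0j, 3.0 - 4.0j])
print(realify(Y))  # [ 1.  2.  3. -4.]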
Example #28
    def cost(self, controls, densities, system_eval_step):
        """
        Compute the penalty.

        Arguments:
        controls
        densities
        system_eval_step
        
        Returns:
        cost
        """
        # The cost is the overlap (fidelity) of the evolved density and each
        # forbidden density.
        cost = 0
        for i, forbidden_densities_dagger_ in enumerate(
                self.forbidden_densities_dagger):
            density = densities[i]
            density_cost = 0
            for forbidden_density_dagger in forbidden_densities_dagger_:
                inner_product = (
                    anp.trace(anp.matmul(forbidden_density_dagger, density)) /
                    self.hilbert_size)
                fidelity = anp.real(inner_product *
                                    anp.conjugate(inner_product))
                density_cost = density_cost + fidelity
            #ENDFOR
            density_cost_normalized = (density_cost /
                                       self.forbidden_densities_count[i])
            cost = cost + density_cost_normalized
        #ENDFOR

        # Normalize the cost for the number of evolving densities
        # and the number of times the cost is computed.
        cost_normalized = cost / self.cost_normalization_constant

        return cost_normalized * self.cost_multiplier
Example #29
def test_real_type():
    fun = lambda x: np.sum(np.real(x))
    df = grad(fun)
    assert type(df(1.0)) == float
    assert type(df(1.0j)) == complex
Example #30
def u2r(*tlist1):
    return np.real(u2_alt(*tlist1))
Example #31
    Returns
    -------
    np.ndarray
        shape=(2,2)

    """
    # print('mmbbvv, pu2', pu2r(*tlist) +1j* pu2r(*tlist))
    return pu2r(*tlist) + 1j * pu2i(*tlist)


defvjp(
    pu2r,
    # defines vector-jacobian-product of pu2r
    # g.shape == pu2r.shape
    lambda ans, *tlist: lambda g: np.sum(g * np.real(d_u2(0, *tlist))),
    lambda ans, *tlist: lambda g: np.sum(g * np.real(d_u2(1, *tlist))),
    lambda ans, *tlist: lambda g: np.sum(g * np.real(d_u2(2, *tlist))),
    lambda ans, *tlist: lambda g: np.sum(g * np.real(d_u2(3, *tlist))),
    argnums=range(4))

defvjp(
    pu2i,
    # defines vector-jacobian-product of pu2i
    # g.shape == pu2i.shape
    lambda ans, *tlist: lambda g: np.sum(g * np.imag(d_u2(0, *tlist))),
    lambda ans, *tlist: lambda g: np.sum(g * np.imag(d_u2(1, *tlist))),
    lambda ans, *tlist: lambda g: np.sum(g * np.imag(d_u2(2, *tlist))),
    lambda ans, *tlist: lambda g: np.sum(g * np.imag(d_u2(3, *tlist))),
    argnums=range(4))
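
The same `defvjp` pattern on a toy primitive, as a minimal sketch of autograd's extension API (the names here are illustrative, not from the source):

import autograd.numpy as np
from autograd import grad
from autograd.extend import primitive, defvjp

@primitive
def square_re(z):
    return np.real(z) ** 2

# vjp: gradient of Re(z)^2 w.r.t. z, applied to the incoming cotangent g
defvjp(square_re, lambda ans, z: lambda g: g * 2 * np.real(z))

print(grad(square_re)(3.0))  # 6.0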
Example #32
def to_scalar(x):
    if isinstance(getval(x), list) or isinstance(getval(x), tuple):
        return sum([to_scalar(item) for item in x])
    return np.sum(np.real(np.sin(x)))
Example #33
def test_real_type():
    fun = lambda x: np.sum(np.real(x))
    df = grad(fun)
    assert np.isrealobj(df(2.0))
    assert np.iscomplexobj(df(1.0j))
Example #34
def norm(x):
    return np.mean(np.square(np.real(x)) + np.square(np.imag(x)))
Example #36
#### find R_max via bisection method ####

# min value T
T1 = 1e-8
shannon_capacity_fact_fixed_time = make_shannon_capacity_fact_fixed_time(T1)
problem = Problem(manifold=manifold_fact, cost=shannon_capacity_fact_fixed_time, verbosity=0)
L_opt1 = solver.solve(problem)
p_1 = -shannon_capacity_fact_fixed_time(L_opt1)
# print(p_1)

Sigma_opt1 = np.dot(L_opt1, L_opt1.transpose())
Sigma_opt_norm1 = Sigma_opt1 / np.trace(Sigma_opt1)

eigs, eigvs = np.linalg.eig(Sigma_opt_norm1)
eff_rank1 = np.square(np.sum(np.real(eigs))) / np.sum(np.square(np.real(eigs)))

# max value T
T2 = 10.
shannon_capacity_fact_fixed_time = make_shannon_capacity_fact_fixed_time(T2)
problem = Problem(manifold=manifold_fact, cost=shannon_capacity_fact_fixed_time, verbosity=0)
L_opt2 = solver.solve(problem)
p_2 = -shannon_capacity_fact_fixed_time(L_opt2)
# print(p_2)

# mean value T
Tm = (T1 + T2) / 2
shannon_capacity_fact_fixed_time = make_shannon_capacity_fact_fixed_time(Tm)
problem = Problem(manifold=manifold_fact, cost=shannon_capacity_fact_fixed_time, verbosity=0)
L_optm = solver.solve(problem)
p_m = -shannon_capacity_fact_fixed_time(L_optm)
Example #37
    def shift_spectrum(self, spec, shift_in_pxl):
        kernel = np.exp(-1j*2*np.pi*self.freq_grid*shift_in_pxl)

        return np.real(np.fft.ifft(np.fft.fft(spec) * kernel))
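
This is the Fourier shift theorem; for an integer shift it reduces to `np.roll`. A standalone sketch, assuming `self.freq_grid` is `np.fft.fftfreq(n)` in cycles per pixel:

import numpy as np

spec = np.random.randn(64)
freq_grid = np.fft.fftfreq(64)
shift = 3
kernel = np.exp(-1j * 2 * np.pi * freq_grid * shift)
shifted = np.real(np.fft.ifft(np.fft.fft(spec) * kernel))
assert np.allclose(shifted, np.roll(spec, shift))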
Example #38
def vjp(g):
    return anp.real(solve_sylvester(ans_transp, ans_transp, g))
Example #39
def fun(a):
    r, i = np.real(a), np.imag(a)
    a = np.abs(r)**1.4 + np.abs(i)**1.3
    return np.sum(np.sin(a))
Example #40
def holomorphic_grad(fun, x):
    if not vspace(x).iscomplex:
        warnings.warn("Input to holomorphic_grad is not complex")
    return grad(lambda x: np.real(fun(x)))(x)
Example #41
for pp in range(M):

    # upload C. elegans connectivity matrix (insert full file path)
    A = np.loadtxt("insert_full_path/A_C_elegans.txt", usecols=range(n))

    for tt in range(279):
        for qq in range(279):
            if tt > qq:
                if random() < 0.5:
                    A_tmp = A[tt, qq]
                    A[tt, qq] = A[qq, tt]
                    A[qq, tt] = A_tmp

    w, v = np.linalg.eig(A)
    A = A - (max(np.real(w)) + 0.1) * I

    # vector of transmission rates
    rate_vec = np.zeros(len(T_vec))

    k = 0

    # plot Shannon transmission rate R_T over time
    for T in T_vec:

        ### compute Shannon transmission rate ###

        def dlyap_iterative(a, q, eps=LYAPUNOV_EPSILON, iter_limit=ITER_LIMIT):
            error = 1E+6

            x = q
Example #42
    Nr = TrackNormal(rx)

    # psie is sort of backwards: higher angles go to the left
    return np.angle(Nx) - np.angle(Nr)


if __name__ == '__main__':
    TRACK_SPACING = 19.8  # cm
    x = SVGPathToTrackPoints("oakwarehouse.path", TRACK_SPACING)[:-1]

    xm = np.array(x)[:, 0] / 50  # 50 pixels / meter
    track_k = TrackCurvature(xm)
    Nx = TrackNormal(xm)
    u = 1j * Nx
    np.savetxt("track_x.txt",
               np.vstack([np.real(xm), np.imag(xm)]).T.reshape(-1),
               newline=",\n")
    np.savetxt("track_u.txt",
               np.vstack([np.real(u), np.imag(u)]).T.reshape(-1),
               newline=",\n")
    np.savetxt("track_k.txt", track_k, newline=",\n")

    ye, val, stuff = OptimizeTrack(xm, 1.4, 0.1)
    psie = RelativePsie(ye, xm)

    rx = u*ye + xm
    raceline_k = TrackCurvature(rx)

    np.savetxt("raceline_k.txt", raceline_k, newline=",\n")
    np.savetxt("raceline_ye.txt", ye, newline=",\n")
    np.savetxt("raceline_psie.txt", psie, newline=",\n")
Example #44
def polyinterp(points, doPlot=None, xminBound=None, xmaxBound=None):
    """ polynomial interpolation
    Parameters
    ----------
    points: shape(pointNum, 3), three columns represent x, f, g
    doPlot: set to 1 to plot, default 0
    xmin: min value that brackets minimum (default: min of points)
    xmax: max value that brackets maximum (default: max of points)
    
    set f or g to sqrt(-1)=1j if they are not known
    the order of the polynomial is the number of known f and g values minus 1

    Returns
    -------
    minPos:
    fmin:
    """
    
    if doPlot is None:
        doPlot = 0

    nPoints = points.shape[0]
    order = np.sum(np.imag(points[:, 1:3]) == 0) - 1
    
    # code for most common case: cubic interpolation of 2 points
    if nPoints == 2 and order == 3 and doPlot == 0:
        [minVal, minPos] = [np.min(points[:,0]), np.argmin(points[:,0])]
        notMinPos = 1 - minPos
        d1 = points[minPos,2] + points[notMinPos,2] - 3*(points[minPos,1]-\
                points[notMinPos,1])/(points[minPos,0]-points[notMinPos,0])

        t_d2 =  d1**2 - points[minPos,2]*points[notMinPos,2]
        if t_d2 > 0:
            d2 = np.sqrt(t_d2)
        else:
            d2 = np.sqrt(-t_d2) * 1j  # np.complex(0, 1) was removed in NumPy 1.24
        if np.isreal(d2):
            t = points[notMinPos,0] - (points[notMinPos,0]-points[minPos,0])*\
                    ((points[notMinPos,2]+d2-d1)/(points[notMinPos,2]-\
                    points[minPos,2]+2*d2))
            minPos = np.min([np.max([t,points[minPos,0]]), points[notMinPos,0]])
        else:
            minPos = np.mean(points[:,0])
        fmin = minVal
        return (minPos, fmin)
    
    xmin = np.min(points[:,0])
    xmax = np.max(points[:,0])

    # compute bounds of interpolation area
    if xminBound is None:
        xminBound = xmin
    if xmaxBound is None:
        xmaxBound = xmax

    # constraints based on available function values
    A = np.zeros((0, order+1))
    b = np.zeros((0, 1))
    for i in range(nPoints):
        if np.imag(points[i,1]) == 0:
            constraint = np.zeros(order+1)
            for j in np.arange(order,-1,-1):
                constraint[order-j] = points[i,0]**j
            A = np.vstack((A, constraint))
            b = np.append(b, points[i,1])
    
    # constraints based on available derivatives
    for i in range(nPoints):
        if np.isreal(points[i,2]):
            constraint = np.zeros(order+1)
            for j in range(1,order+1):
                constraint[j-1] = (order-j+1)* points[i,0]**(order-j)
            A = np.vstack((A, constraint))
            b = np.append(b,points[i,2])
    
    # find interpolating polynomial
    params = np.linalg.solve(A, b)

    # compute critical points
    dParams = np.zeros(order)
    for i in range(params.size-1):
        dParams[i] = params[i] * (order-i)
    
    if np.any(np.isinf(dParams)):
        cp = np.concatenate((np.array([xminBound, xmaxBound]), points[:,0]))
    else:
        cp = np.concatenate((np.array([xminBound, xmaxBound]), points[:,0], \
                np.roots(dParams)))
    
    # test critical points
    fmin = np.inf
    minPos = (xminBound + xmaxBound)/2.
    for xCP in cp:
        if np.imag(xCP) == 0 and xCP >= xminBound and xCP <= xmaxBound:
            fCP = np.polyval(params, xCP)
            if np.imag(fCP) == 0 and fCP < fmin:
                minPos = np.double(np.real(xCP))
                fmin = np.double(np.real(fCP))
    
    # plot situation (omit this part for now since we are not going to use it
    # anyway)

    return (minPos, fmin)
Example #45
def fun(a):
    b = a + 1.0j
    c = b[0] + 1.5
    d = a + b
    e = d + c
    return np.sum(np.sin(np.real(e)))
Example #46
def fun(a):
    r, i = np.real(a), np.imag(a)
    a = np.abs(r)**1.4 + np.abs(i)**1.3
    return to_scalar(a)
Example #47
def to_scalar(x):
    return np.sum(np.real(np.sin(x)))
Example #48
def to_scalar(x):
    if isinstance(x, list) or isinstance(x, ListNode) or \
       isinstance(x, tuple) or isinstance(x, TupleNode):
        return sum([to_scalar(item) for item in x])
    return np.sum(np.real(np.sin(x)))
Example #50
def test_falseyness():
    fun = lambda x: np.real(x**2 if np.iscomplex(x) else np.sum(x))
    check_grads(fun)(2.)
    check_grads(fun)(2. + 1j)