Example #1
def propagate_plane_to_curved_flat_arbitrary(k,
                                             rs_support,
                                             Eri,
                                             z,
                                             xo,
                                             yo,
                                             qs_center=(0, 0),
                                             kz_mode='local_xy'):
    """Propagate from uniformly sampled plane to arbitrarily sampled curved surface.

    Args:
        k (scalar): Wavenumber.
        rs_support (scalar or pair of scalars): Input aperture along x and y.
        Eri (K*L array): Input field; its shape gives the numbers of input samples along x and y (K and L below).
        z (M*N array): Propagation distances.
        xo (Mx1 array): Output x values.
        yo (N array): Output y values.
        qs_center (pair of scalars): Center of transverse wavenumber support.
        kz_mode (str): 'paraxial' or 'local_xy'.

    Returns:
        Ero (MxN array): Output field.
        gradxyEo (tuple of MxN arrays): Partial derivatives of output field w.r.t. x and y at constant z (not along
            the surface).
    """
    factors = math.calc_plane_to_curved_flat_arbitrary_factors(
        k, rs_support, Eri.shape, z, xo, yo, qs_center, kz_mode)
    invTx, gradxinvTx, invTy, gradyinvTy, Px, Py, Tx, Ty = factors
    # xo=i, yo=j, kx=k, ky=l, xi=m, yi=n.
    Ero = opt_einsum.contract('ik, jl, ijk, ijl, km, ln, mn -> ij', invTx,
                              invTy, Px, Py, Tx, Ty, Eri)
    gradxEo = opt_einsum.contract('ik, jl, ijk, ijl, km, ln, mn -> ij',
                                  gradxinvTx, invTy, Px, Py, Tx, Ty, Eri)
    gradyEo = opt_einsum.contract('ik, jl, ijk, ijl, km, ln, mn -> ij', invTx,
                                  gradyinvTy, Px, Py, Tx, Ty, Eri)
    return Ero, (gradxEo, gradyEo)
Example #2
def mode_coeff_calculation_multiprocessing_wrapper(args):
    # Integrates F(x_1, ..., x_n) * sin((m_1+1)*pi*x_1/L_1) * ... * sin((m_n+1)*pi*x_n/L_n) dx_1 ... dx_n.
    # Written as a separate function so it can be mapped over a multiprocessing pool.

    F, domain_volume, mplus1_pi_over_L, integrator, two_to_the_power_ndims = args
    
    coefficients = []
    for i in range(int(mplus1_pi_over_L.shape[0])):
        integrand = lambda *vars: F(*vars) * tf.reduce_prod(
            tf.sin(oe.contract('i...,i->i...', tf.stack(vars), mplus1_pi_over_L[i], backend='tensorflow')),
            axis=0)
        coefficients.append(
            -integrator(integrand)
            * tf.cast(two_to_the_power_ndims, tf.keras.backend.floatx())
            / (tf.cast(domain_volume, tf.keras.backend.floatx())
               * tf.reduce_sum(tf.square(mplus1_pi_over_L[i]))))
    
    return tf.constant(np.array(coefficients), dtype = tf.keras.backend.floatx())
Example #3
    def forward(self, x, y, z):
        r"""
        Args:
            x (torch.Tensor): ``[batch_size, seq_len, n_in]``.
            y (torch.Tensor): ``[batch_size, seq_len, n_in]``.
            z (torch.Tensor): ``[batch_size, seq_len, n_in]``.

        Returns:
            ~torch.Tensor:
                A scoring tensor of shape ``[batch_size, n_out, seq_len, seq_len, seq_len]``.
                If ``n_out=1``, the dimension for ``n_out`` will be squeezed automatically.
        """

        if self.bias_x:
            x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
        if self.bias_y:
            y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
        w = contract('bzk,oikj->bozij', z, self.weight)
        # [batch_size, n_out, seq_len, seq_len, seq_len]
        s = contract('bxi,bozij,byj->bozxy', x, w, y) / self.n_in**self.scale
        # remove dim 1 if n_out == 1
        s = s.squeeze(1)

        return s
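The two contractions in forward compose into a single einsum; a minimal sketch (made-up sizes, bias terms omitted, a random tensor standing in for self.weight) checking the staged form against the fused expression:

import torch
from opt_einsum import contract

batch_size, seq_len, n_in, n_out = 2, 5, 4, 3
x = torch.randn(batch_size, seq_len, n_in, dtype=torch.float64)
y = torch.randn(batch_size, seq_len, n_in, dtype=torch.float64)
z = torch.randn(batch_size, seq_len, n_in, dtype=torch.float64)
weight = torch.randn(n_out, n_in, n_in, n_in, dtype=torch.float64)

w = contract('bzk,oikj->bozij', z, weight)
staged = contract('bxi,bozij,byj->bozxy', x, w, y)          # two-step form used above
fused = contract('bxi,bzk,oikj,byj->bozxy', x, z, weight, y)  # single fused contraction
assert torch.allclose(staged, fused)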
Example #4
def fold_img_with_L_inv(img, mu, L_inv, scale, threshold, normalize=True):
    device = img.get_device()
    bn, nc, h, w = img.shape
    bn, nk, _ = mu.shape
    # Stop Gradient Flow
    mu_stop = mu.detach()

    # Get Scaled Heatmap
    heat_scal = get_heat_map(mu_stop, scale * L_inv, False, h)
    heat_scal = contract('bkij -> bij', heat_scal)
    heat_scal = torch.clamp(heat_scal, min=0., max=1.)
    heat_scal = torch.where(heat_scal > threshold, heat_scal,
                            torch.zeros_like(heat_scal))

    # Normalize
    norm = torch.sum(heat_scal.reshape(bn, -1),
                     dim=1).unsqueeze(1).unsqueeze(1)
    if normalize:
        heat_scal = heat_scal / norm

    # Return Folded Image around Part Means
    folded_img = contract('bcij, bij -> bcij', img, heat_scal)

    return folded_img
Example #5
    def pseudo_energy(self, l_ijab):
        '''
        Compute the CCSD pseudoenergy

        :param l_ijab: Current iteration L2 (Lambda) amplitudes
        :type l_ijab: numpy array

        :return: pseudoenergy
        :rtype: double
        '''
        o = slice(0, self.no_occ)
        v = slice(self.no_occ, self.no_mo)
        # E = 1/2 <ab|ij> l_ijab
        E_pseudo = 0.5 * contract('abij,ijab->', self.MO[v, v, o, o], l_ijab)
        return E_pseudo
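A small stand-alone check of the scalar contraction pattern used above, with random arrays playing the roles of MO[v, v, o, o] and l_ijab (sizes are arbitrary):

import numpy as np
from opt_einsum import contract

nv, no = 3, 2
W = np.random.rand(nv, nv, no, no)    # stands in for <ab|ij>
l2 = np.random.rand(no, no, nv, nv)   # stands in for l_ijab
E = 0.5 * contract('abij,ijab->', W, l2)
# Full contraction to a scalar equals an element-wise product after matching axes.
assert np.isclose(E, 0.5 * np.sum(W * l2.transpose(2, 3, 0, 1)))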
Example #6
def test_cupy(string):  # pragma: no cover
    views = helpers.build_views(string)
    ein = contract(string, *views, optimize=False, use_blas=False)
    shps = [v.shape for v in views]

    expr = contract_expression(string, *shps, optimize=True)

    opt = expr(*views, backend='cupy')
    assert np.allclose(ein, opt)

    # test non-conversion mode
    cupy_views = backends.convert_arrays_to_cupy(views)
    cupy_opt = expr(*cupy_views, backend='cupy')
    assert isinstance(cupy_opt, cupy.ndarray)
    assert np.allclose(ein, cupy.asnumpy(cupy_opt))
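A minimal NumPy-only sketch of the contract_expression reuse pattern that the backend tests in this listing rely on (shapes are arbitrary):

import numpy as np
import opt_einsum as oe

a = np.random.rand(4, 5)
b = np.random.rand(5, 6)

expr = oe.contract_expression('ij,jk->ik', a.shape, b.shape, optimize=True)
out = expr(a, b)  # the pre-optimized expression can be evaluated repeatedly
assert np.allclose(out, a @ b)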
Example #7
    def hess_WuYang(self, v):

        Vks_a = psi4.core.Matrix.from_array(np.einsum("ijk,k->ij", self.three_overlap, v[:self.nbf]) + self.initial_guess)
        Vks_b = psi4.core.Matrix.from_array(np.einsum("ijk,k->ij", self.three_overlap, v[self.nbf:]) + self.initial_guess)

        self.frags[0].scf(  maxiter=0, #Solved non-self consistently
                            hamiltonian=["kinetic", "external", "xxxtra"], xfock_nm=[Vks_a, Vks_b],
                            get_ingredients=True)

        epsilon_occ_a = self.frags[0].eigs_a.np[:self.frags[0].nalpha, None]
        epsilon_occ_b = self.frags[0].eigs_b.np[:self.frags[0].nbeta, None]
        epsilon_unocc_a = self.frags[0].eigs_a.np[self.frags[0].nalpha:]
        epsilon_unocc_b = self.frags[0].eigs_b.np[self.frags[0].nbeta:]
        epsilon_a = epsilon_occ_a - epsilon_unocc_a
        epsilon_b = epsilon_occ_b - epsilon_unocc_b

        hess = np.zeros((self.nbf*2, self.nbf*2))
        # Alpha electrons
        hess[0:self.nbf, 0:self.nbf] = - 1.0 * contract(
            'ai,bj,ci,dj,ij,abm,cdn -> mn',
            self.frags[0].Ca.np[:, :self.frags[0].nalpha],
            self.frags[0].Ca.np[:, self.frags[0].nalpha:],
            self.frags[0].Ca.np[:, :self.frags[0].nalpha],
            self.frags[0].Ca.np[:, self.frags[0].nalpha:],
            np.reciprocal(epsilon_a), self.three_overlap, self.three_overlap)
        # Beta electrons
        hess[self.nbf:, self.nbf:] = - 1.0 * contract(
            'ai,bj,ci,dj,ij,abm,cdn -> mn',
            self.frags[0].Cb.np[:, :self.frags[0].nbeta],
            self.frags[0].Cb.np[:, self.frags[0].nbeta:],
            self.frags[0].Cb.np[:, :self.frags[0].nbeta],
            self.frags[0].Cb.np[:, self.frags[0].nbeta:],
            np.reciprocal(epsilon_b), self.three_overlap, self.three_overlap)
        hess = (hess + hess.T)

        return hess
Example #8
def test_sequential_logmatmulexp(batch_shape, state_dim, num_steps):
    logits = torch.randn(batch_shape + (num_steps, state_dim, state_dim))
    actual = _sequential_logmatmulexp(logits)
    assert actual.shape == batch_shape + (state_dim, state_dim)

    # Check against einsum.
    operands = list(logits.unbind(-3))
    symbol = (opt_einsum.get_symbol(i) for i in range(1000))
    batch_symbols = ''.join(next(symbol) for _ in batch_shape)
    state_symbols = [next(symbol) for _ in range(num_steps + 1)]
    equation = (','.join(batch_symbols + state_symbols[t] + state_symbols[t + 1]
                         for t in range(num_steps)) +
                '->' + batch_symbols + state_symbols[0] + state_symbols[-1])
    expected = opt_einsum.contract(equation, *operands, backend='pyro.ops.einsum.torch_log')
    assert_close(actual, expected)
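A small sketch of the equation string the test above builds, for batch_shape=() and num_steps=3: it reduces to a chained matrix-product pattern.

import opt_einsum

num_steps = 3
symbol = (opt_einsum.get_symbol(i) for i in range(1000))
batch_symbols = ''          # batch_shape == ()
state_symbols = [next(symbol) for _ in range(num_steps + 1)]
equation = (','.join(batch_symbols + state_symbols[t] + state_symbols[t + 1]
                     for t in range(num_steps)) +
            '->' + batch_symbols + state_symbols[0] + state_symbols[-1])
assert equation == 'ab,bc,cd->ad'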
Example #9
    def solve(self, b):
        """Summary: Under a given b vector, construct a fock matrix and diagonalize it

        F = F0 + V(b)

        FC = SCE

        resulting mo_coeff(C), mo_energy(E), and density matrix are stored as instance attributes
        """
        if not np.allclose(b, self.internal_b, rtol=1e-12, atol=1e-12):
            t = time.time()
            self.internal_b = b.copy()
            Fa = self.F0[0]+contract('t,ijt->ij', b[:self.npot], self.Sijt)
            Fb = self.F0[1]+contract('t,ijt->ij', b[self.npot:], self.Sijt)
            self.fock = ((Fa, Fb))
            e_a, c_a = scf.hf.eig(Fa, self.S)
            e_b, c_b = scf.hf.eig(Fb, self.S)

            self.mo_coeff = np.array((c_a, c_b))
            self.mo_energy = np.array((e_a, e_b))
            self.mo_occ = self.get_occ(self.mo_energy, self.mo_coeff)
            self.dm = self.make_rdm1(self.mo_coeff, self.mo_occ)
            self.t_eig += time.time()-t

            t = time.time()
            if kf_imported:
                if self.Smnt is None:
                    # AO basis for WY is the same as the AO basis of the target density
                    self.grad_a = kspies_fort.einsum_ij_ijt_2t((self.dm[0]-self.dm_tar[0]), self.Sijt, self.nbas, self.npot)
                    self.grad_b = kspies_fort.einsum_ij_ijt_2t((self.dm[1]-self.dm_tar[1]), self.Sijt, self.nbas, self.npot)
                else:
                    self.grad_a = kspies_fort.einsum_ij_ijt_2t(self.dm[0], self.Sijt, self.nbas, self.npot)
                    self.grad_a -= kspies_fort.einsum_ij_ijt_2t(self.dm_tar[0], self.Smnt, len(self.Smnt[:, 0, 0]), self.npot)
                    self.grad_b = kspies_fort.einsum_ij_ijt_2t(self.dm[1], self.Sijt, self.nbas, self.npot)
                    self.grad_b -= kspies_fort.einsum_ij_ijt_2t(self.dm_tar[1], self.Smnt, len(self.Smnt[:, 0, 0]), self.npot)
            else:
                if self.Smnt is None:
                    self.grad_a = contract('ij,ijt->t', (self.dm[0]-self.dm_tar[0]), self.Sijt)
                    self.grad_b = contract('ij,ijt->t', (self.dm[1]-self.dm_tar[1]), self.Sijt)
                else:
                    self.grad_a = contract('ij,ijt->t', self.dm[0], self.Sijt)
                    self.grad_a -= contract('ij,ijt->t', self.dm_tar[0], self.Smnt)
                    self.grad_b = contract('ij,ijt->t', self.dm[1], self.Sijt)
                    self.grad_b -= contract('ij,ijt->t', self.dm_tar[1], self.Smnt)

            self.t_gd += time.time()-t
Example #10
def make_input_tps_param(tps_param, move_point=None, scal_point=None):
    '''
    Assemble the input TPS coordinates and displacement vectors from tps_param by
    applying its scale, rotation and offsets; optionally rescale and shift them
    with scal_point and move_point.
    '''
    coord = tps_param.coord
    vector = tps_param.vector
    offset = tps_param.offset
    offset_2 = tps_param.offset_2
    rot_mat = tps_param.rot_mat
    t_scal = tps_param.t_scal

    scaled_coord = contract('bk,bck->bck', t_scal,
                            coord + vector - offset) + offset
    t_vector = contract('blk,bck->bcl', rot_mat,
                        scaled_coord - offset_2) + offset_2 - coord

    if move_point is not None and scal_point is not None:
        coord = contract('bk,bck->bck', scal_point, coord + move_point)
        t_vector = contract('bk,bck->bck', scal_point, t_vector)

    else:
        assert (move_point is None and scal_point is None)

    return coord, t_vector
Example #11
def test_custom_random_optimizer():
    class NaiveRandomOptimizer(oe.path_random.RandomOptimizer):
        @staticmethod
        def random_path(r, n, inputs, output, size_dict):
            """Picks a completely random contraction order."""
            np.random.seed(r)
            ssa_path = []
            remaining = set(range(n))
            while len(remaining) > 1:
                i, j = np.random.choice(list(remaining), size=2, replace=False)
                remaining.add(n + len(ssa_path))
                remaining.remove(i)
                remaining.remove(j)
                ssa_path.append((i, j))
            cost, size = oe.path_random.ssa_path_compute_cost(
                ssa_path, inputs, output, size_dict)
            return ssa_path, cost, size

        def setup(self, inputs, output, size_dict):
            self.was_used = True
            n = len(inputs)
            trial_fn = self.random_path
            trial_args = (n, inputs, output, size_dict)
            return trial_fn, trial_args

    eq, shapes = oe.helpers.rand_equation(5, 3, seed=42, d_max=3)
    views = list(map(np.ones, shapes))

    exp = oe.contract(eq, *views, optimize=False)

    optimizer = NaiveRandomOptimizer(max_repeats=16)
    out = oe.contract(eq, *views, optimize=optimizer)
    assert exp == out
    assert optimizer.was_used

    assert len(optimizer.costs) == 16
Example #12
    def compute_ccsd_dipole(self, t1=None,t2=None,l1=None,l2=None):
        if t1 is None: t1 = self.t1
        if t2 is None: t2 = self.t2
        if l1 is None: l1 = self.l1
        if l2 is None: l2 = self.l2

        D = self.compute_ccsd_density(t1,t2,l1,l2)

        # Compute CCSD correlated dipole
        dipoles_elec = []
        for n in range(3):
            d = contract('ui,uv,vj->ij',self.npC,np.asarray(self.ints[n]),self.npC)
            mu = ndot('ij,ji->',d,D)
            dipoles_elec.append(mu)
        return dipoles_elec
Example #13
    def calculate_lambda_tensor(self, alpha, scattering_inverse):
        # TODO: replace with same caching strategy as rest of code
        n_k_points = self.n_k_points
        third_bandwidth = self.phonons.third_bandwidth
        if self.phonons.is_classic:
            stat_label = 'c'
        else:
            stat_label = 'q'
        str_to_add = str(n_k_points) + '_' + str(alpha) + '_' + str(
            int(self.phonons.temperature)) + '_' + stat_label
        lamdb_filename = 'lamdb_' + '_' + str_to_add
        psi_filename = 'psi_' + str_to_add
        psi_inv_filename = 'psi_inv_' + str_to_add
        if third_bandwidth is not None:
            lamdb_filename = lamdb_filename + '_' + str(third_bandwidth)
            psi_filename = psi_filename + '_' + str(third_bandwidth)
            psi_inv_filename = psi_inv_filename + '_' + str(third_bandwidth)

        lamdb_filename = lamdb_filename + '.npy'
        psi_filename = psi_filename + '.npy'
        psi_inv_filename = psi_inv_filename + '.npy'

        try:
            self._lambd = np.load(lamdb_filename, allow_pickle=True)
            self._psi = np.load(psi_filename, allow_pickle=True)
            self._psi_inv = np.load(psi_inv_filename, allow_pickle=True)
        except FileNotFoundError as err:
            logging.info(err)
            n_phonons = self.n_phonons
            physical_mode = self.phonons.physical_mode.reshape(n_phonons)
            velocity = self.phonons.velocity.real.reshape(
                (n_phonons, 3))[physical_mode, :]
            heat_capacity = self.phonons.heat_capacity.flatten()[physical_mode]
            sqr_heat_capacity = heat_capacity**0.5

            v_new = velocity[:, alpha]
            lambd_tensor = contract('m,m,mn,n->mn', sqr_heat_capacity, v_new,
                                    scattering_inverse, 1 / sqr_heat_capacity)

            # evals and evect equations
            # lambd_tensor = psi.dot(np.diag(lambd)).dot(psi_inv)
            # lambd_tensor.dot(psi) = psi.dot(np.diag(lambd))

            self._lambd, self._psi = np.linalg.eig(lambd_tensor)
            self._psi_inv = np.linalg.inv(self._psi)
            np.save(lamdb_filename, self._lambd)
            np.save(psi_filename, self._psi)
            np.save(psi_inv_filename, self._psi_inv)
Example #14
    def vp_wy_r(self, dd_a, dd_b):
        """ 
        Performs the Wu-Yang Method on the grid
        """

        dvp = np.zeros_like(self.molecule.Da)
        dd = dd_a + dd_b 

        #Bring grid information
        points_func = self.molecule.Vpot.properties()[0]
        
        #Calculate denominator
        for block in range(self.molecule.Vpot.nblocks()):
            grid_block = self.molecule.Vpot.get_block(block)
            points_func.compute_points(grid_block)
            npoints = grid_block.npoints()
            lpos = np.array(grid_block.functions_local_to_global())
            w = np.array(grid_block.w())
            phi = np.array(points_func.basis_values()["PHI"])[:npoints, :lpos.shape[0]]

            x_a = np.zeros((npoints, npoints))
            x_b = np.zeros((npoints, npoints))

            for frag in self.frags:
                orb_a = frag.orbitals["alpha_r"]
                orb_b = frag.orbitals["beta_r"]
                
                for i_occ in range(0,frag.nalpha):
                    for i_vir in range(frag.nalpha, frag.nbf):
                        
                        den = frag.eigs_a.np[i_occ] - frag.eigs_a.np[i_vir]
                        num = np.zeros((npoints, npoints))

                        for r1 in range(npoints):
                            for r2 in range(npoints):
                                num[r1, r2] = (orb_a[str(i_occ)][block][r1] * orb_a[str(i_vir)][block][r1]
                                               * orb_a[str(i_vir)][block][r2] * orb_a[str(i_occ)][block][r2])
                        x_a += num / den

                        # Assume x_b = x_a, so x_a stands in for both spin channels below.

            dvp_block = np.zeros((npoints))
            for r1 in range(npoints):
                dvp_block += (1 / (x_a[r1, :] + x_a[r1, :])) * dd[block] * w  

            vtmp = contract('pb,p,p,pa->ab', phi, dvp_block, w, phi)
            dvp[(lpos[:, None], lpos)] += 0.5 * (vtmp + vtmp.T)

        return dvp, dvp
Example #15
def test_torch(string):

    views = helpers.build_views(string)
    ein = contract(string, *views, optimize=False, use_blas=False)
    shps = [v.shape for v in views]

    expr = contract_expression(string, *shps, optimize=True)

    opt = expr(*views, backend='torch')
    assert np.allclose(ein, opt)

    # test non-conversion mode
    torch_views = [backends.to_torch(view) for view in views]
    torch_opt = expr(*torch_views)
    assert isinstance(torch_opt, torch.Tensor)
    assert np.allclose(ein, torch_opt.cpu().numpy())
Example #16
    def ELBO_prior(self, W, q_A, q_gamma, q_alpha, q_mu, q_pi, phi):
        '''
        Takes the expectation of

        log p(A) + sum_k [ log p(mu_k) + log p(Sigma_k) ] + sum_i [ log p(pi_i) + log p(gamma_i) ]
        + sum_i sum_j sum_k z_ijk [ log pi_ik + log p(alpha_ij) + log p(beta_ij) ]

        NOTE: this term is off by additive constants; it is not the true ELBO.
        '''

        #first_term = -0.5* ((q_A**2).sum() + (q_mu**2).sum() + (q_gamma**2).sum())
        second_term1 = torch.log(q_pi + self.epsilon)
        second_term = contract('ijk,ik->', phi, second_term1)
        #third_term = -0.5* contract('ijk,ij->',phi, q_alpha.exp()**2)

        return second_term  #+ third_term + first_term
Example #17
    def reintegrate_ao(self, function):
        f_nm = np.zeros((self.nbf, self.nbf))
        points_func = self.frags[0].Vpot.properties()[0]

        for block in range(self.nblocks):
            grid_block = self.frags[0].Vpot.get_block(block)
            points_func.compute_points(grid_block)
            npoints = grid_block.npoints()
            lpos = np.array(grid_block.functions_local_to_global())
            w = np.array(grid_block.w())
            phi = np.array(points_func.basis_values()["PHI"])[:npoints, :lpos.shape[0]]
            print(phi.shape)
            print(function[block])
            vtmp = contract('pb,p,p,pa->ab', phi, function[block], w, phi)
            f_nm[(lpos[:, None], lpos)] += 0.5 * (vtmp + vtmp.T)

        return f_nm
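The block integral above is a weighted quadrature; a minimal sketch with random stand-ins for the block quantities (npoints and nbf are arbitrary) showing what the einsum computes:

import numpy as np
from opt_einsum import contract

npoints, nbf = 10, 4
phi = np.random.rand(npoints, nbf)   # basis function values on the grid block
f = np.random.rand(npoints)          # function values on the block
w = np.random.rand(npoints)          # quadrature weights
vtmp = contract('pb,p,p,pa->ab', phi, f, w, phi)
assert np.allclose(vtmp, phi.T @ (phi * (f * w)[:, None]))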
Example #18
def test_tensorflow(string):
    views = helpers.build_views(string)
    ein = contract(string, *views, optimize=False, use_blas=False)
    opt = np.empty_like(ein)

    shps = [v.shape for v in views]
    expr = contract_expression(string, *shps, optimize=True)

    sess = tf.Session()
    with sess.as_default():
        expr(*views, backend='tensorflow', out=opt)

    assert np.allclose(ein, opt)

    # test non-conversion mode
    tensorflow_views = backends.convert_arrays_to_tensorflow(views)
    expr(*tensorflow_views, backend='tensorflow')
Example #19
def get_heat_map(mu, L_inv, device):
    h, w, nk = 64, 64, L_inv.shape[1]

    y_t = torch.linspace(-1., 1., h).reshape(h, 1).repeat(1, w).unsqueeze(-1)
    x_t = torch.linspace(-1., 1., w).reshape(1, w).repeat(h, 1).unsqueeze(-1)

    y_t_flat = y_t.reshape(1, 1, 1, -1)
    x_t_flat = x_t.reshape(1, 1, 1, -1)

    mesh = torch.cat((y_t_flat, x_t_flat), dim=-2).to(device)
    dist = mesh - mu.unsqueeze(-1)
    proj_precision = contract('bnik, bnkf -> bnif', L_inv, dist) ** 2  # tf.matmul(precision, dist)**2
    proj_precision = torch.sum(proj_precision, -2)  # sum x and y axis
    heat = 1 / (1 + proj_precision)
    heat = heat.reshape(-1, nk, h, w)  # [bn, nk, h, w]

    return heat
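A minimal usage sketch for get_heat_map above; the shapes are inferred from the einsum 'bnik,bnkf->bnif': mu is [bn, nk, 2] and L_inv is [bn, nk, 2, 2].

import torch

bn, nk = 1, 3
mu = torch.zeros(bn, nk, 2)                 # parts centred at the origin
L_inv = torch.eye(2).repeat(bn, nk, 1, 1)   # identity precision factors
heat = get_heat_map(mu, L_inv, device='cpu')
assert heat.shape == (bn, nk, 64, 64)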
Example #20
def test_naive_ubersum(equation, plates):
    inputs, outputs, operands, sizes = make_example(equation)

    actual = naive_ubersum(equation, *operands, plates=plates)

    assert isinstance(actual, tuple)
    assert len(actual) == len(outputs)
    for output, actual_part in zip(outputs, actual):
        expected_shape = tuple(sizes[dim] for dim in output)
        assert actual_part.shape == expected_shape
        if not plates:
            equation_part = ','.join(inputs) + '->' + output
            expected_part = opt_einsum.contract(equation_part, *operands,
                                                backend='pyro.ops.einsum.torch_log')
            assert_equal(expected_part, actual_part,
                         msg=u"For output '{}':\nExpected:\n{}\nActual:\n{}".format(
                             output, expected_part.detach().cpu(), actual_part.detach().cpu()))
Example #21
def contract(names, *tensors):
    args = []
    ids = {}
    seen_names = []
    for t in tensors:
        group = []
        for name in t._schema._names:
            if name not in ids:
                ids[name] = len(ids)
                seen_names.append(name)
            group.append(ids[name])
        args.append(t._tensor)
        args.append(group)
    names = names.split()
    keep = [n for n in seen_names if n not in names]
    args.append([ids[n] for n in keep])
    return NamedTensor(oe.contract(*args, backend="torch"), keep)
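A plain-NumPy sketch of the interleaved (operand, index-list) calling convention of opt_einsum.contract that the wrapper above assembles (arrays are arbitrary):

import numpy as np
import opt_einsum as oe

a = np.random.rand(3, 4)
b = np.random.rand(4, 5)
# Equivalent to oe.contract('ij,jk->ik', a, b): shared index 1 ('j') is summed out.
out = oe.contract(a, [0, 1], b, [1, 2], [0, 2])
assert np.allclose(out, a @ b)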
Example #22
def s_term_bernoulli(y_batch, gamma_batch, eta, eps=1e-8):
    # eps: small constant keeping the probabilities strictly inside (0, 1) before taking logs.
    g0 = gamma_batch.sum(dim=1, keepdim=True)
    g = gamma_batch / g0
    probs = contract("mk,k->m", g, eta, backend="torch").sigmoid()
    # to prevent overflows in log
    probs_cpy = probs
    if probs.min() <= 0:
        c = probs.min().detach()
        probs = probs - c + eps
    s_term1 = (y_batch * probs.log()).sum()
    probs = probs_cpy
    if probs.max() >= 1:
        c = probs.max().detach()
        probs = probs - (c - 1) - eps
    s_term2 = ((1 - y_batch) * (1 - probs).log()).sum()
    s_term = s_term1 + s_term2
    return s_term
Example #23
    def build_Dov(self, t1, t2, l1, l2):  # complete
        if self.ccwfn.model == 'CCD':
            Dov = np.zeros_like(t1)
        else:
            Dov = 2.0 * t1.copy()
            Dov += 2.0 * contract('me,imae->ia', l1, t2)
            Dov -= contract('me,miae->ia', l1, self.ccwfn.build_tau(t1, t2))
            tmp = contract('mnef,inef->mi', l2, t2)
            Dov -= contract('mi,ma->ia', tmp, t1)
            tmp = contract('mnef,mnaf->ea', l2, t2)
            Dov -= contract('ea,ie->ia', tmp, t1)
        return Dov
Example #24
def test_contract_expressions(string, optimize, use_blas, out_spec):
    views = helpers.build_views(string)
    shapes = [view.shape for view in views]
    expected = contract(string, *views, optimize=False, use_blas=False)

    expr = contract_expression(string, *shapes, optimize=optimize, use_blas=use_blas)

    if out_spec and ("->" in string) and (string[-2:] != "->"):
        out, = helpers.build_views(string.split('->')[1])
        expr(*views, out=out)
    else:
        out = expr(*views)

    assert np.allclose(out, expected)

    # check representations
    assert string in expr.__repr__()
    assert string in expr.__str__()
Example #25
def build_Wabef(t1, t2):
    """Builds [Stanton:1991:4334] Eqn. 7"""
    # Rate limiting step written using tensordot, ~10x faster
    # The commented out lines are consistent with the paper

    Wabef = MO[v, v, v, v].copy()

    Pab = contract('baef->abef', np.tensordot(t1, MO[v, o, v, v], axes=(0, 1)))
    # Pab = np.einsum('mb,amef->abef', t1, MO[v, o, v, v])

    Wabef -= Pab
    Wabef += Pab.swapaxes(0, 1)

    tmp_tau = build_tau(t1, t2)

    Wabef += 0.25 * np.tensordot(tmp_tau, MO[v, v, o, o], axes=((0, 1), (2, 3)))
    # Wabef += 0.25 * np.einsum('mnab,mnef->abef', tmp_tau, MO[o, o, v, v])
    return Wabef
Example #26
def test_eps_single_pixel_output() -> None:
    input = torch.randn((2, 3, 2, 2, 2), dtype=torch.float64)
    core = torch.rand((*(2 for _ in range(8)), 4), dtype=torch.float64)
    eps_result = rearrange(eps_one_by_one(core, input), "b () () o -> b o")
    assert eps_result.shape == (3, 4)
    oe_result = oe.contract(
        "01234567θ,b0,b1,b2,b3,b4,b5,b6,b7->bθ",
        core,
        input[0, :, 0, 0],
        input[1, :, 0, 0],
        input[0, :, 0, 1],
        input[1, :, 0, 1],
        input[0, :, 1, 0],
        input[1, :, 1, 0],
        input[0, :, 1, 1],
        input[1, :, 1, 1],
    )
    assert torch.allclose(eps_result, oe_result)
Example #27
def modified_llr_codeword(LLR_Info_bits):

    # The generator matrix of shape (m+1, 2^m) for the RM(m, 1) code is needed here, so load it beforehand.
    # LLR_Info_bits is of shape (batch*num_sparse, m + 1).

    required_LLR_info = contract(
        'ij , jk ->ikj', LLR_Info_bits,
        Generator_Matrix_cuda)  # (batch*num_sparse, 2^m, m+1)

    sign_matrix = (-1)**((required_LLR_info < 0).sum(2)).float()  # (batch*num_sparse, 2^m)

    min_abs_LLR_info, _ = torch.min(
        torch.where(required_LLR_info == 0.,
                    torch.max(required_LLR_info.abs()) + 1,
                    required_LLR_info.abs()),
        dim=2)

    return sign_matrix * min_abs_LLR_info
Example #28
def test_slicer():

    eq, shapes = oe.helpers.rand_equation(30, reg=5, seed=42, d_max=3)
    arrays = [np.random.uniform(size=s) for s in shapes]
    path, info = oe.contract_path(eq, *shapes, shapes=True)
    expected = oe.contract(eq, *arrays, optimize=path)

    sf = ctg.SliceFinder(info, target_size=1_000_000, target_overhead=None)
    inds, ccost = sf.search()

    assert info.largest_intermediate > 1_000_000
    assert ccost.size <= 1_000_000
    assert ccost.total_flops > info.opt_cost
    assert len(inds) > 1

    sc = sf.SlicedContractor(arrays)
    assert sc.total_flops == ccost.total_flops
    assert sc.contract_all() == pytest.approx(expected)
Example #29
    def T2eq_rhs_CC2(self, t1, t2, F):
        v = self.vir
        o = self.occ
        TEI = self.TEI

        fae = F[v, v].copy()
        fmi = F[o, o].copy()
        wabef_2 = self.Wabef_2(t1, t2, F)
        wmnij_2 = self.Wmnij_2(t1, t2, F)
        #All terms in the T2 Equation
        term1 = TEI[o, o, v, v].copy()

        term2tmp = fae  #- 0.5 *contract('me,mb->be', fme, t1)
        term2a = contract('be,ijae->ijab', term2tmp, t2)
        term2 = term2a - term2a.swapaxes(2, 3)  #swap ab

        term3temp = fmi  #+ 0.5 *contract('me,je->mj', fme, t1)
        term3a = -contract('mj,imab->ijab', term3temp, t2)
        term3 = term3a - term3a.swapaxes(0, 1)  #swap ij

        tau = contract('ma,nb->mnab', t1, t1) - contract('na,mb->mnab', t1, t1)
        term44 = 0.5 * contract('mnij,mnab->ijab', wmnij_2, tau)
        term55 = 0.5 * contract('abef,ijef->ijab', wabef_2, tau)

        term6tmp = -contract('mbej,ie,ma->ijab', TEI[o, v, v, o], t1, t1)
        term6 = term6tmp - term6tmp.swapaxes(2, 3) - term6tmp.swapaxes(
            0, 1) + term6tmp.swapaxes(0, 1).swapaxes(2, 3)

        term7tmp = contract('abej,ie->ijab', TEI[v, v, v, o], t1)
        term7 = term7tmp - term7tmp.swapaxes(0, 1)  #swap ij

        term8tmp = -contract('mbij,ma->ijab', TEI[o, v, o, o], t1)
        term8 = term8tmp - term8tmp.swapaxes(2, 3)  #swap ab

        total = term1 + term2 + term3 + term44 + term55 + term6 + term7 + term8
        return total
Example #30
    def LRWibjm(self, t1, t2, F):
        v = self.vir
        o = self.occ
        TEI = self.TEI
        Fme = self.Fme(t1, t2, F)
        Wmnij = self.LSWmnij(t1, t2, F)

        term1 = -0.5*TEI[o, v, o, o].copy()
        term2 = 0.5*contract('ie,jmbe->ibjm', Fme, t2)
        term3 = contract('injm,nb->ibjm', Wmnij, t1)
        term4a = -TEI[o, v, v, o].copy() - contract('inef,nmfb->ibem', TEI[o, o, v, v], t2)
        term4 = contract('ibem,je->ibjm', term4a, t1)
        tau = 0.25*t2 + 0.5*contract('ia,jb->ijab', t1, t1)  #-contract('ib,ja->ijab', t1, t1)
        term5 = -contract('ibef,jmef->ibjm', TEI[o, v, v, v], tau)
        term6 = contract('inem,jneb->ibjm', TEI[o, o, v, o], t2)
        total = term1 + (term2 + term3 + term4 + term5 + term6)
        return total