Example #1
    def mat(self):
        # Dimension of the Hilbert space: 2^N for N spin-1/2 sites.
        numel = 2**prod(self.size)
        if prod(self.size) > 25:
            # Refuse to build matrices with more than 2**25 basis states.
            raise Warning('this hamiltonian could be too large')
        data = lil_matrix((numel, numel), dtype='complex128')
        # Fill non-zero entries: for every basis configuration lhs,
        # self.nnz yields the connected configurations and matrix elements.
        for lhs in IterateAll(size=self.size):
            for rhs, val in self.nnz(lhs):
                data[utils.bin(lhs), utils.bin(rhs)] += val
        return data
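Both mat() implementations index the sparse matrix with utils.bin, which is assumed to map a spin configuration to its integer basis index. A minimal sketch of such a helper (the name config_to_index and the most-significant-bit-first order are assumptions, not the library's API):

import numpy as np

def config_to_index(config):
    # Read a 0/1 configuration as a binary number, most significant
    # bit first, to obtain its row/column index in the 2**N basis.
    index = 0
    for bit in np.asarray(config).ravel().astype(int):
        index = (index << 1) | (bit & 1)
    return index

# e.g. config_to_index([1, 0, 1]) == 5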
Example #2
    def gen(self, ham, itr=1000, nbasis=1000, burn=500, thin=1):
        # Exact ground state of the Hamiltonian; used as the sampling target.
        _, state = ground(ham)
        n = ham.lattice.numel()

        try:
            # os.makedirs(os.path.join(self.root, self.raw_folder))
            os.makedirs(os.path.join(self.root, self.processed_folder))
        except OSError as e:
            if e.errno == errno.EEXIST:
                pass  # directory already exists; nothing to do
            else:
                raise

        data = []
        basis = []
        for _ in range(nbasis):
            # Random local measurement basis: one Pauli operator per site.
            ops = MBOp(choice([sigmax, sigmay, sigmaz]) for _ in range(n))
            # Rotate the state into that basis before sampling.
            _state = ops.invtrans(state)
            # Metropolis sampling from the Born probability |psi(x)|^2.
            sampler = STMetropolis(proposal=lambda x: abs(_state[bin(x)])**2,
                                   size=ham.size)
            data.extend(sampler.sample(itr=itr, burn=burn, thin=thin))
            # Record the basis parameters once per collected sample.
            basis.extend(
                torch.FloatTensor(ops.params()).resize_(2 * n)
                for _ in range(len(sampler.collector)))
        return data, basis
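STMetropolis itself is not shown in these examples. A minimal single-spin-flip Metropolis sampler with the same itr/burn/thin interface might look like the sketch below (all names and the proposal convention are assumptions):

import numpy as np

def metropolis_sample(prob, n_sites, itr=1000, burn=500, thin=1, rng=None):
    # `prob(config)` returns an unnormalized probability for a 0/1 array.
    rng = np.random.default_rng() if rng is None else rng
    config = rng.integers(0, 2, size=n_sites)
    samples = []
    for step in range(burn + itr):
        candidate = config.copy()
        candidate[rng.integers(n_sites)] ^= 1  # flip one randomly chosen spin
        # Accept with probability min(1, p(candidate) / p(config)).
        if rng.random() < prob(candidate) / max(prob(config), 1e-300):
            config = candidate
        if step >= burn and (step - burn) % thin == 0:
            samples.append(config.copy())
    return samples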
Example #3
    def test_generate(self):
        # Build the empirical distribution over the 16 basis configurations.
        syn_dis = np.zeros(16)
        for config, _basis in self.data:
            syn_dis[bin(config)] += 1
        syn_dis = syn_dis / syn_dis.sum()
        # The sampled histogram should approximate the exact Born
        # probabilities |psi|^2 of the state.
        self.assertLess(norm(self.state * self.state.conj() - syn_dis), 0.05)
Example #4
    def check(self):
        # Exact Born probabilities of the target state.
        p1 = self.state * self.state.conj()
        p2 = np.zeros(self.state.shape)
        # Histogram of the training samples over the basis indices.
        for each in self.train_data:
            p2[bin(each)] += 1
        p2 /= np.linalg.norm(p2, ord=1)
        print('normalized frequency sum (should be 1): %s' % sum(p2))
        print('distance to exact (should be 0): %s' % np.linalg.norm(p2 - p1))
Example #5
def gen_tomo_data(n, name):
    h = Ham(name, length=n, J=(1.0, 1.0), pbc=True)
    _, state = ground(h)
    # Random local Pauli basis, one operator per site.
    ops = MBOp(choice([sigmax, sigmay, sigmaz]) for _ in range(n))
    _state = ops.invtrans(state)
    # Sample from the Born probability |psi(x)|^2; the original passed the
    # raw complex amplitude, which is not a valid proposal (compare the
    # other examples in this listing).
    sampler = STMetropolis(proposal=lambda x: abs(_state[bin(x)])**2,
                           size=h.size)
    data = sampler.sample(itr=1000, burn=500, thin=1)
    return [ops.params(), list(data)]
Example #6
    def mat(self):
        """Get the matrix form of the Hamiltonian.

        Returns:
            hamiltonian matrix: a scipy.sparse.lil_matrix

        Raises:
            Warning: when the number of sites exceeds 25 (more than
            2**25 basis states) a Warning is raised.
        """
        # Dimension of the Hilbert space: 2^N for N spin-1/2 sites.
        numel = 2**_prod(self.size)
        if _prod(self.size) > 25:
            raise Warning('this hamiltonian could be too large')
        data = lil_matrix((numel, numel), dtype='complex128')
        # Fill the sparse matrix one basis configuration at a time.
        for lhs in IterateAll(size=self.size):
            for rhs, val in self.nnz(lhs):
                data[utils.bin(lhs), utils.bin(rhs)] += val
        return data
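A short sketch of consuming mat(): convert the lil_matrix to CSR and take the lowest eigenpair with SciPy. Here `ham` stands for any object exposing the mat() method above; this usage is an illustration, not part of the library:

from scipy.sparse.linalg import eigsh

H = ham.mat().tocsr()                   # CSR is efficient for eigsh
vals, vecs = eigsh(H, k=1, which='SA')  # smallest algebraic eigenvalue
ground_energy, ground_state = vals[0], vecs[:, 0]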
Example #7
def sample_state(Gen, op, itr):
    # nparticles, batch_size, noise_size and to_var are module-level names.
    p = np.zeros(2**nparticles)
    for i in range(itr // batch_size):
        noise = torch.randn(batch_size, noise_size)
        z = to_var(noise)
        fakes = Gen(z)
        for each_fake in fakes:
            # Binarize the generator output to a spin configuration and
            # accumulate its basis index in the histogram.
            index = bin(torch.sign(each_fake.data))
            p[index] += 1.0
    return p / sum(p)
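The custom bin() above is applied to a torch.sign output, i.e. a tensor of ±1 values rather than a 0/1 configuration. A sketch of that mapping (sign_to_index is a hypothetical name, and the +1-maps-to-bit-1 convention is an assumption):

import torch

def sign_to_index(signs):
    # Treat +1 as bit 1 and -1 (or 0) as bit 0, most significant bit first.
    index = 0
    for bit in (signs.flatten() > 0).long().tolist():
        index = (index << 1) | bit
    return index

# e.g. sign_to_index(torch.tensor([1., -1., 1.])) == 5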
Example #8
    def test_directsample(self):
        print('testing direct sampler')
        # Case 1: real random distribution, L1-normalized.
        dis = np.random.rand(16)
        dis = dis / np.linalg.norm(dis, ord=1)
        sampler = spl.PseudoRandom(dis, (4, ))
        syn_dis = np.zeros(16)
        for each in sampler.sample(itr=1000):
            syn_dis[utils.bin(each)] += 1
        syn_dis = syn_dis / np.linalg.norm(syn_dis, ord=1)
        self.assertLess(np.linalg.norm(syn_dis - dis), 0.05)

        # Case 2: Born distribution of a random complex state; take the
        # real part so the probabilities are a real-valued array.
        state = np.random.rand(16) + 1.j * np.random.rand(16)
        dis = (state * state.conj()).real
        dis = dis / np.linalg.norm(dis, ord=1)
        sampler = spl.PseudoRandom(dis, (4, ))
        syn_dis = np.zeros(16)
        for each in sampler.sample(itr=1000):
            syn_dis[utils.bin(each)] += 1
        syn_dis = syn_dis / np.linalg.norm(syn_dis, ord=1)
        self.assertLess(np.linalg.norm(syn_dis - dis), 0.05)
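spl.PseudoRandom draws directly from a known distribution. Under the assumption that it is a direct (inverse-CDF style) sampler returning bit-string configurations, a minimal equivalent is:

import numpy as np

def direct_sample(dis, n_sites, itr, rng=None):
    # Draw integer basis indices with probability proportional to dis,
    # then unpack each index into its 0/1 configuration.
    rng = np.random.default_rng() if rng is None else rng
    p = np.asarray(dis, dtype=float)
    indices = rng.choice(p.size, size=itr, p=p / p.sum())
    return [np.array([(i >> k) & 1 for k in reversed(range(n_sites))])
            for i in indices]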
Example #9
    def _test_sampler(self, sampler):
        sampler.sample(itr=10000, burn=500, thin=1, inverse=0.4)
        sample_p = np.zeros(2**4)
        exact_p = np.zeros(2**4)
        # Empirical distribution from the collected samples.
        for each_sample in sampler.collector:
            sample_p[utils.bin(each_sample)] += 1.0
        sample_p = sample_p / sum(sample_p)
        # Exact distribution from the proposal, evaluated on every
        # configuration and normalized.
        for i, each in enumerate(configs.IterateAll(size=self.size)):
            exact_p[i] = self._proposal(each)
        exact_p = exact_p / sum(exact_p)
        self.assertLess(np.linalg.norm(sample_p - exact_p), 0.05)
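configs.IterateAll enumerates every configuration of the lattice, and the test relies on the i-th configuration having basis index i. A plausible binary-spin version (iterate_all is an assumed name, not the library's API):

import itertools
import numpy as np

def iterate_all(n_sites):
    # Yield every 0/1 configuration in lexicographic order, so that the
    # i-th configuration read as a binary number equals i.
    for bits in itertools.product((0, 1), repeat=n_sites):
        yield np.array(bits)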
Example #10
    def compare(self):
        noise_size = self.configs['noise']['size']

        # Exact Born probabilities of the target state.
        p1 = self.data.state * self.data.state.conj()
        p2 = np.zeros(self.data.state.shape)
        # Histogram 1000 generator outputs, binarized to spin configurations.
        z = self.to_var(torch.randn(1000, noise_size))
        for each in self.generator(z):
            p2[bin(torch.sign(each.data))] += 1
        p2 = p2 / np.linalg.norm(p2, ord=1)
        dis = np.linalg.norm(p1 - p2)
        self.collect_info('compare', dis)
        print('compare to exact: %s' % dis, file=self.logfile)
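to_var is the old PyTorch idiom for wrapping tensors before a forward pass. A sketch of what the helper is assumed to do here (on modern PyTorch the Variable wrapper is unnecessary, so only device placement remains):

import torch

def to_var(x):
    # Move the tensor to GPU when one is available; autograd tracking is
    # implicit on modern PyTorch.
    return x.cuda() if torch.cuda.is_available() else x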
Example #11
    def generate(self):
        data = []
        basis = []
        for _ in range(self.nbasis):
            # Random local Pauli basis, one operator per site.
            ops = MBOp(choice([sigmax, sigmay, sigmaz]) for _ in range(self.n))
            _state = ops.invtrans(self.state)
            # Metropolis sampling from the Born probability |psi(x)|^2.
            sampler = STMetropolis(proposal=lambda x: abs(_state[bin(x)])**2,
                                   size=self.size)
            data.extend(
                sampler.sample(itr=self.itr, burn=self.burn, thin=self.thin))
            # Record the basis parameters once per collected sample.
            basis.extend(
                torch.FloatTensor(ops.params()).resize_(2 * self.n)
                for _ in range(len(sampler.collector)))
        return data, basis
Example #12
    def _fake_ansatz(x):
        # Complex amplitude psi(x) of the test state at configuration x.
        return state[utils.bin(x)]
Example #13
    def _fake_proposal(x):
        # Born probability |psi(x)|^2 used as the Metropolis target.
        return abs(state[utils.bin(x)])**2
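Together the two fakes encode the relation used throughout these examples: the Metropolis target is the Born probability |psi(x)|^2 of the ansatz amplitude psi(x). A self-contained check on a random 4-site state (all names here are illustrative, with integer indices standing in for utils.bin(x)):

import numpy as np

state = np.random.rand(16) + 1j * np.random.rand(16)
state /= np.linalg.norm(state)          # L2-normalize the amplitudes

def fake_ansatz(index):
    return state[index]                 # complex amplitude psi(x)

def fake_proposal(index):
    return abs(state[index])**2         # Born probability |psi(x)|^2

# The Born probabilities of a normalized state sum to one.
assert abs(sum(fake_proposal(i) for i in range(16)) - 1.0) < 1e-12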