Example #1
import torch
from torch.distributions.binomial import Binomial

def test3():
    """
    Sample from a binomial distribution.
    """
    # 100 trials per element; 0, 0.2, 0.8 and 1 are the per-element success probabilities
    dist = Binomial(100, torch.tensor([0, 0.2, 0.8, 1]))
    dist.sample()  # tensor([  0.,  19.,  72., 100.])
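A quick sanity check on the same distribution (not part of the original example): probs broadcasts elementwise, and the closed-form moments are total_count * p and total_count * p * (1 - p):

import torch
from torch.distributions.binomial import Binomial

dist = Binomial(100, torch.tensor([0., 0.2, 0.8, 1.]))
print(dist.mean)      # tensor([  0.,  20.,  80., 100.])
print(dist.variance)  # tensor([ 0., 16., 16.,  0.])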
Example #2
import os
import numpy as np
import torch
from torch.distributions.binomial import Binomial

def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    n, d = args.n, args.d
    p = np.sqrt(d) / d  # = 1 / sqrt(d)
    probs = torch.ones(d) * p
    sampler = Binomial(1, probs)

    data = sampler.sample(sample_shape=torch.Size([n]))

    # add some correlation: copy the first `offset` columns into the next `offset`
    offset = int(np.sqrt(d))
    for i in range(offset):
        data[:, i + offset] = data[:, i]

    weight = torch.randn(d)
    noise = torch.randn(n) / 2

    labels = data @ weight + noise

    print('data shape', data.shape)

    path = os.path.join(args.save_prefix, 'sparse_' + str(n) + '_' + str(d))
    os.makedirs(path)
    data_path = os.path.join(path, 'data.npy')
    p_path = os.path.join(path, 'p.npy')
    labels_path = os.path.join(path, 'labels.npy')
    np.save(data_path, data.numpy())
    np.save(p_path, p)
    np.save(labels_path, labels.numpy())
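A minimal sketch of loading the dataset back, mirroring the save layout above (the 'data' prefix and n=1000, d=100 are hypothetical):

import os
import numpy as np

path = os.path.join('data', 'sparse_1000_100')
data = np.load(os.path.join(path, 'data.npy'))      # (n, d) binary design matrix
p = np.load(os.path.join(path, 'p.npy'))            # scalar: 1 / sqrt(d)
labels = np.load(os.path.join(path, 'labels.npy'))  # (n,) noisy linear targets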
Example #3
    def __getitem__(self, idx):
        # idx only acts as a counter while generating batches.
        num_item = torch.randint(self.min_item,
                                 self.max_item, (1, ),
                                 dtype=torch.long).item()
        prob = 0.5 * torch.ones([self.seq_len, self.seq_width],
                                dtype=torch.float64)
        seq = Binomial(1, prob)  # each sample() yields a fresh random binary sequence

        # the input is two bits wider than the target to make room for the
        # delimiter flags.
        input_items = torch.zeros([(self.seq_len + 1) * (num_item + 1) + 1,
                                   self.seq_width + 2])
        for i in range(num_item):
            input_items[(self.seq_len + 1) * i, self.seq_width] = 1.0
            input_items[(self.seq_len + 1) * i + 1:(self.seq_len + 1) *
                        (i + 1), :self.seq_width] = seq.sample()

        # pick the query item at random; with a single item,
        # torch.randint(0, 0, ...) would raise, so fall back to index 0
        query_item = 0
        if num_item != 1:
            query_item = torch.randint(0,
                                       num_item - 1, (1, ),
                                       dtype=torch.long).item()
        query_seq = input_items[(self.seq_len + 1) * query_item +
                                1:(self.seq_len + 1) *
                                (query_item + 1), :self.seq_width]
        input_items[(self.seq_len + 1) * num_item,
                    self.seq_width + 1] = 1.0  # query delimiter
        input_items[(self.seq_len + 1) * num_item + 1:(self.seq_len + 1) *
                    (num_item + 1), :self.seq_width] = query_seq
        input_items[(self.seq_len + 1) * (num_item + 1),
                    self.seq_width + 1] = 1.0  # end-of-input delimiter

        # generate the target sequence (the item right after the query in the input list)
        target_item = torch.zeros([self.seq_len, self.seq_width])
        # if the query is the last item, the target stays all-zero

        if query_item != num_item - 1:
            target_item[:self.seq_len, :self.seq_width] = input_items[
                (self.seq_len + 1) * (query_item + 1) + 1:(self.seq_len + 1) *
                (query_item + 2), :self.seq_width]

        return {'input': input_items, 'target': target_item}
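Since __getitem__ ignores idx and draws a fresh random sample on every call, the dataset drops straight into a DataLoader. A minimal sketch, assuming the surrounding class is named CopyTaskDataset and also defines __len__ (neither is shown above); note that num_item varies per sample, so inputs differ in length and the default collate only works with batch_size=1:

from torch.utils.data import DataLoader

# CopyTaskDataset is a hypothetical name for the class this method belongs to
loader = DataLoader(CopyTaskDataset(), batch_size=1)
batch = next(iter(loader))
print(batch['input'].shape)   # (1, (seq_len + 1) * (num_item + 1) + 1, seq_width + 2)
print(batch['target'].shape)  # (1, seq_len, seq_width)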
Example #4
    def get_sample_wlen(self, num_item, seq_len, bs=1):

        prob = 0.5 * torch.ones([seq_len, bs, self.seq_width],
                                dtype=torch.float64)
        seq = Binomial(1, prob)

        # the input is two bits wider than the target to make room for the
        # delimiter flags.
        input_items = torch.zeros([(seq_len + 1) * (num_item + 1) + 1, bs,
                                   self.seq_width + 2])
        for i in range(num_item):
            input_items[(seq_len + 1) * i, :, self.seq_width] = 1.0
            input_items[(seq_len + 1) * i + 1:(seq_len + 1) *
                        (i + 1), :, :self.seq_width] = seq.sample()

        # pick the query item at random; with a single item,
        # torch.randint(0, 0, ...) would raise, so fall back to index 0
        query_item = 0
        if num_item != 1:
            query_item = torch.randint(0,
                                       num_item - 1, (1, ),
                                       dtype=torch.long).item()
        query_seq = input_items[(seq_len + 1) * query_item + 1:(seq_len + 1) *
                                (query_item + 1), :, :self.seq_width]
        input_items[(seq_len + 1) * num_item, :,
                    self.seq_width + 1] = 1.0  # query delimiter
        input_items[(seq_len + 1) * num_item + 1:(seq_len + 1) *
                    (num_item + 1), :, :self.seq_width] = query_seq
        input_items[(seq_len + 1) * (num_item + 1), :,
                    self.seq_width + 1] = 1.0  # end-of-input delimiter

        # generate target sequences: for each batch element, pick the stored item
        # nearest to the query (or farthest, when the query's last bit is 1)
        target_item = torch.zeros([seq_len, bs, self.seq_width])
        for b in range(bs):
            qitem = input_items[(seq_len + 1) * query_item + 1:(seq_len + 1) *
                                (query_item + 1), b, :self.seq_width]
            # the query's last bit selects the rule: farthest item (1) or nearest (0)
            choose_max = qitem.contiguous().view(-1)[-1].item() == 1
            cd = 0.0 if choose_max else float('inf')
            # fall back to the query's own index if there is no other item
            titem = query_item
            for ii in range(num_item):
                if ii != query_item:
                    cur_item = input_items[(seq_len + 1) * ii +
                                           1:(seq_len + 1) * (ii + 1),
                                           b, :self.seq_width]
                    curd = torch.norm(qitem.contiguous().view(-1) -
                                      cur_item.contiguous().view(-1))
                    if choose_max:
                        if curd > cd:
                            titem = ii
                            cd = curd
                    else:
                        if curd < cd:
                            titem = ii
                            cd = curd

            target_item[:seq_len, b, :self.seq_width] = input_items[
                (seq_len + 1) * titem + 1:(seq_len + 1) * (titem + 1),
                b, :self.seq_width]

        return {'input': input_items, 'target': target_item}
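The target logic here differs from Example #3: instead of the item next to the query, it picks the stored item nearest to (or, when the query's last bit is 1, farthest from) the query in Euclidean distance. A standalone sketch of that selection rule on made-up tensors (the class code additionally skips the query itself):

import torch

query = torch.randint(0, 2, (8,)).float()
items = torch.randint(0, 2, (5, 8)).float()
dists = torch.norm(items - query, dim=1)  # Euclidean distance to each stored item
pick = dists.argmax() if query[-1] == 1 else dists.argmin()
target = items[pick]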
Example #5
    def _sampleWidthByAlphas(self):
        # define a Binomial distribution over the n-1 remaining layer filters
        # (we have to choose at least one filter, hence the +1 shift below)
        dist = Binomial(self.outputChannels() - 1, logits=self._alphas)
        # draw from the distribution
        width = 1 + dist.sample().type(torch.int32).item()
        return width

    def calcNewWidthFunc(width: int,
                         alphaWidth: AlphaPerWidthBlock.AlphaWidth):
        # define a Binomial distribution over the width-1 remaining filters
        # (at least one filter must be chosen, hence the +1 shift)
        dist = Binomial(width - 1, logits=alphaWidth.tensor())
        # draw from the distribution
        return 1 + dist.sample().type(torch.int32).item()
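Shifting the binomial draw by one guarantees at least one filter: width = 1 + X with X ~ Binomial(n - 1, sigmoid(alpha)), so E[width] = 1 + (n - 1) * sigmoid(alpha). A quick standalone check (n and alpha are made up here):

import torch
from torch.distributions.binomial import Binomial

n, alpha = 64, torch.tensor(0.5)
dist = Binomial(n - 1, logits=alpha)
widths = 1 + dist.sample((10000,))
print(widths.mean().item())                       # empirical mean, ~40.2
print(1 + (n - 1) * torch.sigmoid(alpha).item())  # analytic mean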
Example #7
    os.makedirs(model_path)

from tensorboardX import SummaryWriter
writer = SummaryWriter(os.path.join('runs', model_name))
n_iter = 0
num_samples = 50
sample = torch.randn(64, opts.nz).double().to(device)
for epoch in range(opts.epochs):
    print('=> Epoch {}'.format(epoch))
    model.train()
    running_loss = []
    for data in tqdm(train_loader):
        image = data[0]
        # treat each pixel intensity as a Bernoulli probability (Binomial with 1 trial)
        m = Binomial(1, image.view(-1, 784))
        # inputs = m.sample(torch.Size([num_samples])).double().to(device)
        inputs = m.sample().expand(num_samples, image.shape[0],
                                   784).double().to(device)
        optimizer.zero_grad()
        loss, bce, kld = model.train_loss(inputs)
        loss.backward()
        optimizer.step()

        running_loss.append(loss.item())
        writer.add_scalar('bce', bce, n_iter)
        writer.add_scalar('kld', kld, n_iter)
        writer.add_scalar('loss', loss, n_iter)
        n_iter += 1

    writer.add_scalar('loss_epoch', np.mean(running_loss), epoch)

    model.eval()
    running_loss = []
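One subtlety in the training loop above: Binomial(1, p) is just Bernoulli(p), and expand() repeats the single binarized sample rather than re-drawing it, so all num_samples copies share one binarization per image (the commented-out line would instead draw independent samples). A standalone illustration:

import torch
from torch.distributions import Bernoulli, Binomial

p = torch.rand(4, 784)
shared = Binomial(1, p).sample().expand(50, 4, 784)  # one draw repeated 50 times
independent = Bernoulli(p).sample(torch.Size([50]))  # 50 independent draws
print(torch.equal(shared[0], shared[1]))             # True
print(torch.equal(independent[0], independent[1]))   # almost surely False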