def quantize(self, x, ig_sm_bkts):
    if self.method == 'none':
        return x
    assert isinstance(x, torch.cuda.FloatTensor)
    bucket_size = self.bucket_size

    # Pad the flattened tensor so it splits evenly into buckets of bucket_size.
    num_tail = math.ceil(x.numel() / bucket_size) * bucket_size - x.numel()
    xv = torch.cat((x.view(-1),
                    torch.zeros(num_tail, dtype=x.dtype, device=x.device)))
    xv = xv.view(-1, bucket_size)
    # One norm per bucket, expanded so every element carries its bucket's norm.
    norm = xv.norm(p=self.norm_type, dim=1, keepdim=True).expand(
        xv.shape[0], xv.shape[1]).contiguous().view(-1).contiguous()

    if ig_sm_bkts:
        # Quantize only the full buckets; the last (padded) bucket is kept as-is.
        # Guard against num_tail == 0, where slicing with [:-0] would drop it.
        tail = xv[-1] if num_tail == 0 else xv[-1][:-num_tail]
        if xv.shape[0] > 1:
            q = torch.zeros_like(xv)
            r = torch.randint_like(xv, 1000001).long()
            self.qdq.qdqGPU(xv[:-1], norm[:-1], q[:-1], r[:-1])
            return torch.cat([q[:-1].view(-1), tail.view(-1)]).view(x.shape)
        else:
            return tail.view(x.shape)
    else:
        q = torch.zeros_like(x)
        r = torch.randint_like(x, 1000001).long()
        self.qdq.qdqGPU(x, norm, q, r)
        return q
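For reference, a minimal standalone sketch of the bucketing step above in plain PyTorch (the project-specific qdq.qdqGPU kernel is not reproduced; the helper name bucket_norms is illustrative):

import math
import torch

def bucket_norms(x, bucket_size, norm_type=2):
    # Pad the flattened tensor so it splits evenly into buckets, then
    # compute one norm per bucket, mirroring the padding/norm logic above.
    num_tail = math.ceil(x.numel() / bucket_size) * bucket_size - x.numel()
    xv = torch.cat((x.view(-1),
                    torch.zeros(num_tail, dtype=x.dtype, device=x.device)))
    xv = xv.view(-1, bucket_size)
    return xv.norm(p=norm_type, dim=1)

print(bucket_norms(torch.randn(10), bucket_size=4).shape)  # torch.Size([3])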
Example #2
def breed_tensor(left_tensor,
                 right_tensor,
                 policy="average",
                 mutate=True,
                 max_weight_mutation=0.00005):
    if policy == "average":
        # A simple average of two tensors
        linear_combined_weight = (left_tensor + right_tensor) / 2
    elif policy == "random":
        # For every weight value, randomly select either from left or right
        left_factor = torch.randint_like(left_tensor, low=0, high=2)
        right_factor = torch.ones_like(right_tensor)
        right_factor = right_factor - left_factor
        left_factor = left_factor.type(torch.float32)
        right_factor = right_factor.type(torch.float32)
        linear_combined_weight = left_tensor * left_factor + right_tensor * right_factor
    else:
        raise ValueError(f"Unknown breeding policy: {policy}")

    if mutate:
        modifier = (
            torch.randint_like(linear_combined_weight, low=0, high=2) *
            torch.randn_like(linear_combined_weight) *
            max_weight_mutation) + 1
        linear_combined_weight *= modifier

    return linear_combined_weight
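Illustrative usage (the shapes and call below are assumptions, not taken from the original project):

import torch

left, right = torch.randn(4, 4), torch.randn(4, 4)
child = breed_tensor(left, right, policy="random", mutate=True)
assert child.shape == left.shape  # an element-wise mix of the two parents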
Example #3
 def get_dataiter_(self, mode='train', batch_size=10, shuffle=False, num_workers=0):
     if mode == 'train':
         dataset_h = torch.tensor([i[0] for i in self.train_triple], device=self.device)
         dataset_r = torch.tensor([i[1] for i in self.train_triple], device=self.device)
         dataset_t = torch.tensor([i[2] for i in self.train_triple], device=self.device)
         dataset_h_hat = torch.randint_like(dataset_h, high=self.entity_size, device=self.device)
         dataset_t_hat = torch.randint_like(dataset_t, high=self.entity_size, device=self.device)
         batch_num = self.train_triple_size // batch_size + 1
         for i in range(batch_num):
             yield dataset_h[i*batch_size : i*batch_size+batch_size], \
                   dataset_r[i*batch_size : i*batch_size+batch_size], \
                   dataset_t[i*batch_size : i*batch_size+batch_size], \
                   dataset_h_hat[i*batch_size : i*batch_size+batch_size], \
                   dataset_t_hat[i*batch_size : i*batch_size+batch_size]
     if mode == 'valid':
         dataset_h = torch.tensor([i[0] for i in self.valid_triple], device=self.device)
         dataset_r = torch.tensor([i[1] for i in self.valid_triple], device=self.device)
         dataset_t = torch.tensor([i[2] for i in self.valid_triple], device=self.device)
         for i in range(self.valid_triple_size):
             yield dataset_h[i], \
                   dataset_r[i], \
                   dataset_t[i]
     if mode == 'test':
         dataset_h = torch.tensor([i[0] for i in self.test_triple], device=self.device)
         dataset_r = torch.tensor([i[1] for i in self.test_triple], device=self.device)
         dataset_t = torch.tensor([i[2] for i in self.test_triple], device=self.device)
         for i in range(self.test_triple_size):
             yield dataset_h[i], \
                   dataset_r[i], \
                   dataset_t[i]
Example #4
def generate_condition(input_matrix):
    # ref_k_array = np.loadtxt("k_array_ref_gan.txt")
    ref_k_array = torch.as_tensor(input_matrix, dtype=torch.float32)
    random_matrix = torch.randint_like(ref_k_array, 2)
    for x in range(7):
        random_matrix = random_matrix * torch.randint_like(ref_k_array, 2)
    output_matrix = ref_k_array * random_matrix
    # output_matrix = torch.zeros_like(input_matrix)
    return output_matrix.cuda(), torch.as_tensor(random_matrix, dtype=torch.float32, device=device)
Example #5
 def reset_parameters(self):
     if self.constant_init:
         nn.init.constant_(self.alpha, 1.)
         if self.random_sign_init:
             if self.probability == -1:
                 with torch.no_grad():
                     factor = torch.ones(
                         self.num_models,
                         device=self.alpha.device).bernoulli_(0.5)
                     factor.mul_(2).add_(-1)
                     self.alpha.data = (self.alpha.t() * factor).t()
                     if self.train_gamma:
                         self.gamma.fill_(1.)
                         self.gamma.data = (self.gamma.t() * factor).t()
             elif self.probability == -2:
                 with torch.no_grad():
                     positives_num = self.num_models // 2
                     factor1 = torch.Tensor(
                         [1 for i in range(positives_num)])
                     factor2 = torch.Tensor([
                         -1 for i in range(self.num_models - positives_num)
                     ])
                     factor = torch.cat([factor1,
                                         factor2]).to(self.alpha.device)
                     self.alpha.data = (self.alpha.t() * factor).t()
                     if self.train_gamma:
                         self.gamma.fill_(1.)
                         self.gamma.data = (self.gamma.t() * factor).t()
             else:
                 with torch.no_grad():
                     self.alpha.bernoulli_(self.probability)
                     self.alpha.mul_(2).add_(-1)
                     if self.train_gamma:
                         self.gamma.bernoulli_(self.probability)
                         self.gamma.mul_(2).add_(-1)
     else:
         nn.init.normal_(self.alpha, mean=1., std=0.5)
         #nn.init.normal_(self.alpha, mean=1., std=1)
         if self.train_gamma:
             nn.init.normal_(self.gamma, mean=1., std=0.5)
             #nn.init.normal_(self.gamma, mean=1., std=1)
         if self.random_sign_init:
             with torch.no_grad():
                 alpha_coeff = torch.randint_like(self.alpha, low=0, high=2)
                 alpha_coeff.mul_(2).add_(-1)
                 self.alpha *= alpha_coeff
                 if self.train_gamma:
                     gamma_coeff = torch.randint_like(self.gamma,
                                                      low=0,
                                                      high=2)
                     gamma_coeff.mul_(2).add_(-1)
                     self.gamma *= gamma_coeff
     if self.bias is not None:
         fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.conv.weight)
         bound = 1 / math.sqrt(fan_in)
         nn.init.uniform_(self.bias, -bound, bound)
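The 0/1-to-±1 conversion used repeatedly above, shown in isolation (a tiny illustrative snippet):

import torch

coeff = torch.randint_like(torch.zeros(6), low=0, high=2)
coeff.mul_(2).add_(-1)
print(coeff)  # each entry is -1.0 or 1.0 with equal probability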
Example #6
def get_neg_batch(head, tail, entity_num):
    neg_head = head.clone()
    neg_tail = tail.clone()
    if random.random() > 0.5:
        offset_tensor = torch.randint_like(neg_head, entity_num)
        neg_head = (neg_head + offset_tensor) % entity_num
    else:
        offset_tensor = torch.randint_like(neg_tail, entity_num)
        neg_tail = (neg_tail + offset_tensor) % entity_num
    return neg_head, neg_tail
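Illustrative usage (the index values are arbitrary): one side of each (head, tail) pair is shifted by a random offset (possibly zero) modulo entity_num.

import random
import torch

head = torch.tensor([0, 5, 7])
tail = torch.tensor([3, 2, 9])
neg_head, neg_tail = get_neg_batch(head, tail, entity_num=10)
print(neg_head, neg_tail)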
Example #7
def randomize_graphs(graph_batch):
    t=graph_batch.x.dtype
    graph_batch.x=torch.randint_like(graph_batch.x.type(torch.float), low=0, high=20).type(t)

    t=graph_batch.edge_attr.dtype
    graph_batch.edge_attr=torch.randint_like(graph_batch.edge_attr, low=0, high=4).type(t)

    edge_index_clone=graph_batch.edge_index.clone().detach()
    graph_batch.edge_index[0,:]=edge_index_clone[1,:]
    graph_batch.edge_index[1,:]=edge_index_clone[0,:]    
    return graph_batch
Example #8
def generate_condition(ref_k_array):
    # ref_k_array = np.loadtxt("k_array_ref_gan.txt")
    ref_k_array = torch.as_tensor(ref_k_array, dtype=torch.float32)
    random_matrix = torch.randint_like(ref_k_array, 2)
    for x in range(6):
        random_matrix = random_matrix * torch.randint_like(ref_k_array, 2)
    print("Number of nonzero elements: ", np.count_nonzero(random_matrix))
    output_matrix = ref_k_array * random_matrix
    plt.matshow(output_matrix)

    # output_matrix = torch.zeros_like(input_matrix)
    return output_matrix.cuda()
Example #9
 def randomsign_ones(shape, dtype=torch.float):
     """Makes a vector of ones with random sign, 
     or if dtype is torch.cfloat, randomized real or imaginary units"""
     x = torch.zeros(shape)
     if dtype == torch.cfloat:
         random4 = torch.randint_like(x, 4)
         r = x + 1 * (random4 == 0) - 1 * (random4 == 1)
         i = x + 1 * (random4 == 2) - 1 * (random4 == 3)
         out = torch.complex(r, i)
     else:
         random2 = torch.randint_like(x, 2)
         out = x + 1 * (random2 == 0) - 1 * (random2 == 1)
     return out.to(dtype)
Example #10
def test_zinb_distribution():
    theta = 100.0 + torch.rand(size=(2, ))
    mu = 15.0 * torch.ones_like(theta)
    pi = torch.randn_like(theta)
    x = torch.randint_like(mu, high=20)
    log_p_ref = log_zinb_positive(x, mu, theta, pi)

    dist = ZeroInflatedNegativeBinomial(mu=mu, theta=theta, zi_logits=pi)
    log_p_zinb = dist.log_prob(x)
    assert (log_p_ref - log_p_zinb).abs().max().item() <= 1e-8

    torch.manual_seed(0)
    s1 = dist.sample((100, ))
    assert s1.shape == (100, 2)
    s2 = dist.sample(sample_shape=(4, 3))
    assert s2.shape == (4, 3, 2)

    log_p_ref = log_nb_positive(x, mu, theta)
    dist = NegativeBinomial(mu=mu, theta=theta)
    log_p_nb = dist.log_prob(x)
    assert (log_p_ref - log_p_nb).abs().max().item() <= 1e-8

    s1 = dist.sample((1000, ))
    assert s1.shape == (1000, 2)
    assert (s1.mean(0) - mu).abs().mean() <= 1e0
    assert (s1.std(0) - (mu + mu * mu / theta)**0.5).abs().mean() <= 1e0

    size = (50, 3)
    theta = 100.0 + torch.rand(size=size)
    mu = 15.0 * torch.ones_like(theta)
    pi = torch.randn_like(theta)
    x = torch.randint_like(mu, high=20)
    dist1 = ZeroInflatedNegativeBinomial(mu=mu,
                                         theta=theta,
                                         zi_logits=pi,
                                         validate_args=True)
    dist2 = NegativeBinomial(mu=mu, theta=theta, validate_args=True)
    assert dist1.log_prob(x).shape == size
    assert dist2.log_prob(x).shape == size

    with pytest.raises(ValueError):
        ZeroInflatedNegativeBinomial(mu=-mu,
                                     theta=theta,
                                     zi_logits=pi,
                                     validate_args=True)
    with pytest.warns(UserWarning):
        dist1.log_prob(-x)  # ensures neg values raise warning
    with pytest.warns(UserWarning):
        dist2.log_prob(0.5 * x)  # ensures float values raise warning
Example #11
 def forward(self):
     a = torch.empty(3, 3).uniform_(0.0, 1.0)
     size = (1, 4)
     weights = torch.tensor([0, 10, 3, 0], dtype=torch.float)
     return (
         # torch.seed(),
         # torch.manual_seed(0),
         torch.bernoulli(a),
         # torch.initial_seed(),
         torch.multinomial(weights, 2),
         torch.normal(2.0, 3.0, size),
         torch.poisson(a),
         torch.rand(2, 3),
         torch.rand_like(a),
         torch.randint(10, size),
         torch.randint_like(a, 4),
         torch.rand(4),
         torch.randn_like(a),
         torch.randperm(4),
         a.bernoulli_(),
         a.cauchy_(),
         a.exponential_(),
         a.geometric_(0.5),
         a.log_normal_(),
         a.normal_(),
         a.random_(),
         a.uniform_(),
     )
Example #12
    def get_trace(self, gradsH):
        """
        compute the Hessian vector product with a random vector v, at the current gradient point,
        i.e., compute the gradient of <gradsH,v>.
        :param gradsH: a list of torch variables
        :return: a list of torch tensors
        """

        params = self.param_groups[0]['params']

        v = [torch.randint_like(p, high=2, device='cuda') for p in params]
        for v_i in v:
            v_i[v_i == 0] = -1
        hvs = torch.autograd.grad(gradsH,
                                  params,
                                  grad_outputs=v,
                                  only_inputs=True,
                                  retain_graph=True)

        hutchinson_trace = []
        for hv, vi in zip(hvs, v):
            param_size = hv.size()
            if len(param_size) <= 2:  # for 0/1/2D tensor
                tmp_output = torch.abs(hv * vi)
                hutchinson_trace.append(
                    tmp_output)  # Hessian diagonal block size is 1 here.
            elif len(param_size) == 4:  # Conv kernel
                # Hessian diagonal block size is the kernel area (e.g. 9 for 3x3):
                # torch.sum() reduces over the spatial dims 2/3.
                tmp_output = torch.abs(
                    torch.sum(torch.abs(hv * vi), dim=[2, 3], keepdim=True)
                ) / vi[0, 1].numel()
                hutchinson_trace.append(tmp_output)

        return hutchinson_trace
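As a small standalone illustration of the Hessian-vector product used above (the gradient of <gradsH, v>): for a quadratic f(w) = 0.5 * w^T A w with symmetric A, the product must equal A @ v. The matrix and names below are only for this sketch.

import torch

A = torch.tensor([[3.0, 1.0], [1.0, 2.0]])   # symmetric, so the Hessian of f is A
w = torch.randn(2, requires_grad=True)
grad_f = torch.autograd.grad(0.5 * w @ A @ w, w, create_graph=True)[0]
v = torch.tensor([1.0, -1.0])
hv = torch.autograd.grad(grad_f, w, grad_outputs=v)[0]
print(hv, A @ v)  # both print the same vector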
Example #13
def collate_fn(batch: list):
    ret = {}
    for key in batch[0].keys():
        ret[key] = [item[key] for item in batch if item[key] is not None]
    try:
        shape = len(ret['real'])
        ret['real'] = torch.stack(ret['real'], dim=0)
        ret['fake'] = torch.stack(ret['fake'], dim=0)
        smooth = ret['smooth'][0]
    except Exception:
        print("empty batch")
        shape = 1
        ret['real'] = torch.zeros((8, 3, 224, 224))
        ret['fake'] = torch.zeros((8, 3, 224, 224))
        smooth = 0

    inputs = torch.cat([ret['real'], ret['fake']], 0)
    labels = torch.cat([torch.zeros((shape, 1)), torch.ones((shape, 1))], 0)
    if smooth != 0:
        mask = (torch.randint_like(labels, 0,
                                   int(1 / smooth) + 1) //
                int(1 / smooth)).bool()
        labels[mask] = -labels[mask] + 1

    return inputs, labels, ret
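A quick check of the label-smoothing mask above: with smooth = 0.1, randint_like draws from {0, ..., int(1/smooth)}, so a label is flipped with probability 1/(int(1/smooth) + 1) ≈ 9.1%, not exactly smooth (the numbers below are only illustrative).

import torch

smooth = 0.1
draws = torch.randint(0, int(1 / smooth) + 1, (1_000_000,))
flip_rate = (draws // int(1 / smooth)).float().mean()
print(flip_rate)  # ~0.0909, i.e. 1 / 11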
Example #14
 def reset_parameters(self):
     if self.affine:
         for l in self.batch_norms:
             nn.init.constant_(l.bias, 0.)
             if self.constant_init:
                 nn.init.constant_(l.weight, 1.)
                 if self.random_sign_init:
                     return
                     #if self.probability  == -1:
                     #    with torch.no_grad():
                     #        factor = torch.ones(self.num_models, device=
                     #                            self.l.weight.device).bernoulli_(0.5)
                     #        factor.mul_(2).add_(-1)
                     #        l.weight = (l.weight.t() * factor).t()
                     #else:
                     #    with torch.no_grad():
                     #        l.weight.bernoulli_(self.probability)
                     #        l.weight.mul_(2).add_(-1)
             else:
                 nn.init.normal_(l.weight, mean=1., std=0.1)
                 #nn.init.normal_(l.weight, mean=1., std=1.)
                 if self.random_sign_init:
                     with torch.no_grad():
                         weight_coeff = torch.randint_like(l.weight,
                                                           low=0,
                                                           high=2)
                         weight_coeff.mul_(2).add_(-1)
                         l.weight *= weight_coeff
Example #15
def get_valid_indices(H: int,
                      W: int,
                      patch_size: int,
                      random_overlap: int = 0):

    vih = torch.arange(random_overlap, H - patch_size - random_overlap + 1,
                       patch_size)
    viw = torch.arange(random_overlap, W - patch_size - random_overlap + 1,
                       patch_size)
    if random_overlap > 0:
        rih = torch.randint_like(vih, -random_overlap, random_overlap)
        riw = torch.randint_like(viw, -random_overlap, random_overlap)
        vih += rih
        viw += riw
    vi = torch.stack(torch.meshgrid(vih, viw)).view(2, -1).t()
    return vi
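Illustrative usage (the sizes are assumptions): top-left corners of 16x16 patches tiled over a 64x64 image, each jittered by up to ±4 pixels.

vi = get_valid_indices(H=64, W=64, patch_size=16, random_overlap=4)
print(vi.shape)  # torch.Size([9, 2]): one (row, col) offset per patch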
Example #16
def word_dropout_raw(x, l, unk_drop_prob, rand_drop_prob, tokenizer):
    if not unk_drop_prob and not rand_drop_prob:
        return x

    assert unk_drop_prob + rand_drop_prob <= 1
    
    unk_idx = tokenizer.unk_token_id
    vocab_size = tokenizer.vocab_size

    noise = torch.rand(x.size(), dtype=torch.float).to(x.device)
    pos_idx = torch.arange(x.size(1)).unsqueeze(0).expand_as(x).to(x.device)
    token_mask = pos_idx < l.unsqueeze(1)

    x2 = x.clone()
    
    # drop to <unk> token
    if unk_drop_prob:
        unk_drop_mask = (noise < unk_drop_prob) & token_mask
        x2.masked_fill_(unk_drop_mask, unk_idx)

    # drop to a random token
    if rand_drop_prob:
        rand_drop_mask = (noise > 1 - rand_drop_prob) & token_mask
        rand_tokens = torch.randint_like(x, vocab_size)
        rand_tokens.masked_fill_(~rand_drop_mask, 0)
        x2.masked_fill_(rand_drop_mask, 0)
        x2 = x2 + rand_tokens
    
    return x2
Example #17
    def forward(self, xdict, cdict, e=None):
        # one step to produce the logits
        _, z_indices = self.encode_to_z(**xdict)
        _, c_indices = self.encode_to_c(**cdict)
        embeddings = None
        if e is not None:
            embeddings = self.encode_to_e(e)

        if self.training and self.pkeep < 1.0:
            mask = torch.bernoulli(
                self.pkeep *
                torch.ones(z_indices.shape, device=z_indices.device))
            mask = mask.round().to(dtype=torch.int64)
            r_indices = torch.randint_like(z_indices,
                                           self.transformer.config.vocab_size)
            a_indices = mask * z_indices + (1 - mask) * r_indices
        else:
            a_indices = z_indices

        cz_indices = torch.cat((c_indices, a_indices), dim=1)

        # target includes all sequence elements (no need to handle first one
        # differently because we are conditioning)
        target = z_indices
        # make the prediction
        logits, _ = self.transformer(cz_indices[:, :-1], embeddings=embeddings)
        # cut off conditioning outputs - output i corresponds to p(z_i | z_{<i}, c)
        logits = logits[:, c_indices.shape[1] - 1:]

        return logits, target
Example #18
            def sample(self):
                em = decode.RandomEmitterSet(1000)
                em.phot = torch.rand_like(em.phot) * 10000
                em.frame_ix = torch.randint_like(em.frame_ix, -10, 5000)
                frames, bg_frames = self.forward(em)

                return em, frames, bg_frames
Example #19
    def trace(self, maxIter=100, tol=1e-3):
        """
        compute the trace of hessian using Hutchinson's method
        maxIter: maximum iterations used to compute trace
        tol: the relative tolerance
        """

        device = self.device
        trace_vhv = []
        trace = 0.

        for i in range(maxIter):
            self.model.zero_grad()
            v = [
                torch.randint_like(p, high=2, device=device)
                for p in self.params
            ]
            # generate Rademacher random variables
            for v_i in v:
                v_i[v_i == 0] = -1

            if self.full_dataset:
                _, Hv = self.dataloader_hv_product(v)
            else:
                Hv = hessian_vector_product(self.gradsH, self.params, v)
            trace_vhv.append(group_product(Hv, v).cpu().item())
            if abs(np.mean(trace_vhv) - trace) / (trace + 1e-6) < tol:
                return trace_vhv
            else:
                trace = np.mean(trace_vhv)

        return trace_vhv
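A minimal standalone check of the Hutchinson estimator used above: for Rademacher vectors v (entries ±1), E[v^T H v] equals trace(H). The matrix below is an arbitrary stand-in for the Hessian.

import torch

torch.manual_seed(0)
H = torch.randn(5, 5)
H = (H + H.t()) / 2                        # symmetric stand-in for a Hessian
estimates = []
for _ in range(5000):
    v = torch.randint_like(H[0], high=2)   # draws 0/1 ...
    v[v == 0] = -1                         # ... mapped to Rademacher +/-1
    estimates.append(v @ H @ v)
print(torch.stack(estimates).mean().item(), torch.trace(H).item())  # nearly equal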
Example #20
def hash_center_multilables(
    labels, Hash_center
):  # label.shape: [batch_size, num_class], Hash_center.shape: [num_class, hash_bits]
    random_center = torch.randint_like(Hash_center[0], 2)
    is_start = True

    for label in labels:

        one_labels = torch.nonzero((label == 1))
        one_labels = one_labels.squeeze(1)
        Center_mean = torch.mean(Hash_center[one_labels], dim=0)

        Center_mean[Center_mean < 0] = -1

        Center_mean[Center_mean > 0] = 1
        random_center[random_center == 0] = -1
        Center_mean[Center_mean == 0] = random_center[Center_mean == 0]
        Center_mean = Center_mean.view(1, -1)

        if is_start:  # the first time
            hash_center = Center_mean
            is_start = False
        else:
            hash_center = torch.cat((hash_center, Center_mean), 0)

    return hash_center
Example #21
def test_cg_embedding():
    # Tests if the embedding layer produces zero embeddings, same embeddings
    # for the same properties and different embeddings for different properties

    embedding_layer = CGBeadEmbedding(n_embeddings=n_embeddings,
                                      embedding_dim=n_feats)

    # Create a tensor full of zeroes
    zero_properties = torch.zeros(size=(frames, beads), dtype=torch.long)
    # Create a tensor with the same values
    same_properties = zero_properties + n_embeddings - 1
    # Create a tensor with random values
    random_properties = torch.randint_like(zero_properties, high=n_embeddings)

    # Test if passing zeroes produces an embedding full of zeroes
    zero_embedding = embedding_layer.forward(zero_properties).detach().numpy()
    np.testing.assert_equal(zero_embedding, 0.)

    # Test if passing the same value produces the same embedding
    same_embedding = embedding_layer.forward(same_properties).detach().numpy()
    assert np.all(same_embedding)

    # Test if passing different values produce different embeddings
    random_embedding = embedding_layer.forward(
        random_properties).detach().numpy()
    assert not np.all(random_embedding)
Example #22
 def apply(self, x: torch.Tensor, **kwargs):  # pylint: disable=unused-argument
     assert x.ndim == 3
     m = self.bernoulli.sample(x.size()).to(x.device)
     m = m * x.gt(0).float()
     noise_value = 1 + torch.randint_like(x, self.min_, self.max_ + 1).to(
         x.device)  # 1 or 2
     return x * (1 - m) + noise_value * m
Example #23
def randomize_tokens(tokens, mask, tokenizer):
    """ Return tokens randomly masked using standard BERT probabilities. """
    targets = torch.ones_like(tokens) * -1

    # get random data
    p = torch.rand_like(tokens.float()) * mask.float()
    random_tokens = torch.randint_like(tokens, len(tokenizer.vocab))

    # set targets for masked tokens
    thresh = 0.85
    targets[p >= thresh] = tokens[p >= thresh]

    # progressively overwrite tokens while increasing the threshold

    # replace 80% with '[MASK]' token
    tokens[p >= thresh] = tokenizer.vocab["[MASK]"]

    # replace 10% with a random word
    thresh = 0.85 + 0.15 * 0.8
    tokens[p >= thresh] = random_tokens[p >= thresh]

    # keep 10% unchanged
    thresh = 0.85 + 0.15 * 0.9
    tokens[p >= thresh] = targets[p >= thresh]

    return tokens, targets
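A quick empirical check (assuming p is uniform, as torch.rand_like gives) that the progressive thresholds above realize the standard BERT split: 15% of tokens are selected, and of those roughly 80% become [MASK], 10% become a random token, and 10% are kept unchanged.

import torch

p = torch.rand(1_000_000)
selected = p >= 0.85
masked = selected & (p < 0.85 + 0.15 * 0.8)
randomed = (p >= 0.85 + 0.15 * 0.8) & (p < 0.85 + 0.15 * 0.9)
kept = p >= 0.85 + 0.15 * 0.9
n = selected.sum()
print(masked.sum() / n, randomed.sum() / n, kept.sum() / n)  # ~0.80, ~0.10, ~0.10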
Example #24
    def _sample_noise(self, x: torch.Tensor, num: int,
                      batch_size) -> np.ndarray:
        """ Sample the base classifier's prediction under noisy corruptions of the input x.

        :param x: the input [channel x width x height]
        :param num: number of samples to collect
        :param batch_size:
        :return: an ndarray[int] of length num_classes containing the per-class counts
        """
        with torch.no_grad():
            counts = np.zeros(self.num_classes, dtype=int)
            for _ in range(ceil(num / batch_size)):
                this_batch_size = min(batch_size, num)
                num -= this_batch_size

                batch = x.repeat((this_batch_size, 1, 1, 1))
                # noise = torch.randn_like(batch, device='cuda') * self.sigma
                mask = self.m.sample(batch.shape).squeeze(-1)
                rand_inputs = torch.randint_like(
                    batch, low=0, high=self.K + 1, device='cuda') / float(
                        self.K)
                batch = batch * mask + rand_inputs * (1 - mask)

                predictions = self.base_classifier(batch).argmax(1)
                counts += self._count_arr(predictions.cpu().numpy(),
                                          self.num_classes)
            return counts
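A small illustration of the corruption step above (shapes and probabilities are arbitrary): each pixel is kept where the Bernoulli mask is 1 and otherwise replaced by a value drawn uniformly from the K + 1 quantized levels {0, 1/K, ..., 1}.

import torch

K = 4
x = torch.full((1, 2, 2), 0.5)                           # a tiny "image"
mask = torch.bernoulli(torch.full_like(x, 0.8))          # keep ~80% of the pixels
rand_inputs = torch.randint_like(x, low=0, high=K + 1) / K
print(x * mask + rand_inputs * (1 - mask))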
Example #25
def generate_condition(input_matrix, density=10):
    # ref_k_array = np.loadtxt("k_array_ref_gan.txt")
    ref_k_array = torch.as_tensor(input_matrix, dtype=torch.float32)
    random_matrix = torch.randint_like(ref_k_array, 2)
    for x in range(density):
        random_matrix = random_matrix * torch.randint_like(ref_k_array, 2)

    # Enlarge condition points
    sf = 2
    avg_downsampler = torch.nn.MaxPool2d((sf, sf), stride=(sf, sf))
    random_matrix = avg_downsampler(random_matrix)
    random_matrix = F.interpolate(random_matrix, scale_factor=sf, mode="nearest")

    output_matrix = ref_k_array * random_matrix
    # output_matrix = torch.zeros_like(input_matrix)
    return output_matrix.cuda(), torch.as_tensor(random_matrix, dtype=torch.float32, device=device)
Example #26
    def test_function(self):
        """
        test function
        """
        print("testing")
        device = self.device

        self.model.zero_grad()

        for p in self.params:
            print(p.size())

        # Generate Rademacher random variable
        v = [torch.randint_like(p, high=2, device=device) for p in self.params]
        for v_i in v:
            v_i[v_i == 0] = -1
        # Multiply with Hessian
        if self.full_dataset:
            _, Hv = self.dataloader_hv_product(v)
        else:
            Hv = hessian_vector_product(self.gradsH, self.params, v)
        #print(type(v))
        print(type(Hv))
        print(len(Hv))
        for hi in Hv:
            print(type(hi))
        #for vi in v:
Example #27
    def ds(self, request):
        class DummyFrameProc:
            def forward(x: torch.Tensor):
                return x.clamp(0., 0.5)

        class DummyEmProc:
            def forward(em: decode.generic.emitter.EmitterSet):
                return em[em.xyz[:, 0] <= 16]

        class DummyWeight:
            def forward(self, *args, **kwargs):
                return torch.rand((1, 1, 32, 32))

        n = 100

        em = decode.generic.emitter.RandomEmitterSet(n * 100)
        em.frame_ix = torch.randint_like(em.frame_ix, n + 1)

        dataset = can.SMLMStaticDataset(
            frames=torch.rand((n, 32, 32)),
            emitter=em.split_in_frames(0, n - 1),
            frame_proc=DummyFrameProc,
            bg_frame_proc=None,
            em_proc=DummyEmProc,
            tar_gen=decode.neuralfitter.target_generator.
            UnifiedEmbeddingTarget((-0.5, 31.5), (-0.5, 31.5), (32, 32),
                                   roi_size=1,
                                   ix_low=0,
                                   ix_high=0),
            weight_gen=DummyWeight(),
            frame_window=request.param,
            pad='same',
            return_em=True)

        return dataset
Example #28
    def loss(self, criterion, srcs, tgts, refs=None):
        refs = srcs if tgts is None else torch.cat((srcs, tgts))
        segments = torch.zeros_like(srcs) if tgts is None \
            else torch.cat((torch.zeros_like(srcs), torch.ones_like(tgts)))

        slen, bsz = refs.size()
        sampler = self._sampling(refs)

        rnd = torch.rand((slen, bsz), device=refs.device)
        mask = (self.masked_rate >= rnd) & sampler

        # replace mask tokens
        inputs = torch.where(
            (self.masked_rate >= rnd) & mask,
            torch.ones_like(refs) * self.mask_idx, 
            refs,
        )
   
        # replace random tokens
        th = self.masked_rate + self.replaced_rate
        inputs = torch.where(
            (th >= rnd) & (rnd > self.masked_rate) & sampler, 
            torch.randint_like(inputs, self.mask_idx+1, self.vocabsize),
            inputs,
        )
        outs = self.forward(inputs, segments).view(slen*bsz, -1)
        loss = criterion(outs, refs.view(-1))
        return loss
Example #29
    def test_clone(self):
        N = 5
        mesh = TestMeshes.init_mesh(N, 10, 100)
        for force in [0, 1]:
            if force:
                # force mesh to have computed attributes
                mesh.verts_packed()
                mesh.edges_packed()
                mesh.verts_padded()

            new_mesh = mesh.clone()

            # Modify tensors in both meshes.
            new_mesh._verts_list[0] = new_mesh._verts_list[0] * 5
            mesh._num_verts_per_mesh = torch.randint_like(
                mesh.num_verts_per_mesh(), high=10
            )

            # Check cloned and original Meshes objects do not share tensors.
            self.assertFalse(
                torch.allclose(new_mesh._verts_list[0], mesh._verts_list[0])
            )
            self.assertFalse(
                torch.allclose(
                    mesh.num_verts_per_mesh(), new_mesh.num_verts_per_mesh()
                )
            )
            self.assertSeparate(new_mesh.verts_packed(), mesh.verts_packed())
            self.assertSeparate(new_mesh.verts_padded(), mesh.verts_padded())
            self.assertSeparate(new_mesh.faces_packed(), mesh.faces_packed())
            self.assertSeparate(new_mesh.faces_padded(), mesh.faces_padded())
            self.assertSeparate(new_mesh.edges_packed(), mesh.edges_packed())
Example #30
    def __init__(self, M_in, N_in, config, HZ=0.4e12):
        super(DiffractiveLayer, self).__init__()
        self.SomeInit(M_in, N_in, HZ)
        assert config is not None
        self.config = config
        #self.init_value = init_value
        #self.rDrop = rDrop
        if self.config.wavelet is None:
            if self.config.modulation == "phase":
                self.transmission = torch.nn.Parameter(data=torch.Tensor(
                    self.size, self.size),
                                                       requires_grad=True)
            else:
                self.transmission = torch.nn.Parameter(data=torch.Tensor(
                    self.size, self.size, 2),
                                                       requires_grad=True)

            init_param = self.transmission.data
            if self.config.init_value == "reverse":  #
                half = self.transmission.data.shape[-2] // 2
                init_param[..., :half, :] = 0
                init_param[..., half:, :] = np.pi
            elif self.config.init_value == "random":
                init_param.uniform_(0, np.pi * 2)
            elif self.config.init_value == "random_reverse":
                init_param = torch.randint_like(init_param, 0, 2) * np.pi
            elif self.config.init_value == "chunk":
                sections = split__sections()
                for xx in init_param.split(sections, -1):
                    # random.random() takes no arguments; fill each chunk in place
                    xx.fill_(random.uniform(0, np.pi * 2))