Example #1
def robert_cross_maj_cuda_nx(rands, rands2, x11, x12, x21, x22, x11_n, x12_n, x21_n, x22_n):
    """No-xor version of rober_cross_maj_cuda"""
    top = maj_p_cuda(x11, x22_n, rands)
    top_inv = maj_p_cuda(x11_n, x22, rands)
    top_max = torch.bitwise_or(top, top_inv)
    bot = maj_p_cuda(x12, x21_n, rands)
    bot_inv = maj_p_cuda(x12_n, x21, rands)
    bot_max = torch.bitwise_or(bot, bot_inv)
    return maj_p_cuda(top_max, bot_max, rands2)
Example #2
def maj_p_cuda(x, y, rands):
    # Per-bit majority MAJ(x, y, rands): each output bit is 1 iff at least two
    # of the three corresponding input bits are 1. Equivalently, each bit of
    # `rands` selects OR(x, y) when it is 1 and AND(x, y) when it is 0.

    xs = x.shape[0]
    ys = y.shape[0]
    assert xs == ys
    and_ = torch.bitwise_and(x, y)
    or_ = torch.bitwise_or(x, y)
    top = torch.bitwise_and(and_, torch.bitwise_not(rands))
    bot = torch.bitwise_and(or_, rands)
    return torch.bitwise_or(top, bot)
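A quick sanity check of the majority gate on packed uint8 words (a minimal sketch; the tensors below are illustrative, not from the original project):

import torch

x = torch.tensor([0b1100], dtype=torch.uint8)
y = torch.tensor([0b1010], dtype=torch.uint8)
ones = torch.tensor([0b1111], dtype=torch.uint8)   # MAJ(x, y, 1) == OR
zeros = torch.tensor([0b0000], dtype=torch.uint8)  # MAJ(x, y, 0) == AND
assert maj_p_cuda(x, y, ones).item() == 0b1110
assert maj_p_cuda(x, y, zeros).item() == 0b1000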
Example #3
    def forward(self, x: Tensor) -> Tensor:
        pos_idx = torch.gt(x, 0.)
        y = torch.zeros_like(x)
        if x.is_cuda:
            if x.numel() < mnn_config.get_value('cpu_or_gpu'):
                device = x.device
                temp = torch.from_numpy(
                    math.pi / 2 *
                    scipy.erfi(x[pos_idx].cpu().numpy())).to(device=device)
            else:
                temp = math.pi / 2 * self.erfi(x[pos_idx])
        else:
            temp = torch.from_numpy(math.pi / 2 *
                                    scipy.erfi(x[pos_idx].numpy()))

        idx = torch.bitwise_or(torch.lt(x, self.cheb_xmin_for_G),
                               torch.gt(x, -self.cheb_xmin_for_G))
        y[idx] = self.integrate_asym_neg_inf(-torch.abs(x[idx]))
        idx.bitwise_not_()
        y[idx] = chebyshev_val_no_transform(-torch.abs_(x[idx]),
                                            self.cheb_G_neg,
                                            x_min=self.cheb_xmin_for_G,
                                            x_max=0.,
                                            num_sub=self.div)

        y[pos_idx] += temp
        return y
Example #4
    def __getitem__(self, index):
        identifier = index + 1

        # extract image tile
        image_tile = self._image_topology.tile(identifier).np_image
        # remove alpha channel and convert to RGB
        image_tile = cv2.cvtColor(image_tile, cv2.COLOR_BGRA2RGB)
        # convert to tensor
        image_tile = torch.from_numpy(self._check_padding(image_tile)).permute(
            2, 0, 1)

        mask_tile = None
        for i in range(len(self._mask_topology)):
            # extract mask tile
            tile = self._mask_topology[i].tile(identifier).np_image
            # remove alpha channel and convert to binary mask
            tile = cv2.cvtColor(tile, cv2.COLOR_BGRA2GRAY)
            _, tile = cv2.threshold(tile, 1, 1, cv2.THRESH_BINARY)
            tile = np.expand_dims(tile, 2)
            # convert to tensor
            tile = torch.from_numpy(self._check_padding(tile)).permute(2, 0, 1)

            if i == 0:
                mask_tile = torch.ones((1, tile.shape[1], tile.shape[2]),
                                       dtype=torch.int)
                mask_tile = torch.cat((mask_tile, tile), dim=0)
            # update background channel
            mask_tile[0, :, :] = relu(mask_tile[0, :, :] - tile)

            if self._mask_merge:
                mask_tile[1, :, :] = torch.bitwise_or(mask_tile[1, :, :], tile)
            elif i != 0:
                mask_tile = torch.cat((mask_tile, tile), dim=0)

        return image_tile.float(), mask_tile.float(), identifier
Example #5
def get_placements(curr_state, device):
  '''Given a state curr_state, compute legal next states due to placing
  a piece
  
  Input: a (channels, config.rows, config.cols) bool tensor representing
         the board state
  
  Output: a (branchNum, channels, config.rows, config.cols) bool tensor
          with dim=0 indexing the possible next states and branchNum
          the number of legal moves.
  
  '''

  # Consider only the ball and player layers; OR them and invert to get the legal (empty) positions
  players = curr_state.select(0, PLAYER_CHANNEL) # view
  ball    = curr_state.select(0, BALL_CHANNEL)   # view
  legal   = torch.bitwise_or(players, ball).bitwise_not_() # new tensor

  legal_indices = legal.flatten().nonzero(as_tuple=True)[0]

  if device not in placements_static:
    placements_static[device] = create_placements(device)
  
  placements = placements_static[device]

  new_placements = placements.index_select(0, legal_indices)
  new_states     = new_placements + curr_state
  
  return new_states
Example #6
    def update(self, **kwargs) -> None:
        """
        TODO.

        Implement the dynamics and updating rule. You might need to call the
        parent method. Make sure to consider the reward value as a given keyword
        argument.
        """
        self.lr = kwargs.get("lr", .1)
        dopamine = kwargs.get("dopamine", None)
        pre_trace = self.connection.pre.traces.reshape(
            (*self.connection.pre.shape, *[1] * len(self.connection.post.shape)))
        post_trace = self.connection.post.traces.reshape(
            (*[1] * len(self.connection.pre.shape), *self.connection.post.shape))
        x = (
                self.lr * pre_trace * self.connection.post.s.reshape(*post_trace.shape)
                - self.lr * post_trace * self.connection.pre.s.reshape(*pre_trace.shape)
            ) - self.weight_decay * self.connection.w

        # print(self.connection.pre.s.type(torch.int))
        self.c += self.connection.dt * (-self.c/self.tau_c + x * torch.bitwise_or(
                self.connection.pre.s.reshape(*pre_trace.shape).type(torch.int), 
                self.connection.post.s.reshape(*post_trace.shape).type(torch.int)
            ))
        # print(self.c)
        # r = self.DA(time, self.connection.pre.s.sum(), self.connection.post.s.sum())
        # # self.c += self.connection.dt * (-self.c/self.tau_c + x * torch.bitwise_or(
        # #     self.connection.post.s[0], self.connection.post.s[1]
        # #     ))
        # # r = self.DA(time, self.connection.post.s[0], self.connection.post.s[1])

        # dopamine = self.reward.compute(reward=r)
        self.connection.w += self.connection.dt * (self.c * dopamine)

        return
Example #7
    def func_dawson_2nd(self, x: Tensor) -> Tensor:
        y = torch.zeros_like(x)
        idx1 = torch.lt(x, -10.)
        idx2 = torch.gt(x, 10.)

        y[idx1] = self.func_asym_neg_inf(x[idx1])
        y[idx2] = self.func_asym_pos_inf(x[idx2])

        idx1 = torch.bitwise_not(torch.bitwise_or(idx1, idx2))
        y[idx1] = chebyshev_val_neg(-x[idx1].abs_(),
                                    self.cheb_neg,
                                    num_sub=self.div)

        idx1 = torch.bitwise_and(idx1, x > 0)
        if x.is_cuda:
            if x[idx1].numel() < mnn_config.get_value('cpu_or_gpu'):
                device = x.device
                temp = torch.from_numpy(scipy.erfi(
                    x[idx1].cpu().numpy())).to(device=device)
                y[idx1] = math.sqrt(math.pi) * torch.exp(torch.pow(x[idx1], 2)) * \
                          (0.5 * math.log(2) + 2 * self.dawson1(-x[idx1]) + math.pi / 2 * temp) - y[idx1]
            else:
                y[idx1] = math.sqrt(math.pi) * torch.exp(torch.pow(x[idx1], 2)) * \
                          (0.5 * math.log(2) + 2 * self.dawson1(-x[idx1]) + math.pi / 2 * self.dawson1.erfi(x[idx1])) - \
                          y[idx1]
        else:
            y[idx1] = math.sqrt(math.pi) * torch.exp(torch.pow(x[idx1], 2)) * \
                      (0.5 * math.log(2) + 2 * self.dawson1(-x[idx1]) + math.pi / 2 * torch.from_numpy(
                          scipy.erfi(x[idx1].numpy()))) - y[idx1]
        return y
Example #8
def make_SA_bool(weights, mask, mask1):
    ## Inject errors
    # output = ((weights + mask) > 0.)  # inject stuck at 0
    # output = ((output - mask1)> 0.)   # inject stuck at 1
    not_mask0 = torch.bitwise_not(mask)
    output = torch.bitwise_and(weights, not_mask0)  # inject stuck at 0
    output = torch.bitwise_or(output, mask1)  # inject stuck at 1
    return output
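A small illustration of the stuck-at fault injection on bit-packed weights (values chosen for the example, not from the original project):

import torch

weights = torch.tensor([0b1010], dtype=torch.uint8)
mask0 = torch.tensor([0b0010], dtype=torch.uint8)  # bits stuck at 0
mask1 = torch.tensor([0b0001], dtype=torch.uint8)  # bits stuck at 1
assert make_SA_bool(weights, mask0, mask1).item() == 0b1001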
Example #9
def accuracy(predict, target):
    assert predict.shape == target.shape

    and_sum = torch.bitwise_and(predict.type(torch.IntTensor),
                                target.type(torch.IntTensor)).sum()
    or_sum = torch.bitwise_or(predict.type(torch.IntTensor),
                              target.type(torch.IntTensor)).sum()

    return and_sum / or_sum / predict.shape[0]
Example #10
 def bitwise_xor(a, b):
     """
     Bitwise XOR of Tensors via int intermediate
     """
     # Convert input from float to int
     a = a.add(.5).mul(256.).round().int()
     b = b.add(.5).mul(256.).round().int()
     # Bitwise XOR on integers, convert back to float
     return torch.bitwise_xor(a, b).div(256.).sub(.5)
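A round-trip sketch (assumes inputs lie in [-0.5, 0.5), per the fixed-point mapping above):

import torch

a = torch.tensor([0.0])   # maps to 128 (0b10000000)
b = torch.tensor([-0.5])  # maps to 0
bitwise_xor(a, b)         # 128 ^ 0 = 128, mapped back -> tensor([0.])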
Example #11
def get_accuracy(x, x_o):
    res = 0
    for t in range(1, x.shape[0]):
        v, v_o = x[t].int(), x_o[t].int()
        try:
            res += int(torch.bitwise_and(v, v_o).sum()) / int(
                torch.bitwise_or(v, v_o).sum())
        except ZeroDivisionError:
            res += 1
    return res / x.shape[0]
Example #12
 def __call__(self, x: Tensor, y: LongTensor) -> Tensor:
     y = y.flatten(0, -1)
     y_float = torch.zeros(x.shape[0] * x.shape[1], self.nc, device=x.device, dtype=torch.float)
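     # Label smoothing: give each non-target, non-ignored class an equal share
     # of `mass_redistribution`; the target keeps 1 - mass_redistribution.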
     y_float.fill_(self.mass_redistribution / (self.nc-(1 + len(self.ignore_index))))
     y_float.scatter_(1, y.unsqueeze(1), 1 - self.mass_redistribution)
     mask = torch.zeros_like(y, dtype=torch.bool)
     for idx in self.ignore_index:
         mask = torch.bitwise_or(mask, y == idx)
     y_float[mask.unsqueeze(1).repeat(1, self.nc)] = 0
     return self.loss_fn(torch.log_softmax(x.view(-1, self.nc), dim=-1), y_float)
Example #13
def bitwise_or(input_, other):
    """Wrapper of `torch.bitwise_or`.

    Parameters
    ----------
    input_ : DTensor
        The first operand.
    other : DTensor
        The second operand.

    Returns
    -------
    Tensor
        The element-wise bitwise OR of ``input_._data`` and ``other._data``.
    """
    return torch.bitwise_or(input_._data, other._data)
Example #14
def mux_p_cuda(x, y, rands):
    # Per-bit multiplexer: where a bit of `rands` is 1 the output takes the
    # corresponding bit of x, where it is 0 it takes the bit of y.

    xs = x.shape[0]
    ys = y.shape[0]
    assert xs == ys
    #rands = torch.cuda.FloatTensor(xs << 3).uniform_() > p
    #rands = torch.sum(rands.view(xs, 8) * mask, 1)
    top = torch.bitwise_and(x, rands)
    bot = torch.bitwise_and(y, torch.bitwise_not(rands))
    return torch.bitwise_or(top, bot)
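A nibble-selection sketch for the multiplexer (illustrative values):

import torch

x = torch.tensor([0xAA], dtype=torch.uint8)    # 0b10101010
y = torch.tensor([0x55], dtype=torch.uint8)    # 0b01010101
sel = torch.tensor([0xF0], dtype=torch.uint8)  # high nibble from x, low nibble from y
assert mux_p_cuda(x, y, sel).item() == 0xA5    # 0b10100101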
Example #15
File: eval.py Project: by256/bpartis
def compute_iou(pred_mask, gt_mask):
    """
    Computes IoU between predicted instance mask and 
    ground-truth instance mask
    """

    pred_mask = pred_mask.byte().squeeze()
    gt_mask = gt_mask.byte().squeeze()
    # print('pred_masks', pred_mask.shape, 'gt_masks', gt_mask.shape)
    intersection = torch.bitwise_and(pred_mask, gt_mask).sum().float()
    union = torch.bitwise_or(pred_mask, gt_mask).sum().float()
    return intersection / union
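A tiny worked IoU example (illustrative masks):

import torch

pred = torch.tensor([[1, 1], [0, 0]])
gt = torch.tensor([[1, 0], [1, 0]])
compute_iou(pred, gt)  # intersection = 1, union = 3 -> tensor(0.3333)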
Example #16
    def _gpu_dawson(self, x: Tensor) -> Tensor:
        y = torch.zeros_like(x)
        region1 = torch.bitwise_or(torch.lt(x, self.cheb_xmin_for_G),
                                   torch.gt(x, -self.cheb_xmin_for_G))
        y[region1] = self.asym_neg_inf(-torch.abs(x[region1]))

        region1.bitwise_not_()
        y[region1] = chebyshev_val_neg(-torch.abs(x[region1]),
                                       self.cheb_g_neg,
                                       num_sub=self.div)
        region1 = torch.gt(x, 0.)
        y[region1] = math.sqrt(math.pi) * torch.exp(
            x[region1].pow(2)) - y[region1]
        return y
Example #17
    def _set_scores_to_inf_for_banned_tokens(
        self, scores: torch.Tensor, banned_tokens: List[List[int]]
    ) -> torch.Tensor:
        """
        Modifies the scores in place by setting the banned token positions to `-inf`. `banned_tokens` is expected
        to be a list of lists of banned tokens, in the format [[batch index, vocabulary position], ...].

        Args:
            scores: logits distribution of shape (batch size, vocabulary size)
            banned_tokens: list of list of tokens to ban of length (batch_size)
        """
        banned_mask_list = []
        for idx, batch_banned_tokens in enumerate(banned_tokens):
            for token in batch_banned_tokens:
                # Eliminates invalid bad word IDs that are over the vocabulary size.
                if token < scores.shape[1]:
                    banned_mask_list.append([idx, token])
                else:
                    logger.error(
                        f"An invalid bad word ID is defined: {token}. This ID is not contained in the "
                        "vocabulary, and is therefore ignored."
                    )
        if not banned_mask_list and self.static_bad_words_mask is None:
            return scores

        else:
            if banned_mask_list:
                banned_mask = torch.LongTensor(banned_mask_list)
                indices = torch.ones(len(banned_mask))
                # A sparse tensor is generated from a list of coordinates: [[0, 1], [0, 2], [2, 0]]. A conversion to dense tensor generates:
                # [ 0  1  1 ]
                # [ 0  0  0 ]
                # [ 1  0  0 ]

                banned_mask = (
                    torch.sparse.LongTensor(banned_mask.t(), indices, scores.size())
                    .to(scores.device)
                    .to_dense()
                    .bool()
                )

                if self.static_bad_words_mask is not None:
                    banned_mask = torch.bitwise_or(banned_mask, self.static_bad_words_mask)
            else:
                banned_mask = self.static_bad_words_mask

            scores = scores.masked_fill(banned_mask, -float("inf"))
            return scores
Example #18
File: oagcnn.py Project: imatge-upc/OAGCNN
 def update_active_objects(self, new_gt_masks, new_valid_targets):
     objs_changes = torch.ne(self.active_valid_targets, new_valid_targets)
     if objs_changes.any():
          # We only care about objects making their first appearance in the
          # clip: new_valid_targets == True and active_valid_targets == False
          # at the positions where there are changes.
         new_appearance_ids = torch.bitwise_and(
             torch.bitwise_and(objs_changes, new_valid_targets),
             torch.bitwise_and(torch.logical_not(self.active_valid_targets),
                               objs_changes))
         # Check if there is any appearance
         if new_appearance_ids.any():
             self.active_valid_targets = torch.bitwise_or(
                 self.active_valid_targets, new_appearance_ids)
             mask_to_op = torch.zeros_like(self.active_objs_masks)
             mask_to_op[new_appearance_ids, :, :] = 1
             self.active_objs_masks = mask_to_op * new_gt_masks + self.active_objs_masks
Example #19
 def compress(self, tensor):
     tensor_flatten = tensor.flatten()
     tensor_cast = tensor_flatten.view(torch.int32)
     sign = tensor_cast & -2147483648  # IEEE-754 sign bit (0x80000000)
     exp = tensor_cast & 2139095040    # exponent field    (0x7F800000)
     mantissa = tensor_cast & 8388607  # mantissa field    (0x007FFFFF)
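     # Stochastically round the exponent up: the larger the mantissa, the more
     # likely the exponent is bumped by one, so dropping the mantissa is
     # unbiased in expectation.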
     exp_add_one = mantissa > torch.randint(
         low=0,
         high=0b00000000011111111111111111111111,
         size=tensor_flatten.shape,
         dtype=torch.int32,
         device=self.device)
     exponent = torch.where(exp_add_one,
                            exp + 0b00000000100000000000000000000000, exp)
     exp_shift = torch.clip(exponent,
                            min=0b00001001000000000000000000000000,
                            max=0b01001000100000000000000000000000)
     exps = exp_shift >> 23
     exps = torch.bitwise_or(sign >> 24, exps - 18)
     tensor_compressed = exps.to(torch.uint8)
     return tensor_compressed
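For reference, a hypothetical inverse (not part of the original project): the byte packs the sign bit and a re-biased 7-bit exponent, so each value can only be reconstructed as a signed power of two.

import torch

def decompress_sketch(tensor_compressed):
    # Illustrative sketch only: restore the sign and exponent fields; the
    # mantissa was dropped, so every value comes back as a power of two.
    b = tensor_compressed.to(torch.int32)
    sign = (b & 0x80) << 24        # relies on int32 two's-complement wraparound
    exp = ((b & 0x7F) + 18) << 23  # undo the -18 re-bias of the exponent field
    return torch.bitwise_or(sign, exp).view(torch.float32)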
Example #20
def test_attention():
    torch.manual_seed(0)
    scores = torch.rand(2, 3, 4)  # [B, T, S]
    values = torch.rand(2, 4, 5)  # [B, S, E]

    q_padding_mask = torch.tensor([[0, 0, 1], [0, 1, 1]], dtype=torch.bool)
    assert q_padding_mask.shape == torch.Size([2, 3])

    key_padding_mask = torch.tensor([[0, 0, 0, 1], [0, 0, 1, 1]],
                                    dtype=torch.bool)
    assert key_padding_mask.shape == torch.Size([2, 4])

    padding_mask = torch.bitwise_or(q_padding_mask.unsqueeze(-1),
                                    key_padding_mask.unsqueeze(-2))

    values, weights = attention(scores,
                                values,
                                q_padding_mask,
                                key_padding_mask,
                                out_weights=True)
    assert (weights.masked_select(padding_mask) == 0).all()
Example #21
def iou_pytorch(outputs: torch.Tensor, labels: torch.Tensor):
    # You can skip the squeeze if you are passing tensors of equal shape, but
    # output from a UNet or similar will most probably have the
    # BATCH x 1 x H x W shape
    outputs = outputs.squeeze()  # BATCH x 1 x H x W => BATCH x H x W
    labels = labels.squeeze()
    SMOOTH = 1e-6
    # Will be zero if Truth=0 or Prediction=0
    intersection = torch.count_nonzero(torch.bitwise_and(outputs > 0, labels > 0))
    # Will be zero if both are 0
    union = torch.count_nonzero(torch.bitwise_or(outputs > 0, labels > 0))

    iou = (intersection + SMOOTH) / (union + SMOOTH)  # smooth the division to avoid 0/0

    # thresholded = torch.clamp(20 * (iou - 0.5), 0, 10).ceil() / 10  # equivalent to comparing with thresholds

    return iou.mean()  # average across the batch
Example #22
 def evaluations(self, trainloader, valloader):
     K = self.kwargs['nc']
     with torch.no_grad():
         self.net.eval()
         for i, loader in enumerate((trainloader, valloader)):
             IOUs = [0 for _ in range(K - 1)]
             cnt = 0
             for x, y in tqdm.tqdm(loader, desc = "Evaluating...", leave = False, mininterval = 60):
                 x = x.cuda()
                 y = y.cuda()
                 yhat = self.net(x).argmax(1)
                 tmp = 0
                 for k in range(1, K):
                     union = torch.sum(torch.bitwise_or(yhat == k, y == k), dim = (1, 2)).cpu() * 1.
                     intersect = torch.sum(torch.bitwise_and(yhat == k, y == k), dim = (1, 2)).cpu() * 1.
                     iou = torch.sum(intersect / (union + 1e-10))
                     IOUs[k - 1] += iou
                 cnt += len(x)
             printOut(["Train", "Val"][i] + " ".join([" IOU%d: %.4f" % (k + 1, IOUs[k] / cnt) for k in range(len(IOUs))]) + " IOU: %.4f" % (sum(IOUs) / len(IOUs) / cnt))
     plt.plot(self.losses['train'], label = "train")
     plt.plot(self.losses['val'], label = "val")
     plt.legend()
     plt.savefig(plotpath)
Example #23
    def forward(self,
                xtoken_seq,
                char_seq,
                special_symbols,
                num_tokens,
                max_form_len,
                max_num_labels,
                target_chars=None):
        morph_scores, morph_states, _ = super().forward(
            xtoken_seq, char_seq, special_symbols, num_tokens, max_form_len,
            max_num_labels, target_chars)
        if target_chars is not None:
            morph_chars = target_chars
        else:
            morph_chars, _ = self.decode(morph_scores, [])
            morph_chars = morph_chars.squeeze(0)
        eos, sep = special_symbols['</s>'], special_symbols['<sep>']
        eos_mask = torch.eq(morph_chars[:num_tokens], eos)
        eos_mask[:, -1] = True
        eos_mask = torch.bitwise_and(
            torch.eq(torch.cumsum(eos_mask, dim=1), 1), eos_mask)

        sep_mask = torch.eq(morph_chars[:num_tokens], sep)
        sep_mask = torch.bitwise_and(
            torch.eq(torch.cumsum(eos_mask, dim=1), 0), sep_mask)

        seg_state_mask = torch.bitwise_or(eos_mask, sep_mask)
        seg_states = morph_states[seg_state_mask]
        enc_seg_scores, _ = self.encoder(seg_states.unsqueeze(dim=1))
        enc_seg_scores = self.seg_dropout(enc_seg_scores)
        label_scores = []
        seg_sizes = torch.sum(seg_state_mask, dim=1)
        for classifier in self.classifiers:
            scores = classifier(enc_seg_scores)
            scores = torch.split_with_sizes(scores.squeeze(dim=1),
                                            tuple(seg_sizes))
            scores = nn.utils.rnn.pad_sequence(scores, batch_first=True)
            fill_len = max_num_labels - scores.shape[1]
            label_scores.append(F.pad(scores, (0, 0, 0, fill_len)))
        return morph_scores, morph_states, label_scores
Example #24
 def forward(self, a, b):
     c = torch.bitwise_or(a, b)
     d = torch.bitwise_or(a, c)
     return d
Example #25
 def perturb(self, tensor: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
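     # Force every bit selected by `mask` to 1; all other bits pass through.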
     return torch.bitwise_or(tensor, mask)
Example #26
    def forward(self, batch_size, beam_size, max_seq_len, memory,
                memory_seq_lens):
        extended_memory = tile(memory, beam_size)
        batchxbeam = extended_memory.size(0)
        extended_memory = extended_memory.transpose(0, 1).contiguous()

        extended_memory_seq_lens = tile(memory_seq_lens, beam_size)
        start_ids = extended_memory_seq_lens.new_full((batchxbeam, ),
                                                      self.start_id,
                                                      dtype=torch.int64)

        initial_log_probs = extended_memory.new_full((beam_size, ),
                                                     -float("inf"),
                                                     dtype=torch.float32)
        initial_log_probs[0] = 0.
        initial_log_probs = initial_log_probs.repeat(batch_size)
        sequence_lengths = extended_memory_seq_lens.new_full((batchxbeam, ), 0)
        finished = extended_memory_seq_lens.new_full((batchxbeam, ),
                                                     0,
                                                     dtype=torch.bool)

        dtype_info = torch.finfo(extended_memory.dtype)
        eos_max_prob = extended_memory.new_full((batchxbeam, self.vocab_size),
                                                dtype_info.min)
        eos_max_prob[:, self.end_id] = dtype_info.max

        self.decoder.init_state(extended_memory, extended_memory, None)
        word_ids = start_ids
        cum_log_probs = initial_log_probs

        for step in range(max_seq_len):
            if not torch.bitwise_not(finished).any():
                break
            word_ids = word_ids.view(1, -1, 1)
            dec_out, dec_attn = self.decoder(
                word_ids,
                extended_memory,
                memory_lengths=extended_memory_seq_lens,
                step=step)
            logits = self.generator(dec_out.squeeze(0))
            logits = torch.where(finished.view(-1, 1), eos_max_prob,
                                 logits).to(torch.float32)
            log_probs = self.logsoftmax(logits.to(torch.float32))

            total_probs = log_probs + torch.unsqueeze(cum_log_probs, 1)
            total_probs = total_probs.view(-1, beam_size * self.vocab_size)

            # beamsearch
            # _, sample_ids = torch.topk(total_probs, beam_size)
            # sample_ids = sample_ids.view(-1)

            #diversesiblingsearch
            sibling_score = torch.arange(1, beam_size + 1).to(
                total_probs.dtype).to(extended_memory.device
                                      ) * self.diversity_rate  # [beam_size]
            scores, ids = torch.topk(
                total_probs.view(-1, beam_size, self.vocab_size),
                beam_size)  # [batch size, beam width, beam width]
            scores = scores + sibling_score  # [batch size, beam width, beam width]
            scores = scores.view(-1, beam_size * beam_size)
            ids = ids + torch.unsqueeze(
                torch.unsqueeze(
                    torch.arange(0, beam_size).to(extended_memory.device) *
                    self.vocab_size, 0), -1)
            ids = ids.view(-1, beam_size * beam_size)
            _, final_ids = torch.topk(scores,
                                      beam_size)  # [batch size, beam size]
            final_ids = final_ids.view(-1, 1)
            batch_index = torch.arange(0, batch_size).to(
                extended_memory.device).view(-1,
                                             1).repeat(1,
                                                       beam_size).view(-1, 1)
            index = torch.cat([batch_index, final_ids], 1)
            sample_ids = gather_nd(ids, index)

            word_ids = sample_ids % self.vocab_size  # [batch_size * beam_size]
            beam_ids = sample_ids // self.vocab_size  # [batch_size * beam_size]
            beam_indices = (torch.arange(batchxbeam).to(extended_memory.device)
                            // beam_size) * beam_size + beam_ids

            sequence_lengths = torch.where(finished, sequence_lengths,
                                           sequence_lengths + 1)

            batch_pos = torch.arange(batchxbeam).to(
                extended_memory.device) // beam_size
            next_cum_log_probs = gather_nd(
                total_probs, torch.stack([batch_pos, sample_ids],
                                         -1))  # [batch_size * beam_size]
            finished = finished.index_select(0, beam_indices)
            sequence_lengths = sequence_lengths.index_select(0, beam_indices)

            self.decoder.map_state(
                lambda state, dim: state.index_select(dim, beam_indices))
            if step == 0:
                parent_ids = beam_ids.view(1, -1)
                output_ids = word_ids.view(1, -1)
            else:
                parent_ids = torch.cat((parent_ids, beam_ids.view(1, -1)))
                output_ids = torch.cat((output_ids, word_ids.view(1, -1)))
            cum_log_probs = torch.where(finished, cum_log_probs,
                                        next_cum_log_probs)
            finished = torch.bitwise_or(finished,
                                        torch.eq(word_ids, self.end_id))

        beams, lengths = finalize(beam_size,
                                  output_ids,
                                  parent_ids,
                                  sequence_lengths,
                                  self.end_id,
                                  args=self.args)
        return beams, lengths
Example #27
def max_pool_cuda(x11, x12, x21, x22):
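    # For {0,1} bitstreams OR == max, so OR-ing the four inputs max-pools a 2x2 window.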
    or1 = torch.bitwise_or(x11, x22)
    or2 = torch.bitwise_or(x12, x21)
    return torch.bitwise_or(or1, or2)
Example #28
File: evalAE.py Project: wx-b/DECOR-GAN
def eval_LP_Div_IOU(config):
    if torch.cuda.is_available():
        device = torch.device('cuda')
        torch.backends.cudnn.benchmark = True
    else:
        device = torch.device('cpu')

    result_dir = "output_for_eval"
    if not os.path.exists(result_dir):
        print("ERROR: result_dir does not exist! " + result_dir)
        exit(-1)

    patches_dir = "unique_patches"
    if not os.path.exists(patches_dir):
        print("ERROR: patches_dir does not exist! " + patches_dir)
        exit(-1)

    output_dir = "eval_output"
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    #load style shapes
    fin = open("splits/" + config.data_style + ".txt")
    styleset_names = [name.strip() for name in fin.readlines()]
    fin.close()
    styleset_len_original = len(styleset_names)
    styleset_len = min(styleset_len_original, max_num_of_styles)

    #load content shapes
    fin = open("splits/" + config.data_content + ".txt")
    dataset_names = [name.strip() for name in fin.readlines()]
    fin.close()
    dataset_len_original = len(dataset_names)
    dataset_len = min(dataset_len_original, max_num_of_contents)

    result_LP_IOU = np.zeros([dataset_len, styleset_len], np.float32)
    result_Div_IOU = np.zeros([dataset_len, styleset_len], np.float32)
    result_Div_IOU_raw = np.zeros([dataset_len, styleset_len, styleset_len],
                                  np.int32)

    buffer_size = 256 * 256 * 16  #change the buffer size if the input voxel is large
    patch_size = 12
    if not config.asymmetry:
        padding_size = 8 - patch_size // 2
    else:
        padding_size = 0
    sample_patch_num = 1000
    IOU_threshold = 0.95

    #prepare dictionary for style shapes
    dict_style_patches_tensor = []
    for style_id in range(styleset_len_original):
        data_dict = h5py.File(
            patches_dir + "/style_" + str(style_id) + ".hdf5", 'r')
        patches = data_dict['patches'][:]
        data_dict.close()
        patches_tensor = torch.from_numpy(patches).to(device)
        dict_style_patches_tensor.append(patches_tensor)

    for content_id in range(dataset_len):
        for style_id in range(styleset_len):

            start_time = time.time()

            voxel_model_file = open(
                result_dir + "/output_content_" + str(content_id) + "_style_" +
                str(style_id) + ".binvox", 'rb')
            output_shape = binvox_rw.read_as_3d_array(
                voxel_model_file, fix_coords=False).data.astype(np.uint8)
            output_shape = np.ascontiguousarray(output_shape[:, :,
                                                             padding_size:])

            patches = np.zeros(
                [buffer_size, patch_size, patch_size, patch_size], np.uint8)
            patch_num = cutils.get_patches(output_shape, patches, patch_size)
            if patch_num > sample_patch_num:
                patches = patches[:patch_num]
                np.random.shuffle(patches)
                patches = patches[:sample_patch_num]
                patches = np.ascontiguousarray(patches)
                patch_num = sample_patch_num
            else:
                patches = np.copy(patches[:patch_num])

            this_patches_tensor = torch.from_numpy(patches).to(device)

            #IOU
            similar_flags = np.zeros([patch_num, styleset_len_original],
                                     np.int32)
            for patch_id in range(patch_num):
                for compare_id in range(styleset_len_original):
                    patch_tensor = this_patches_tensor[patch_id:patch_id + 1]
                    patches_tensor = dict_style_patches_tensor[compare_id]
                    ious = torch.sum(
                        torch.bitwise_and(patch_tensor, patches_tensor),
                        dim=(1, 2, 3),
                        dtype=torch.int).float() / torch.sum(
                            torch.bitwise_or(patch_tensor, patches_tensor),
                            dim=(1, 2, 3),
                            dtype=torch.int).float()
                    iou = torch.max(ious).item()

                    similar_flags[patch_id, compare_id] = (iou > IOU_threshold)
            Div_IOU_raw = np.sum(similar_flags, axis=0)
            LP_IOU = np.sum(np.max(similar_flags, axis=1)) / float(patch_num)

            result_LP_IOU[content_id, style_id] = LP_IOU
            result_Div_IOU_raw[content_id,
                               style_id] = Div_IOU_raw[:styleset_len]

            print(content_id, style_id, time.time() - start_time, LP_IOU)

        #Div
        result_Div_IOU_mean = np.mean(result_Div_IOU_raw.astype(np.float32),
                                      axis=1,
                                      keepdims=True)
        result_Div_IOU_normalized = result_Div_IOU_raw - result_Div_IOU_mean

        for style_id in range(styleset_len):
            # #top 10%
            # top_N = max(int(0.1*styleset_len),1)
            # ranking = np.argsort(result_Div_IOU_normalized[content_id,style_id])
            # valid_set = ranking[-top_N:]
            # if style_id in valid_set:
            #     Div_IOU = 1
            # else:
            #     Div_IOU = 0
            Div_IOU = (result_Div_IOU_normalized[content_id, style_id,
                                                 style_id] ==
                       np.max(result_Div_IOU_normalized[content_id, style_id]))
            result_Div_IOU[content_id, style_id] = Div_IOU

    #write result_LP_IOU
    fout = open(output_dir + "/result_LP_IOU.txt", 'w')
    for content_id in range(dataset_len):
        for style_id in range(styleset_len):
            fout.write(str(result_LP_IOU[content_id, style_id]))
            if style_id != styleset_len - 1:
                fout.write("\t")
        if content_id != dataset_len - 1:
            fout.write("\n")
    fout.close()

    #write result_Div_IOU
    fout = open(output_dir + "/result_Div_IOU.txt", 'w')
    for content_id in range(dataset_len):
        for style_id in range(styleset_len):
            fout.write(str(result_Div_IOU[content_id, style_id]))
            if style_id != styleset_len - 1:
                fout.write("\t")
        if content_id != dataset_len - 1:
            fout.write("\n")
    fout.close()

    #write result_Div_IOU_raw
    fout = open(output_dir + "/result_Div_IOU_raw.txt", 'w')
    for content_id in range(dataset_len):
        for style_id in range(styleset_len):
            for compare_id in range(styleset_len):
                fout.write(
                    str(result_Div_IOU_raw[content_id, style_id, compare_id]))
                if style_id != styleset_len - 1 or compare_id != styleset_len - 1:
                    fout.write("\t")
        if content_id != dataset_len - 1:
            fout.write("\n")
    fout.close()

    #write result_LP_Div_IOU_mean
    fout = open(output_dir + "/result_LP_Div_IOU_mean.txt", 'w')
    fout.write("LP_IOU:\n" + str(np.mean(result_LP_IOU)) + "\n")
    fout.write("Div_IOU:\n" + str(np.mean(result_Div_IOU)) + "\n")
    fout.close()
Example #29
File: math_ops.py Project: malfet/pytorch
 def pointwise_ops(self):
     a = torch.randn(4)
     b = torch.randn(4)
     t = torch.tensor([-1, -2, 3], dtype=torch.int8)
     r = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
     t = torch.tensor([-1, -2, 3], dtype=torch.int8)
     s = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
     f = torch.zeros(3)
     g = torch.tensor([-1, 0, 1])
     w = torch.tensor([0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
     return (
         torch.abs(torch.tensor([-1, -2, 3])),
         torch.absolute(torch.tensor([-1, -2, 3])),
         torch.acos(a),
         torch.arccos(a),
         torch.acosh(a.uniform_(1.0, 2.0)),
         torch.add(a, 20),
         torch.add(a, torch.randn(4, 1), alpha=10),
         torch.addcdiv(torch.randn(1, 3),
                       torch.randn(3, 1),
                       torch.randn(1, 3),
                       value=0.1),
         torch.addcmul(torch.randn(1, 3),
                       torch.randn(3, 1),
                       torch.randn(1, 3),
                       value=0.1),
         torch.angle(a),
         torch.asin(a),
         torch.arcsin(a),
         torch.asinh(a),
         torch.arcsinh(a),
         torch.atan(a),
         torch.arctan(a),
         torch.atanh(a.uniform_(-1.0, 1.0)),
         torch.arctanh(a.uniform_(-1.0, 1.0)),
         torch.atan2(a, a),
         torch.bitwise_not(t),
         torch.bitwise_and(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
         torch.bitwise_or(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
         torch.bitwise_xor(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
         torch.ceil(a),
         torch.clamp(a, min=-0.5, max=0.5),
         torch.clamp(a, min=0.5),
         torch.clamp(a, max=0.5),
         torch.clip(a, min=-0.5, max=0.5),
         torch.conj(a),
         torch.copysign(a, 1),
         torch.copysign(a, b),
         torch.cos(a),
         torch.cosh(a),
         torch.deg2rad(
             torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0,
                                                              -90.0]])),
         torch.div(a, b),
         torch.divide(a, b, rounding_mode="trunc"),
         torch.divide(a, b, rounding_mode="floor"),
         torch.digamma(torch.tensor([1.0, 0.5])),
         torch.erf(torch.tensor([0.0, -1.0, 10.0])),
         torch.erfc(torch.tensor([0.0, -1.0, 10.0])),
         torch.erfinv(torch.tensor([0.0, 0.5, -1.0])),
         torch.exp(torch.tensor([0.0, math.log(2.0)])),
         torch.exp2(torch.tensor([0.0, math.log(2.0), 3.0, 4.0])),
         torch.expm1(torch.tensor([0.0, math.log(2.0)])),
         torch.fake_quantize_per_channel_affine(
             torch.randn(2, 2, 2),
             (torch.randn(2) + 1) * 0.05,
             torch.zeros(2),
             1,
             0,
             255,
         ),
         torch.fake_quantize_per_tensor_affine(a, 0.1, 0, 0, 255),
         torch.float_power(torch.randint(10, (4, )), 2),
         torch.float_power(torch.arange(1, 5), torch.tensor([2, -3, 4,
                                                             -5])),
         torch.floor(a),
         # torch.floor_divide(torch.tensor([4.0, 3.0]), torch.tensor([2.0, 2.0])),
         # torch.floor_divide(torch.tensor([4.0, 3.0]), 1.4),
         torch.fmod(torch.tensor([-3, -2, -1, 1, 2, 3]), 2),
         torch.fmod(torch.tensor([1, 2, 3, 4, 5]), 1.5),
         torch.frac(torch.tensor([1.0, 2.5, -3.2])),
         torch.randn(4, dtype=torch.cfloat).imag,
         torch.ldexp(torch.tensor([1.0]), torch.tensor([1])),
         torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4])),
         torch.lerp(torch.arange(1.0, 5.0),
                    torch.empty(4).fill_(10), 0.5),
         torch.lerp(
             torch.arange(1.0, 5.0),
             torch.empty(4).fill_(10),
             torch.full_like(torch.arange(1.0, 5.0), 0.5),
         ),
         torch.lgamma(torch.arange(0.5, 2, 0.5)),
         torch.log(torch.arange(5) + 10),
         torch.log10(torch.rand(5)),
         torch.log1p(torch.randn(5)),
         torch.log2(torch.rand(5)),
         torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1, -2, -3])),
         torch.logaddexp(torch.tensor([-100.0, -200.0, -300.0]),
                         torch.tensor([-1, -2, -3])),
         torch.logaddexp(torch.tensor([1.0, 2000.0, 30000.0]),
                         torch.tensor([-1, -2, -3])),
         torch.logaddexp2(torch.tensor([-1.0]), torch.tensor([-1, -2, -3])),
         torch.logaddexp2(torch.tensor([-100.0, -200.0, -300.0]),
                          torch.tensor([-1, -2, -3])),
         torch.logaddexp2(torch.tensor([1.0, 2000.0, 30000.0]),
                          torch.tensor([-1, -2, -3])),
         torch.logical_and(r, s),
         torch.logical_and(r.double(), s.double()),
         torch.logical_and(r.double(), s),
         torch.logical_and(r, s, out=torch.empty(4, dtype=torch.bool)),
         torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8)),
         torch.logical_not(
             torch.tensor([0.0, 1.5, -10.0], dtype=torch.double)),
         torch.logical_not(
             torch.tensor([0.0, 1.0, -10.0], dtype=torch.double),
             out=torch.empty(3, dtype=torch.int16),
         ),
         torch.logical_or(r, s),
         torch.logical_or(r.double(), s.double()),
         torch.logical_or(r.double(), s),
         torch.logical_or(r, s, out=torch.empty(4, dtype=torch.bool)),
         torch.logical_xor(r, s),
         torch.logical_xor(r.double(), s.double()),
         torch.logical_xor(r.double(), s),
         torch.logical_xor(r, s, out=torch.empty(4, dtype=torch.bool)),
         torch.logit(torch.rand(5), eps=1e-6),
         torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0])),
         torch.i0(torch.arange(5, dtype=torch.float32)),
         torch.igamma(a, b),
         torch.igammac(a, b),
         torch.mul(torch.randn(3), 100),
         torch.multiply(torch.randn(4, 1), torch.randn(1, 4)),
         torch.mvlgamma(torch.empty(2, 3).uniform_(1.0, 2.0), 2),
         torch.tensor([float("nan"),
                       float("inf"), -float("inf"), 3.14]),
         torch.nan_to_num(w),
         torch.nan_to_num(w, nan=2.0),
         torch.nan_to_num(w, nan=2.0, posinf=1.0),
         torch.neg(torch.randn(5)),
         # torch.nextafter(torch.tensor([1, 2]), torch.tensor([2, 1])) == torch.tensor([eps + 1, 2 - eps]),
         torch.polygamma(1, torch.tensor([1.0, 0.5])),
         torch.polygamma(2, torch.tensor([1.0, 0.5])),
         torch.polygamma(3, torch.tensor([1.0, 0.5])),
         torch.polygamma(4, torch.tensor([1.0, 0.5])),
         torch.pow(a, 2),
         torch.pow(torch.arange(1.0, 5.0), torch.arange(1.0, 5.0)),
         torch.rad2deg(
             torch.tensor([[3.142, -3.142], [6.283, -6.283],
                           [1.570, -1.570]])),
         torch.randn(4, dtype=torch.cfloat).real,
         torch.reciprocal(a),
         torch.remainder(torch.tensor([-3.0, -2.0]), 2),
         torch.remainder(torch.tensor([1, 2, 3, 4, 5]), 1.5),
         torch.round(a),
         torch.rsqrt(a),
         torch.sigmoid(a),
         torch.sign(torch.tensor([0.7, -1.2, 0.0, 2.3])),
         torch.sgn(a),
         torch.signbit(torch.tensor([0.7, -1.2, 0.0, 2.3])),
         torch.sin(a),
         torch.sinc(a),
         torch.sinh(a),
         torch.sqrt(a),
         torch.square(a),
         torch.sub(torch.tensor((1, 2)), torch.tensor((0, 1)), alpha=2),
         torch.tan(a),
         torch.tanh(a),
         torch.trunc(a),
         torch.xlogy(f, g),
         torch.xlogy(f, 4),
         torch.xlogy(2, g),
     )
Example #30
    def segment(self, folder, dest="", tsize=512, transform=None):
        """
        segment a folder of images

        parameters
        ----------
        folder: string
            folder containing the images to segment

        dest: string
            folder where the predicted masks are written

        tsize: int
            segmentation tile size

        transform: Transform
            transform applied to the predicted masks
        """
        
        # inits
        self.set_eval()
        tf_resize = Resize()
        dataset = ImgDataset(folder)
        img_count = 0
        mask_p = None
        sum_jaccard = 0

        for image, mask in dataset:
            img_count += 1
            mask_p = None  # reset the full-image predicted mask for this image
            sum_intersection, sum_union = 0, 0
            tile_dataset = TileDataset(image, mask, tsize=tsize,
                                       mask_merge=(self._n_classes <= 2))
            dl = DataLoader(dataset=tile_dataset, batch_size=1)

            for tile, tile_mask, tile_id in dl:
                # compute tile position and size without padding
                offset = tile_dataset.topology.tile_offset(tile_id)
                off_x, off_y = offset[1].item(), offset[0].item()
                t_h, t_w = tsize, tsize
                if off_x + tsize > image.height:
                    t_h = image.height - off_x
                if off_y + tsize > image.width:
                    t_w = image.width - off_y

                # compute predicted tile mask
                tile_mask_p = self.predict(tile, transform).cpu()
                # resize if necessary
                if tile_mask_p.shape != tile_mask.shape:
                    tile_mask_p = tf_resize(tile_mask_p, (tsize, tsize))
                # select area without padding
                tile_mask_p = tile_mask_p[:,1:,:t_h,:t_w].int()
                tile_mask = tile_mask[:,1:,:t_h,:t_w].int()

                # compute intersection and union
                sum_intersection += torch.sum(torch.bitwise_and(tile_mask_p, tile_mask))
                sum_union += torch.sum(torch.bitwise_or(tile_mask_p, tile_mask))
                #TODO tile overlap and merging should be taken into account when computing IoU
                #TODO this is computed on the whole tensor -> channel wise better ?
                
                # write the predicted tile mask to the predicted mask
                # bitwise_or is used for tiles merging
                if mask_p is None:
                    mask_p = torch.zeros(tile_mask_p.shape[1], image.height, 
                                         image.width, dtype=torch.int)
                mask_p[:, off_x:(off_x+t_h), off_y:(off_y+t_w)] = torch.bitwise_or(
                    mask_p[:, off_x:(off_x+t_h), off_y:(off_y+t_w)], tile_mask_p.squeeze(0))

            # write the predicted mask channels to files
            if dest == "":
                dest = folder
            if not os.path.exists(dest):
                os.makedirs(dest)
            for i in range(mask_p.shape[0]):
                filename = dest + f'/{img_count}_yp_{i+1}.png'
                cv2.imwrite(filename, mask_p[i].numpy()*255)

            # compute jaccard
            jaccard = 1
            if sum_union != 0:
                jaccard = sum_intersection / sum_union
            sum_jaccard += jaccard
            print(f'image: {img_count}/{len(dataset)}, jaccard: {jaccard:.4f}')
        print(f'average jaccard: {(sum_jaccard/len(dataset)):.4f}')