Example #1
    def _interpolate(im, x, y, out_size):
        # the input is assumed to be NHWC
        num_batch, height, width, channels = im.size()
        x = torch._cast_Float(x).cuda()
        y = torch._cast_Float(y).cuda()
        height_f = torch._cast_Float(torch.Tensor([height]))[0].cuda()
        width_f = torch._cast_Float(torch.Tensor([width]))[0].cuda()
        out_height = out_size[0]
        out_width = out_size[1]
        zero = torch.zeros([], dtype=torch.int32).cuda()
        max_y = torch._cast_Long(torch.Tensor([height - 1]))[0].cuda()
        max_x = torch._cast_Long(torch.Tensor([width - 1]))[0].cuda()

        # scale indices from [-1, 1] to [0, width/height]
        x = (x + 1.0) * width_f / 2.0
        y = (y + 1.0) * height_f / 2.0

        # do sampling
        x0 = torch._cast_Long(torch.floor(x)).cuda()
        x1 = x0 + 1
        y0 = torch._cast_Long(torch.floor(y)).cuda()
        y1 = y0 + 1

        x0 = torch.clamp(x0, zero, max_x)
        x1 = torch.clamp(x1, zero, max_x)
        y0 = torch.clamp(y0, zero, max_y)
        y1 = torch.clamp(y1, zero, max_y)
        dim2 = width
        dim1 = width * height
        base = _repeat(torch.arange(num_batch) * dim1,
                       out_height * out_width).cuda()
        base_y0 = base + y0 * dim2
        base_y1 = base + y1 * dim2
        idx_a = base_y0 + x0
        idx_b = base_y1 + x0
        idx_c = base_y0 + x1
        idx_d = base_y1 + x1

        # use the indices to look up pixels in the flattened image
        # and restore the channel dim
        im_flat = im.contiguous().view(-1, channels)
        im_flat = torch._cast_Float(im_flat)
        Ia = im_flat[idx_a]  # as in TF, indexing is row-major
        Ib = im_flat[idx_b]
        Ic = im_flat[idx_c]
        Id = im_flat[idx_d]

        # calculate interpolated values
        x0_f = torch._cast_Float(x0).cuda()
        x1_f = torch._cast_Float(x1).cuda()
        y0_f = torch._cast_Float(y0).cuda()
        y1_f = torch._cast_Float(y1).cuda()
        wa = ((x1_f - x) * (y1_f - y)).unsqueeze(1)
        wb = ((x1_f - x) * (y - y0_f)).unsqueeze(1)
        wc = ((x - x0_f) * (y1_f - y)).unsqueeze(1)
        wd = ((x - x0_f) * (y - y0_f)).unsqueeze(1)

        return wa * Ia + wb * Ib + wc * Ic + wd * Id
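
For comparison, a minimal sketch (hypothetical shapes) of the built-in torch.nn.functional.grid_sample, which performs the same bilinear lookup on normalized [-1, 1] grids, though on NCHW rather than NHWC input:

    import torch
    import torch.nn.functional as F

    # sample an 8x8 NCHW image at a 4x4 grid of (x, y) coords in [-1, 1]
    im = torch.rand(1, 3, 8, 8)
    grid = torch.rand(1, 4, 4, 2) * 2 - 1
    sampled = F.grid_sample(im, grid, mode='bilinear', align_corners=False)
    print(sampled.shape)  # torch.Size([1, 3, 4, 4])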
Example #2
def aggregate_accuracy(r_dict, metric_name_list):
    m_list = []
    for metric_name in metric_name_list:
        m_list.append(r_dict[metric_name][0])
    # cast the per-sample correctness masks to integers before combining
    m_list[0] = torch._cast_Long(m_list[0])
    m_list[1] = torch._cast_Long(m_list[1])
    m_list[2] = torch._cast_Long(m_list[2])
    agg = torch.stack(m_list, dim=0)
    agg = agg.prod(0, keepdim=False)
    return (agg.sum(), agg.numel())
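
A small usage sketch with a hypothetical r_dict: each metric maps to a tuple whose first element is a per-sample 0/1 correctness mask, and the elementwise product counts the samples that every metric got right:

    import torch

    r_dict = {
        'acc_a': (torch.tensor([1., 0., 1., 1.]),),
        'acc_b': (torch.tensor([1., 1., 1., 0.]),),
        'acc_c': (torch.tensor([1., 0., 1., 1.]),),
    }
    correct, total = aggregate_accuracy(r_dict, ['acc_a', 'acc_b', 'acc_c'])
    print(correct.item(), total)  # 2 4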
Example #3
    def loss(self, score, truth):
        loss = 0
        B = len(truth)
        agg_num_score, agg_score = score
        #loss for the column number
        truth_num = [len(t) for t in truth]  # double check truth format and test cases
        data = torch.from_numpy(np.array(truth_num))
        data = torch._cast_Long(data)
        if self.gpu:
            truth_num_var = Variable(data.cuda())
        else:
            truth_num_var = Variable(data)
        loss += self.CE(agg_num_score, truth_num_var)
        #loss for the key words
        T = len(agg_score[0])
        truth_prob = np.zeros((B, T), dtype=np.float32)
        for b in range(B):
            truth_prob[b][truth[b]] = 1
        data = torch.from_numpy(truth_prob)
        if self.gpu:
            truth_var = Variable(data.cuda())
        else:
            truth_var = Variable(data)
        #loss += self.mlsml(agg_score, truth_var)
        #loss += self.bce_logit(agg_score, truth_var) # double check no sigmoid
        pred_prob = self.sigm(agg_score)
        bce_loss = -torch.mean(3 * (truth_var * torch.log(pred_prob + 1e-10))
                               + (1 - truth_var) * torch.log(1 - pred_prob + 1e-10))
        loss += bce_loss

        return loss
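
The hand-written term above appears to be a positive-weighted BCE (positives scaled by 3); a minimal sketch showing it matches the built-in pos_weight option of binary_cross_entropy_with_logits, up to the 1e-10 epsilon:

    import torch
    import torch.nn.functional as F

    logits = torch.randn(2, 5)
    targets = torch.randint(0, 2, (2, 5)).float()
    pred_prob = torch.sigmoid(logits)
    manual = -torch.mean(3 * (targets * torch.log(pred_prob + 1e-10))
                         + (1 - targets) * torch.log(1 - pred_prob + 1e-10))
    builtin = F.binary_cross_entropy_with_logits(
        logits, targets, pos_weight=torch.tensor(3.0))
    print(torch.allclose(manual, builtin, atol=1e-5))  # True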
Example #4
    def __init__(self, ndim, ind, scale=None):
        """
        Constructor
        :param ndim: Int, number of dimensions
        :param ind: Iterable, indices of uniformly distributed entries
        :param scale: Iterable, standard deviation of Gaussian or width of
        uniform distribution
        """
        super().__init__()
        self.ndim = ndim

        # Set up indices and permutations
        if torch.is_tensor(ind):
            self.register_buffer('ind', torch._cast_Long(ind))
        else:
            self.register_buffer('ind', torch.tensor(ind, dtype=torch.long))

        ind_ = []
        for i in range(self.ndim):
            if i not in self.ind:
                ind_ += [i]
        self.register_buffer('ind_', torch.tensor(ind_, dtype=torch.long))

        perm_ = torch.cat((self.ind, self.ind_))
        inv_perm_ = torch.zeros_like(perm_)
        for i in range(self.ndim):
            inv_perm_[perm_[i]] = i
        self.register_buffer('inv_perm', inv_perm_)

        if scale is None:
            self.register_buffer('scale', torch.ones(self.ndim))
        elif torch.is_tensor(scale):
            self.register_buffer('scale', scale)
        else:
            # register_buffer requires a Tensor, so convert an iterable first
            self.register_buffer('scale', torch.tensor(scale))
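
A standalone sketch of the permutation bookkeeping above: perm_ concatenates the selected indices with the remainder, and inv_perm is built so that it exactly undoes perm_:

    import torch

    ndim, ind = 5, torch.tensor([1, 3])
    ind_ = torch.tensor([i for i in range(ndim) if i not in ind])
    perm_ = torch.cat((ind, ind_))              # tensor([1, 3, 0, 2, 4])
    inv_perm = torch.empty_like(perm_)
    inv_perm[perm_] = torch.arange(ndim)
    x = torch.arange(ndim)
    print(torch.equal(x[perm_][inv_perm], x))   # True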
Example #5
    def loss(self, score, truth):
        loss = 0
        B = len(truth)
        op_num_score, op_score = score
        truth = [t if len(t) <= 2 else t[:2] for t in truth]
        # loss for the op number
        truth_num = [len(t) - 1 for t in truth]  # num_score 0 maps to 1 in truth
        data = torch.from_numpy(np.array(truth_num))
        data = torch._cast_Long(data)
        if self.gpu:
            truth_num_var = Variable(data.cuda())
        else:
            truth_num_var = Variable(data)
        loss += self.CE(op_num_score, truth_num_var)
        # loss for op
        T = len(op_score[0])
        truth_prob = np.zeros((B, T), dtype=np.float32)
        for b in range(B):
            truth_prob[b][truth[b]] = 1
        data = torch.from_numpy(np.array(truth_prob))
        if self.gpu:
            truth_var = Variable(data.cuda())
        else:
            truth_var = Variable(data)
        #loss += self.mlsml(op_score, truth_var)
        #loss += self.bce_logit(op_score, truth_var)
        pred_prob = self.sigm(op_score)
        bce_loss = -torch.mean(3 * (truth_var * torch.log(pred_prob + 1e-10))
                               + (1 - truth_var) * torch.log(1 - pred_prob + 1e-10))
        loss += bce_loss

        return loss
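
A worked example (hypothetical truth lists) of the preprocessing above: targets longer than two ops are truncated, and the count label is len(t) - 1, so a single op maps to class 0:

    truth = [[3], [1, 4], [0, 2, 5]]
    truth = [t if len(t) <= 2 else t[:2] for t in truth]
    print(truth)                        # [[3], [1, 4], [0, 2]]
    print([len(t) - 1 for t in truth])  # [0, 1, 1]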
Example #6
    def _repeat(x, n_repeats):
        rep = torch._cast_Long(
            torch.transpose(torch.ones([n_repeats]).unsqueeze(1), 1, 0))
        x = torch.matmul(x.view(-1, 1), rep)
        return x.view(-1)
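
A quick check of _repeat: each element of x is repeated n_repeats times, flattened row-major; this is what builds the per-batch base offsets in _interpolate above:

    import torch

    print(_repeat(torch.tensor([0, 10]), 3))  # tensor([ 0,  0,  0, 10, 10, 10])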
Example #7
    def loss(self, score, truth):
        loss = 0
        data = torch.from_numpy(np.array(truth))
        data = torch._cast_Long(data)
        if self.gpu:
            truth_var = Variable(data.cuda())
        else:
            truth_var = Variable(data)
        loss = self.CE(score, truth_var)

        return loss
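
A note on the cast used throughout these examples: torch._cast_Long is an internal binding, and the public equivalents are Tensor.long() or Tensor.to(torch.long), e.g.:

    import torch

    data = torch.tensor([0.0, 1.0, 2.0])
    assert torch.equal(torch._cast_Long(data), data.long())
    assert torch.equal(data.long(), data.to(torch.long))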
Example #8
    def forward(self, inputs: torch.Tensor):
        outputs = torch.zeros(*inputs.size(), self.hidden_size,
                              device=inputs.device)  # [batch, label_num, hidden_dim]
        for left, right, emb in zip(self.group_offset[:-1],
                                    self.group_offset[1:], self.emb):
            index = (left <= inputs) & (inputs < right)  # bool mask, [batch, label_num]
            group_inputs = torch._cast_Long(
                (inputs[index] - left).to(emb.weight.device))
            outputs[index] = emb(group_inputs).to(inputs.device)
        return outputs  # [batch, label_num, hidden_dim]
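
A standalone sketch of the bucketing above, with hypothetical offsets: labels in [0, 3) hit the first table and labels in [3, 10) the second, each shifted by its group's left edge before lookup:

    import torch
    import torch.nn as nn

    group_offset = [0, 3, 10]
    emb = nn.ModuleList([nn.Embedding(3, 4), nn.Embedding(7, 4)])
    inputs = torch.tensor([[0, 5, 2], [9, 1, 3]])
    outputs = torch.zeros(*inputs.size(), 4)
    for left, right, e in zip(group_offset[:-1], group_offset[1:], emb):
        index = (left <= inputs) & (inputs < right)
        outputs[index] = e((inputs[index] - left).long())
    print(outputs.shape)  # torch.Size([2, 3, 4])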
Example #9
    def warp_pts(self, pts, flow):
        # width, height, batch_size and max_matches are assumed to be
        # defined in the enclosing scope
        x = pts[:, :, 0]
        x = torch.clamp((x + 1) / 2 * width, 0, width - 1)
        x = torch._cast_Int(torch.round(x))
        y = pts[:, :, 1]
        y = torch.clamp((y + 1) / 2 * height, 0, height - 1)
        y = torch._cast_Int(torch.round(y))

        out = []
        for i in range(batch_size):
            flow_ = flow[i, :, :, :].view([-1, 2])
            xy = x[i, :] + y[i, :] * width
            xy = torch._cast_Long(xy)
            temp = flow_[xy]
            out.append(temp.view([1, max_matches, 2]))
        return torch.cat(out, 0)
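
A tiny check of the flat-index arithmetic above: a point (x, y) on a width-wide grid maps to the row-major offset x + y * width:

    import torch

    width = 4
    x, y = torch.tensor([1, 3]), torch.tensor([0, 2])
    print(x + y * width)  # tensor([ 1, 11])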
Example #10
    def __init__(self, ndim, ind, scale=1., bias=False, activation=None):
        """
        Constructor
        :param ndim: Int, number of dimensions
        :param ind: Iterable, indices of input elements to convert to
        periodic features
        :param scale: Scalar or iterable, used to scale inputs before
        converting them to periodic features
        :param bias: Flag, whether to add a bias
        :param activation: Function or None, activation function to be
        applied
        """
        super(PeriodicFeatures, self).__init__()

        # Set up indices and permutations
        self.ndim = ndim
        if torch.is_tensor(ind):
            self.register_buffer('ind', torch._cast_Long(ind))
        else:
            self.register_buffer('ind', torch.tensor(ind, dtype=torch.long))

        ind_ = []
        for i in range(self.ndim):
            if i not in self.ind:
                ind_ += [i]
        self.register_buffer('ind_', torch.tensor(ind_, dtype=torch.long))

        perm_ = torch.cat((self.ind, self.ind_))
        inv_perm_ = torch.zeros_like(perm_)
        for i in range(self.ndim):
            inv_perm_[perm_[i]] = i
        self.register_buffer('inv_perm', inv_perm_)

        self.weights = nn.Parameter(torch.ones(len(self.ind), 2))
        if torch.is_tensor(scale):
            self.register_buffer('scale', scale)
        else:
            self.scale = scale

        self.apply_bias = bias
        if self.apply_bias:
            self.bias = nn.Parameter(torch.zeros(len(self.ind)))

        if activation is None:
            self.activation = lambda input: input
        else:
            self.activation = activation
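
A minimal sketch of the scale handling at the end of the constructor: tensors go through register_buffer so they follow .to()/.cuda() moves, while a plain scalar can stay an ordinary attribute:

    import torch
    import torch.nn as nn

    class Scaled(nn.Module):
        def __init__(self, scale):
            super().__init__()
            if torch.is_tensor(scale):
                self.register_buffer('scale', scale)
            else:
                self.scale = scale

    print(dict(Scaled(torch.ones(3)).named_buffers()))  # {'scale': tensor([1., 1., 1.])}
    print(Scaled(2.0).scale)                            # 2.0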
Example #11
    def loss(self, score, truth):
        #here suppose truth looks like [[[1, 4], 3], [], ...]
        loss = 0
        B = len(truth)
        col_num_score, col_score = score
        #loss for the column number
        truth_num = [len(t) - 1 for t in truth]  # double check truth format and test cases
        data = torch.from_numpy(np.array(truth_num, dtype=np.int64))
        data = torch._cast_Long(data)
        if self.gpu:
            truth_num_var = Variable(data.cuda())
        else:
            truth_num_var = Variable(data)
        loss += self.CE(col_num_score, truth_num_var)
        #loss for the key words
        T = len(col_score[0])
        # print("T {}".format(T))
        truth_prob = np.zeros((B, T), dtype=np.float32)
        for b in range(B):
            gold_l = []
            for t in truth[b]:
                if isinstance(t, list):
                    gold_l.extend(t)
                else:
                    gold_l.append(t)
            truth_prob[b][gold_l] = 1
        data = torch.from_numpy(truth_prob)
        # print("data {}".format(data))
        # print("data {}".format(data.cuda()))
        if self.gpu:
            truth_var = Variable(data.cuda())
        else:
            truth_var = Variable(data)
        #loss += self.mlsml(col_score, truth_var)
        #loss += self.bce_logit(col_score, truth_var) # double check no sigmoid
        pred_prob = self.sigm(col_score)
        bce_loss = -torch.mean(3 * (truth_var * torch.log(pred_prob + 1e-10))
                               + (1 - truth_var) * torch.log(1 - pred_prob + 1e-10))
        loss += bce_loss

        return loss
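
A worked example (hypothetical truth entry) of the flattening above: nested column groups like [[1, 4], 3] are unrolled into one index list before building the multi-hot target row:

    gold_l = []
    for t in [[1, 4], 3]:
        if isinstance(t, list):
            gold_l.extend(t)
        else:
            gold_l.append(t)
    print(gold_l)  # [1, 4, 3]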