Example #1
    def _forward_alg(self, feats, mask):
        """Run the forward algorithm to compute the partition function, batched.

        feats: (batch_size, seq_len, tag_size), mask: (batch_size, seq_len).
        """
        batch_size = feats.size(0)
        seq_len = feats.size(1)
        tag_size = feats.size(2)
        # (batch_size, seq_len) -> (seq_len, batch_size) so we can step over time
        mask = mask.transpose(1, 0).contiguous()
        ins_num = seq_len * batch_size
        # careful with the view shape: (ins_num, 1, tag_size), not (ins_num, tag_size, 1)
        feats = feats.transpose(1, 0).contiguous().view(ins_num, 1, tag_size).expand(ins_num, tag_size, tag_size)
        # emission plus transition score for every (from_tag, to_tag) pair
        scores = feats + self.transitions.view(1, tag_size, tag_size).expand(ins_num, tag_size, tag_size)
        scores = scores.view(seq_len, batch_size, tag_size, tag_size)
        # build iter
        seq_iter = enumerate(scores)
        _, inivalues = next(seq_iter)  # bat_size * from_target_size * to_target_size
        partition = inivalues[:, self.START_TAG, :].clone().view(batch_size, tag_size, 1)  # bat_size * to_target_size

        for idx, cur_values in seq_iter:
            # previous to_target is current from_target;
            # cur_values: batch_size * from_target * to_target
            cur_values = cur_values + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)
            cur_partition = log_sum_exp(cur_values, tag_size)

            mask_idx = mask[idx, :].view(batch_size, 1).expand(batch_size, tag_size)
            # keep only the partition values where mask == 1
            mask_idx = Variable(mask_idx)
            masked_cur_partition = cur_partition.masked_select(mask_idx)
            # make mask_idx broadcastable to avoid the masked_scatter_ warning
            mask_idx = mask_idx.contiguous().view(batch_size, tag_size, 1)
            # overwrite partition where mask == 1; keep the old value elsewhere
            partition.masked_scatter_(mask_idx, masked_cur_partition)

        # at the last step, add the transition scores once more, log_sum_exp,
        # then read off the value at STOP_TAG
        cur_values = self.transitions.view(1, tag_size, tag_size).expand(batch_size, tag_size, tag_size) + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)
        cur_partition = log_sum_exp(cur_values, tag_size)
        final_partition = cur_partition[:, self.STOP_TAG]
        return final_partition.sum(), scores
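
Both `_forward_alg` examples call a `log_sum_exp(vec, m_size)` helper that is not shown here. A minimal sketch of what such a helper might look like, assuming `vec` holds scores of shape (batch_size, from_target, to_target) and the reduction runs over the from_target axis:

import torch

def log_sum_exp(vec, m_size):
    # vec: (batch_size, from_target, to_target) -> (batch_size, to_target)
    # m_size is kept only for signature compatibility; it equals vec.size(2)
    max_score, _ = torch.max(vec, 1, keepdim=True)  # per-column max, for numerical stability
    return max_score.squeeze(1) + torch.log(torch.sum(torch.exp(vec - max_score), 1))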
Example #2
	def forward(self, eyes, v, R, t, alpha):
		# P: a 13x13 grid of homogeneous 2D points covering [-1, 1]^2, per batch element
		P = Variable(torch.zeros(eyes.size(0), 169, 3))
		for b in range(eyes.size(0)):
			for i in range(13):
				for j in range(13):
					k = 13 * i + j
					P[b, k, 0] = (i - 6) / 6.0
					P[b, k, 1] = (j - 6) / 6.0
					P[b, k, 2] = 1


		# id_matrix: alpha times the 3x3 identity (not a plain identity matrix)
		id_matrix = Variable(torch.zeros(eyes.size(0), 3, 3))
		id_matrix[:, 0, 0] = alpha
		id_matrix[:, 1, 1] = alpha
		id_matrix[:, 2, 2] = alpha

		# normalize v to unit length; the clamp guards against division by zero
		v = v / v.norm(2, 1).clamp(min=1e-14).view(-1, 1).expand_as(v)

		# outer product v v^T, then M = v v^T - alpha * I
		v_matrix = torch.bmm(v.view(-1, 3, 1), v.view(-1, 1, 3))
		M = v_matrix - id_matrix

		sigma_matrix = Variable(torch.zeros(eyes.size(0), 3, 3))

		# first two columns of R, plus the eye position and translation,
		# reshaped as row (-1, 1, 3) and column (-1, 3, 1) vectors for bmm
		v1 = R[:, :, 0].contiguous().view(-1, 3)
		v2 = R[:, :, 1].contiguous().view(-1, 3)
		u_e = eyes

		v11 = v1.contiguous().view(-1, 1, 3)
		v21 = v2.contiguous().view(-1, 1, 3)
		v12 = v1.contiguous().view(-1, 3, 1)
		v22 = v2.contiguous().view(-1, 3, 1)
		u_e1 = u_e.contiguous().view(-1, 1, 3)
		u_e2 = u_e.contiguous().view(-1, 3, 1)
		t1 = t.contiguous().view(-1, 1, 3)
		t2 = t.contiguous().view(-1, 3, 1)


		# each entry is a bilinear form x^T M y with x, y drawn from {v1, v2, t - u_e}
		sigma_matrix[:, 0:1, 0:1] = torch.bmm(v11, torch.bmm(M, v12))
		sigma_matrix[:, 0:1, 1:2] = torch.bmm(v11, torch.bmm(M, v22))
		sigma_matrix[:, 0:1, 2:3] = torch.bmm(v11, torch.bmm(M, t2 - u_e2))
		sigma_matrix[:, 1:2, 0:1] = torch.bmm(v21, torch.bmm(M, v12))
		sigma_matrix[:, 1:2, 1:2] = torch.bmm(v21, torch.bmm(M, v22))
		sigma_matrix[:, 1:2, 2:3] = torch.bmm(v21, torch.bmm(M, t2 - u_e2))
		sigma_matrix[:, 2:3, 0:1] = torch.bmm(t1 - u_e1, torch.bmm(M, v12))
		sigma_matrix[:, 2:3, 1:2] = torch.bmm(t1 - u_e1, torch.bmm(M, v22))
		sigma_matrix[:, 2:3, 2:3] = torch.bmm(t1 - u_e1, torch.bmm(M, t2 - u_e2))

		# evaluate the quadratic form p^T sigma p for all 169 grid points at once
		sigma_matrix_all = sigma_matrix.view(-1, 1, 3, 3).expand(eyes.size(0), 169, 3, 3).contiguous().view(-1, 3, 3)
		P1 = P.contiguous().view(-1, 1, 3)
		P2 = P.contiguous().view(-1, 3, 1)
		sum_all = torch.bmm(P1, torch.bmm(sigma_matrix_all, P2)).contiguous().view(-1, 169)

		return sum_all
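
A small, hypothetical driver for the module above. The input shapes are inferred from the views used inside forward() (eyes, v, t as (batch, 3) tensors, R as (batch, 3, 3)), and `model` stands in for an instance of the enclosing module:

import torch

batch = 4
eyes = torch.randn(batch, 3)   # eye positions
v = torch.randn(batch, 3)      # direction vectors (normalized inside forward)
R = torch.randn(batch, 3, 3)   # per-sample 3x3 matrices; only columns 0 and 1 are used
t = torch.randn(batch, 3)      # translations

out = model(eyes, v, R, t, alpha=1.0)  # model: an instance of the module above
print(out.size())                      # expected: (batch, 169), one value per 13x13 grid point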
Example #3
    def _forward_alg(self, feats, mask):
        """
        Do the forward algorithm to compute the partition function (batched).

        Args:
            feats: size=(batch_size, seq_len, self.target_size+2)
            mask: size=(batch_size, seq_len)

        Returns:
            (final_partition.sum(), scores): the log partition summed over the
            batch, and the scores tensor of size
            (seq_len, batch_size, tag_size, tag_size).
        """
        batch_size = feats.size(0)
        seq_len = feats.size(1)
        tag_size = feats.size(2)
        mask = mask.transpose(1, 0).contiguous()
        ins_num = seq_len * batch_size
        """ be careful the view shape, it is .view(ins_num, 1, tag_size) but not .view(ins_num, tag_size, 1) """
        feats = feats.transpose(1, 0).contiguous().view(
            ins_num, 1, tag_size).expand(ins_num, tag_size, tag_size)
        """ need to consider start """
        scores = feats + self.transitions.view(1, tag_size, tag_size).expand(
            ins_num, tag_size, tag_size)
        scores = scores.view(seq_len, batch_size, tag_size, tag_size)
        # build iter
        seq_iter = enumerate(scores)
        _, inivalues = next(
            seq_iter)  # bat_size * from_target_size * to_target_size
        """ only need start from start_tag """
        partition = inivalues[:, self.START_TAG, :].clone().view(
            batch_size, tag_size, 1)  # bat_size * to_target_size
        """
        add start score (from start to all tag, duplicate to batch_size)
        partition = partition + self.transitions[START_TAG,:].view(1, tag_size, 1).expand(batch_size, tag_size, 1)
        iter over last scores
        """
        for idx, cur_values in seq_iter:
            """
            previous to_target is current from_target
            partition: previous results log(exp(from_target)), #(batch_size * from_target)
            cur_values: bat_size * from_target * to_target
            """
            cur_values = cur_values + partition.contiguous().view(
                batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)
            cur_partition = log_sum_exp(cur_values, tag_size)

            mask_idx = mask[idx, :].view(batch_size,
                                         1).expand(batch_size, tag_size)
            """ effective updated partition part, only keep the partition value of mask value = 1 """
            mask_idx = Variable(mask_idx)
            masked_cur_partition = cur_partition.masked_select(mask_idx)
            """ let mask_idx broadcastable, to disable warning """
            mask_idx = mask_idx.contiguous().view(batch_size, tag_size, 1)
            """ replace the partition where the maskvalue=1, other partition value keeps the same """
            partition.masked_scatter_(mask_idx, masked_cur_partition)
        """ 
        until the last state, add transition score for all partition (and do log_sum_exp) 
        then select the value in STOP_TAG 
        """
        cur_values = self.transitions.view(1, tag_size, tag_size).expand(
            batch_size, tag_size, tag_size) + partition.contiguous().view(
                batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)
        cur_partition = log_sum_exp(cur_values, tag_size)
        final_partition = cur_partition[:, self.STOP_TAG]
        return final_partition.sum(), scores
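
In a CRF layer like this one, the summed partition (log Z) is typically paired with a gold-path score to form the training loss. A sketch under that assumption; `_score_sentence` is a hypothetical helper here, since the examples above only show the forward pass:

def neg_log_likelihood(self, feats, mask, tags):
    # forward_score: sum over the batch of log Z(x), from the forward algorithm above
    forward_score, scores = self._forward_alg(feats, mask)
    # gold_score: total score of the gold tag sequences (hypothetical helper)
    gold_score = self._score_sentence(scores, mask, tags)
    # NLL = log Z - score(gold path)
    return forward_score - gold_score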