Example No. 1
    def forward(self, batch):

        content = batch[: 3]
        question = batch[3: 6]

        # mask
        content_mask = utils.get_mask(content[0])
        question_mask = utils.get_mask(question[0])

        # embedding
        content_vec = self.embedding(content)
        question_vec = self.embedding(question)

        # encoder
        content_vec = self.encoder(content_vec, content_mask)
        question_vec = self.encoder(question_vec, question_mask)

        # aligner
        R1, Z1, E1, B1 = self.align_1(content_vec, content_mask, question_vec, question_mask)
        R2, Z2, E2, B2 = self.align_2(R1, content_mask, question_vec, question_mask, E1, B1)
        R3, _, _, _ = self.align_3(R2, content_mask, question_vec, question_mask, E2, B2, Z1, Z2)

        # pointer
        out = self.pointer(R3, content_mask, question_vec, question_mask)

        return out
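`utils.get_mask` itself is not shown in any of these snippets. A minimal sketch of what the call sites in this and the other `forward(self, batch)` examples imply, assuming token id 0 marks padding (an assumption, not confirmed by the source):

    # hypothetical sketch of utils.get_mask; assumes padding id 0
    import torch

    def get_mask(token_ids):
        # 1 where a real token is present, 0 at padded positions
        return (token_ids != 0).long()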
Example No. 2
    def inference_minor_util60(role_id, handcards, num, is_pair, dup_mask, main_cards_char):
        for main_card in main_cards_char:
            handcards.remove(main_card)

        s = get_mask(handcards, action_space, None).astype(np.float32)
        outputs = []
        minor_type = 1 if is_pair else 0
        for i in range(num):
            input_single, input_pair, _, _ = get_masks(handcards, None)
            _, _, _, _, _, _, minor_response_prob = func(
                [np.array([role_id]), s.reshape(1, -1), np.zeros([1, 9085]), np.array([minor_type])]
            )

            # choose the mask for the minor card(s) to give
            if is_pair:
                mask = np.concatenate([input_pair, [0, 0]]) * dup_mask
            else:
                mask = input_single * dup_mask

            minor_response = take_action_from_prob(minor_response_prob, mask)
            dup_mask[minor_response] = 0

            # convert network output to char cards
            handcards.remove(to_char(minor_response + 3))
            if is_pair:
                handcards.remove(to_char(minor_response + 3))
            s = get_mask(handcards, action_space, None).astype(np.float32)

            # save to output
            outputs.append(to_char(minor_response + 3))
            if is_pair:
                outputs.append(to_char(minor_response + 3))
        return outputs
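For context, a minimal sketch of what `take_action_from_prob` might do here, assuming it greedily picks the highest-probability action among those the mask allows (the real helper may sample from the distribution instead):

    import numpy as np

    def take_action_from_prob(prob, mask):
        # hypothetical: zero out disallowed actions, then take the argmax
        masked = prob.flatten() * mask
        return int(np.argmax(masked))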
Example No. 3
    def call(self, inputs, state):
        """Gated recurrent unit (GRU) with nunits cells."""
        mask_w, mask_b = get_mask(self._gate_kernel,
                                  self.rho), get_mask(self._gate_bias,
                                                      self.rho)
        w_ = tf.where(mask_w, B_tanh(self._gate_kernel),
                      tf.zeros(self._gate_kernel.shape))
        b_ = tf.where(mask_b, B_tanh(self._gate_bias),
                      tf.zeros(self._gate_bias.shape))

        gate_inputs = tf.matmul(array_ops.concat([inputs, state], 1), w_)
        gate_inputs = nn_ops.bias_add(gate_inputs, b_)

        value = B_sigmoid(gate_inputs)
        r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)

        r_state = r * state

        mask_w, mask_b = get_mask(self._candidate_kernel,
                                  self.rho), get_mask(self._candidate_bias,
                                                      self.rho)
        w_ = tf.where(mask_w, B_tanh(self._candidate_kernel),
                      tf.zeros(self._candidate_kernel.shape))
        b_ = tf.where(mask_b, B_tanh(self._candidate_bias),
                      tf.zeros(self._candidate_bias.shape))

        candidate = tf.matmul(array_ops.concat([inputs, r_state], 1), w_)
        candidate = nn_ops.bias_add(candidate, b_)

        c = B_tanh(candidate)
        new_h = (1 - u) * state + u * c
        return new_h, new_h
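In this example `get_mask(param, self.rho)` plausibly builds a binary keep-mask over the weights before they are binarized with `B_tanh`. A minimal sketch, assuming `rho` is a magnitude threshold (an assumption):

    import tensorflow as tf

    def get_mask(param, rho):
        # hypothetical: keep entries whose magnitude reaches the threshold rho
        return tf.abs(param) >= rho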
Example No. 4
    def forward(self, batch):
        """
        :param batch: [content, question, answer_start, answer_end]
        :return: ans_range (2, batch_size, content_len)
        """
        content = batch[: 3]
        question = batch[3: 6]

        # mask
        content_mask = utils.get_mask(content[0])  # (batch_size, seq_len)
        question_mask = utils.get_mask(question[0])

        # embedding
        content_vec = self.embedding(content)  # (seq_len, batch_size, embedding_dim)
        question_vec = self.embedding(question)

        # encoder
        content_vec = self.encoder(content_vec, content_mask)  # (seq_len, batch_size, hidden_size(*2))
        question_vec = self.encoder(question_vec, question_mask)

        # match-rnn
        hr = self.match_rnn(content_vec, content_mask, question_vec, question_mask)  # (p_seq_len, batch_size, hidden_size(*2))

        # pointer
        ans_range = self.pointer_net(hr, content_mask)

        return ans_range
Example No. 5
    def forward(self, batch):
        """
        :param batch:
        :return: (batch_size, 3)
        """
        passage = batch[:3]
        query = batch[3:6]
        alter_1 = batch[6]
        alter_2 = batch[7]
        alter_3 = batch[8]

        # mask
        passage_mask = utils.get_mask(passage[0])
        query_mask = utils.get_mask(query[0])
        alter1_mask = utils.get_mask(alter_1)
        alter2_mask = utils.get_mask(alter_2)
        alter3_mask = utils.get_mask(alter_3)

        # embedding
        passage_vec = self.embedding(passage)
        query_vec = self.embedding(query)
        alter1_vec = self.embedding(alter_1)
        alter2_vec = self.embedding(alter_2)
        alter3_vec = self.embedding(alter_3)

        # encoder
        passage_vec = self.encoder_p(
            passage_vec, passage_mask)  # (c_len, batch_size, hidden_size*2)
        _, query_vec = self.encoder_q(
            query_vec, query_mask,
            need_final_state=True)  # (batch_size, hidden_size)
        _, alter1_vec = self.encoder_a(
            alter1_vec, alter1_mask,
            need_final_state=True)  # (batch_size, hidden_size)
        _, alter2_vec = self.encoder_a(alter2_vec,
                                       alter2_mask,
                                       need_final_state=True)
        _, alter3_vec = self.encoder_a(alter3_vec,
                                       alter3_mask,
                                       need_final_state=True)
        alters = torch.stack([alter1_vec, alter2_vec, alter3_vec
                              ]).transpose(0,
                                           1)  # (batch_size, 3, hidden_size)

        # attention
        alpha = self.sim_w(query_vec).unsqueeze(
            2)  # (batch_size, hidden_size*2, 1)
        alpha = torch.bmm(passage_vec.transpose(0, 1),
                          alpha).squeeze(2)  # (batch_size, c_len)
        mask = passage_mask.eq(0)
        alpha.masked_fill_(mask, -float('inf'))
        alpha = f.softmax(alpha, dim=1).unsqueeze(1)  # (batch_size, 1, c_len)
        passage_vec = torch.bmm(alpha, passage_vec.transpose(0, 1)).squeeze(
            1)  # (batch_size, hidden_size*2)

        # outputs
        outputs = self.choose(passage_vec, alters)
        outputs = f.log_softmax(outputs, dim=1)

        return outputs
Example No. 6
    def train_collate_fn(self, batch):
        u_ids, i_ids, ratings, u_revs, i_revs, u_rids, i_rids, ui_revs, neg_ui_revs, ui_labels, neg_ui_labels = zip(
            *batch)
        u_ids = LongTensor(u_ids)
        i_ids = LongTensor(i_ids)
        ratings = FloatTensor(ratings)
        u_revs = LongTensor(u_revs)
        i_revs = LongTensor(i_revs)
        u_rids = LongTensor(u_rids)
        i_rids = LongTensor(i_rids)
        ui_revs = LongTensor(ui_revs)
        neg_ui_revs = LongTensor(neg_ui_revs)
        ui_labels = FloatTensor(ui_labels)
        neg_ui_labels = FloatTensor(neg_ui_labels)

        u_rev_word_masks, u_rev_sent_masks, u_rev_masks = self.get_all_masks(
            u_revs)
        i_rev_word_masks, i_rev_sent_masks, i_rev_masks = self.get_all_masks(
            i_revs)
        ui_word_masks = get_mask(ui_revs)
        neg_ui_word_masks = get_mask(neg_ui_revs)

        return (u_ids, i_ids, ratings), \
               (u_revs, i_revs, u_rev_word_masks, i_rev_word_masks,
                u_rev_sent_masks, i_rev_sent_masks, u_rev_masks, i_rev_masks), \
               (u_rids, i_rids), \
               (ui_revs, neg_ui_revs, ui_word_masks, neg_ui_word_masks, ui_labels, neg_ui_labels)
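A minimal sketch of what `get_all_masks` could return for (bz, rn, sn, wn) review tensors, assuming padding id 0 and the same sum-then-mask trick used by `get_sent_mask` later in this collection (assumptions):

    import torch

    def get_all_masks(revs):
        # revs: (bz, rn, sn, wn) word ids, 0 = padding (hypothetical layout)
        word_masks = (revs != 0).float()                   # (bz, rn, sn, wn)
        sent_masks = (revs.sum(dim=-1) != 0).float()       # (bz, rn, sn)
        rev_masks = (revs.sum(dim=(-1, -2)) != 0).float()  # (bz, rn)
        return word_masks, sent_masks, rev_masks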
Example No. 7
    def forward(self, inputs):
        premises_indices = inputs[0]
        hypothesis_indices = inputs[1]
        premises_lengths = torch.sum(premises_indices != 0, dim=-1)
        hypothesis_lengths = torch.sum(hypothesis_indices != 0, dim=-1)
        premise_mask = get_mask(premises_indices,
                                premises_lengths).to(self.args.device)
        hypothesis_mask = get_mask(hypothesis_indices,
                                   hypothesis_lengths).to(self.args.device)

        embed_premises = self.embed(premises_indices)
        embed_hypothesis = self.embed(hypothesis_indices)

        if self.dropout:
            embed_premises = self._rnn_dropout(embed_premises)
            embed_hypothesis = self._rnn_dropout(embed_hypothesis)

        encoded_premises = self._encoding(embed_premises, premises_lengths)
        encoded_hypothesis = self._encoding(embed_hypothesis,
                                            hypothesis_lengths)

        attended_premises, attended_hypothesis = self._attention(
            encoded_premises, premise_mask, encoded_hypothesis,
            hypothesis_mask)
        enhanced_premise = torch.cat([
            encoded_premises, attended_premises, encoded_premises -
            attended_premises, encoded_premises * attended_premises
        ],
                                     dim=-1)
        enhanced_hypothesis = torch.cat([
            encoded_hypothesis, attended_hypothesis, encoded_hypothesis -
            attended_hypothesis, encoded_hypothesis * attended_hypothesis
        ],
                                        dim=-1)

        projected_premises = self._projection(enhanced_premise)
        projected_hypothesis = self._projection(enhanced_hypothesis)

        if self.dropout:
            projected_premises = self._rnn_dropout(projected_premises)
            projected_hypothesis = self._rnn_dropout(projected_hypothesis)

        v_ai = self._composition(projected_premises, premises_lengths)
        v_bj = self._composition(projected_hypothesis, hypothesis_lengths)

        v_a_avg = torch.sum(
            v_ai * premise_mask.unsqueeze(1).transpose(2, 1),
            dim=1) / torch.sum(premise_mask, dim=1, keepdim=True)
        v_b_avg = torch.sum(
            v_bj * hypothesis_mask.unsqueeze(1).transpose(2, 1),
            dim=1) / torch.sum(hypothesis_mask, dim=1, keepdim=True)

        v_a_max, _ = replace_masked(v_ai, premise_mask, -1e7).max(dim=1)
        v_b_max, _ = replace_masked(v_bj, hypothesis_mask, -1e7).max(dim=1)

        v = torch.cat([v_a_avg, v_a_max, v_b_avg, v_b_max], dim=1)

        logits = self._classification(v)

        return logits
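In this ESIM-style model `get_mask` takes both the indices and the lengths. A minimal sketch consistent with these call sites, assuming index 0 marks padding (an assumption):

    import torch

    def get_mask(sequences_batch, sequences_lengths):
        batch_size = sequences_batch.size(0)
        max_length = int(torch.max(sequences_lengths))
        mask = torch.ones(batch_size, max_length, dtype=torch.float)
        mask[sequences_batch[:, :max_length] == 0] = 0.0
        return mask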
Example No. 8
    def forward(self, batch):
        """
        :param batch: [content, question, answer_start, answer_end]
        :return: ans_range(2, batch_size, content_len)
        """

        passage = batch[: 3]
        query = batch[3: 6]
        alter_1 = batch[6]
        alter_2 = batch[7]
        alter_3 = batch[8]

        # mask
        passage_mask = utils.get_mask(passage[0])
        query_mask = utils.get_mask(query[0])
        alter1_mask = utils.get_mask(alter_1)
        alter2_mask = utils.get_mask(alter_2)
        alter3_mask = utils.get_mask(alter_3)

        # embedding
        passage_vec = self.embedding(passage)
        query_vec = self.embedding(query)
        alter1_vec = self.embedding(alter_1)
        alter2_vec = self.embedding(alter_2)
        alter3_vec = self.embedding(alter_3)

        # encode
        passage_vec = self.encoder_p_q(passage_vec, passage_mask)
        query_vec = self.encoder_p_q(query_vec, query_mask)

        # encode alternatives and mean-pool them
        alter1_vec = self.encoder_a(alter1_vec, alter1_mask)  # (a_len, batch_size, hidden_size*2)
        alter1_vec = utils.compute_mean(alter1_vec, alter1_mask)
        alter2_vec = self.encoder_a(alter2_vec, alter2_mask)
        alter2_vec = utils.compute_mean(alter2_vec, alter2_mask)
        alter3_vec = self.encoder_a(alter3_vec, alter3_mask)
        alter3_vec = utils.compute_mean(alter3_vec, alter3_mask)

        alters = torch.stack([alter1_vec, alter2_vec, alter3_vec])
        alters = alters.transpose(0, 1)  # (batch_size, 3, hidden_size*2)

        # match rnn
        hr = self.match_rnn(passage_vec, passage_mask, query_vec, query_mask)

        # self matching attention
        hr = self.self_match_attention(hr, passage_mask)

        # aggregation
        hr = self.addition_rnn(hr, passage_mask)

        # mean p
        hr = self.mean_p(hr, passage_mask)  # (batch_size, hidden_size*2)

        # outputs
        outputs = self.choose(hr, alters)
        outputs = f.log_softmax(outputs, dim=1)

        return outputs
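A minimal sketch of `utils.compute_mean` as used above, assuming inputs of shape (seq_len, batch_size, hidden) and a (batch_size, seq_len) mask (both assumptions):

    import torch

    def compute_mean(x, mask):
        # x: (seq_len, batch_size, hidden), mask: (batch_size, seq_len)
        m = mask.float().transpose(0, 1).unsqueeze(-1)  # (seq_len, batch_size, 1)
        return (x * m).sum(dim=0) / m.sum(dim=0).clamp(min=1.0)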
Example No. 9
    def _run_interface(self, runtime):
        preprocessedfile = self.inputs.preprocessedfile
        regfile = self.inputs.regfile

        # invert transform matrix
        invt = fsl.ConvertXFM()
        invt.inputs.in_file = regfile
        invt.inputs.invert_xfm = True
        invt.inputs.out_file = regfile + '_inv.mat'
        invt_result = invt.run()

        # define source mask (surface, volume)
        input_labels = self.inputs.vol_source+self.inputs.vol_target
        sourcemask = get_mask(input_labels, self.inputs.parcfile)
        sourcemaskfile = os.path.abspath('sourcemask.nii')
        sourceImg = nb.Nifti1Image(sourcemask, None)
        nb.save(sourceImg, sourcemaskfile)

        # transform anatomical mask to functional space
        sourcexfm = fsl.ApplyXfm()
        sourcexfm.inputs.in_file = sourcemaskfile
        sourcexfm.inputs.in_matrix_file = invt_result.outputs.out_file
        _, base, _ = split_filename(sourcemaskfile)
        sourcexfm.inputs.out_file = base + '_xfm.nii.gz'
        sourcexfm.inputs.reference = preprocessedfile
        sourcexfm.inputs.interp = 'nearestneighbour'
        sourcexfm.inputs.apply_xfm = True
        sourcexfm_result = sourcexfm.run()

        # manual source data creation (-mask_source option not yet available in AFNI)
        sourcemask_xfm = nb.load(sourcexfm_result.outputs.out_file).get_data()
        inputdata = nb.load(preprocessedfile).get_data()
        maskedinput = np.zeros_like(inputdata)
        for timepoint in range(inputdata.shape[3]):
            maskedinput[:, :, :, timepoint] = np.where(sourcemask_xfm, inputdata[:, :, :, timepoint], 0)
        maskedinputfile = os.path.abspath('inputfile.nii')
        inputImg = nb.Nifti1Image(maskedinput, None)
        nb.save(inputImg, maskedinputfile)

        ## PREPARE TARGET MASK ##

        # define target mask (surface, volume)
        targetmask = get_mask(self.inputs.vol_target, self.inputs.parcfile)
        targetmaskfile = os.path.abspath('targetmask.nii')
        targetImg = nb.Nifti1Image(targetmask, None)
        nb.save(targetImg, targetmaskfile)

        # same transform for target
        targetxfm = fsl.ApplyXfm()
        targetxfm.inputs.in_file = targetmaskfile
        targetxfm.inputs.in_matrix_file = invt_result.outputs.out_file
        _, base, _ = split_filename(targetmaskfile)
        targetxfm.inputs.out_file = base + '_xfm.nii.gz'
        targetxfm.inputs.reference = preprocessedfile
        targetxfm.inputs.interp = 'nearestneighbour'
        targetxfm.inputs.apply_xfm = True
        targetxfm_result = targetxfm.run()

        return runtime
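A minimal sketch of `get_mask(labels, parcfile)` as used here, assuming `parcfile` is a parcellation volume and the mask selects voxels whose label appears in the list (assumptions):

    import nibabel as nb
    import numpy as np

    def get_mask(labels, parcfile):
        # hypothetical: binary mask of voxels labeled with any id in `labels`
        parc = nb.load(parcfile).get_data()
        return np.isin(parc, labels).astype(np.uint8)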
Example No. 10
def quilting(texture_image_name, transfer_image_name):
    texture_image = load_image(path+texture_image_name)
    transfer_image = load_image(path+transfer_image_name)
    [h, w, _] = texture_image.shape
    [out_x, out_y, g] = transfer_image.shape
    blocks = []
    result = np.ones([out_x, out_y, g])*-1

    for i in range(h-block_size+1):
        for j in range(w-block_size+1):
            blocks.append(texture_image[i:i+block_size, j:j+block_size, :])

    blocks = np.array(blocks)
    # fill first top left corner of result image with random block
    result[0:block_size, 0:block_size, :] = blocks[np.random.randint(len(blocks))]

    blocks_in_row = int(np.ceil( (out_x-block_size)/ (block_size-overlap_size) ) + 1)
    blocks_in_col = int(np.ceil( (out_y-block_size)/ (block_size-overlap_size) ) + 1)

    for i in range(blocks_in_row):
        for j in range(blocks_in_col):
            if i == 0 and j == 0:
                continue
            # start and end locations of pixels in result image to be filled
            start_i = i*(block_size-overlap_size)
            start_j = j*(block_size-overlap_size)
            end_i = min(start_i + block_size, out_x)
            end_j = min(start_j + block_size, out_y)

            curr_box_to_fill = result[start_i:end_i, start_j:end_j, :]
            target_block = transfer_image[start_i:end_i, start_j:end_j, :]

            best_block = compare_blocks(blocks, curr_box_to_fill, target_block, block_size, alpha, tolerance)

            if i == 0:
                ov = curr_box_to_fill[:, :overlap_size, :]
                mask = get_mask(best_block, ov, None, 'v', overlap_size)
            elif j == 0:
                ov = curr_box_to_fill[:overlap_size, :, :]
                mask = get_mask(best_block, None, ov, 'h', overlap_size)
            else:
                ov_v = curr_box_to_fill[:, :overlap_size, :]
                ov_h = curr_box_to_fill[:overlap_size, :, :]
                mask = get_mask(best_block, ov_v, ov_h, 'v+h', overlap_size)

            curr_box_to_fill = curr_box_to_fill * (mask == 0)
            result[start_i:end_i, start_j:end_j, :] = curr_box_to_fill + best_block * mask

            completion = 100.0/blocks_in_row*(i + j*1.0/blocks_in_col)

            print("Transfer for {name} {per:.2f}% complete...".format(name=transfer_image_name, per=completion))

    result = np.asarray(result, dtype=np.uint8)
    out_name = '{}_{}_b={}_ov={}_a={}.png'.format(
        texture_image_name.split('.')[0], transfer_image_name.split('.')[0],
        block_size, overlap_size, tolerance)
    Image.fromarray(result).save(path + 'results/' + out_name)
    return Image.fromarray(result)
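The quilting masks come from a minimum-error boundary cut in the overlap region (Efros-Freeman style). A minimal sketch of the vertical-seam ('v') case, assuming 1 marks pixels taken from the new block; the helper name and exact signature are hypothetical:

    import numpy as np

    def get_mask_vertical(best_block, overlap, overlap_size):
        # squared error over the vertical overlap strip
        err = ((best_block[:, :overlap_size].astype(np.float64) - overlap) ** 2).sum(axis=2)
        cost = err.copy()
        h, w = cost.shape
        for r in range(1, h):
            for c in range(w):
                lo, hi = max(c - 1, 0), min(c + 2, w)
                cost[r, c] += cost[r - 1, lo:hi].min()
        # backtrack the cheapest seam; keep the new block to its right
        mask = np.ones(best_block.shape)
        c = int(np.argmin(cost[-1]))
        for r in range(h - 1, -1, -1):
            mask[r, :c, :] = 0
            if r > 0:
                lo, hi = max(c - 1, 0), min(c + 2, w)
                c = lo + int(np.argmin(cost[r - 1, lo:hi]))
        return mask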
Example No. 11
    def forward(self, q1, q1_lengths, q2, q2_lengths):

        # get masks for the attention step later
        q1_mask = get_mask(q1, q1_lengths).to(self.device)
        q2_mask = get_mask(q2, q2_lengths).to(self.device)

        # embedding lookup
        q1_embed = self.word_embedding(q1)
        q2_embed = self.word_embedding(q2)

        # apply dropout to the embeddings
        if self.dropout:
            q1_embed = self.rnn_dropout(q1_embed)
            q2_embed = self.rnn_dropout(q2_embed)

        # BiLSTM encoding (log the lengths and re-raise if it fails)
        try:
            q1_encoded = self.first_rnn(q1_embed, q1_lengths)
            q2_encoded = self.first_rnn(q2_embed, q2_lengths)
        except Exception:
            print(q1_lengths, q2_lengths)
            raise
        # attention
        q1_aligned, q2_aligned = self.attention(q1_encoded, q1_mask, q2_encoded, q2_mask)

        # concat
        q1_combined = torch.cat([q1_encoded, q1_aligned, q1_encoded - q1_aligned, q1_encoded * q1_aligned], dim=-1)
        q2_combined = torch.cat([q2_encoded, q2_aligned, q2_encoded - q2_aligned, q2_encoded * q2_aligned], dim=-1)

        # project down to a lower dimension
        projected_q1 = self.projection(q1_combined)
        projected_q2 = self.projection(q2_combined)

        if self.dropout:
            projected_q1 = self.rnn_dropout(projected_q1)
            projected_q2 = self.rnn_dropout(projected_q2)

        # a second RNN pass, this time an LSTM
        q1_compare = self.second_rnn(projected_q1, q1_lengths)
        q2_compare = self.second_rnn(projected_q2, q2_lengths)

        # average pooling + max pooling
        q1_avg_pool = torch.sum(q1_compare * q1_mask.unsqueeze(1).transpose(2, 1), dim=1)/torch.sum(q1_mask, dim=1, keepdim=True)
        q2_avg_pool = torch.sum(q2_compare * q2_mask.unsqueeze(1).transpose(2, 1), dim=1)/torch.sum(q2_mask, dim=1, keepdim=True)

        q1_max_pool, _ = replace_masked(q1_compare, q1_mask, -1e7).max(dim=1)
        q2_max_pool, _ = replace_masked(q2_compare, q2_mask, -1e7).max(dim=1)

        # concatenate into the final feature vector
        merged = torch.cat([q1_avg_pool, q1_max_pool, q2_avg_pool, q2_max_pool], dim=1)

        # classification
        logits = self.classification(merged)
        probs = nn.functional.softmax(logits, dim=-1)
        return logits, probs
Example No. 12
    def collate_fn(self, batch):
        u_ids, i_ids, ratings, u_docs, i_docs = zip(*batch)
        
        u_ids = LongTensor(u_ids)
        i_ids = LongTensor(i_ids)
        ratings = FloatTensor(ratings)
        u_docs = LongTensor(u_docs)
        i_docs= LongTensor(i_docs)
        u_doc_word_masks = get_mask(u_docs)
        i_doc_word_masks = get_mask(i_docs)

        return u_docs, i_docs, u_doc_word_masks, i_doc_word_masks, u_ids, i_ids, ratings
Example No. 13
    def forward(self, batch):
        """
        :param batch:
        :return: (2, batch_size, c_len)
        """
        content = batch[0: 4]
        question = batch[4: 6]

        # mask
        content_mask = utils.get_mask(content[0])
        question_mask = utils.get_mask(question[0])

        # embedding
        content = self.embedding(content, True)  # (c_len, batch_size, w2i_size+6)
        question = self.embedding(question, False)  # (q_len, batch_size, w2i_size+4)

        # post-embedding dropout and highway layers
        if self.flag:
            content = f.dropout(content, p=self.encoder_dropout_p, training=self.training)
            question = f.dropout(question, p=self.encoder_dropout_p, training=self.training)
            content = self.highway_c(content)
            question = self.highway_q(question)  # (q_len, batch_size, w2i_size+4)

        # conv
        content = content.transpose(0, 1).transpose(1, 2)  # (batch_size, w2i_size+6, c_len)
        question = question.transpose(0, 1).transpose(1, 2)  # (batch_size, w2i_size+4, q_len)
        content = self.content_conv(content)  # (batch_size, hidden_size, c_len)
        question = self.question_conv(question)  # (batch_size, hidden_size, q_len)

        # encoder
        content = self.c_enc(content, content_mask)
        question = self.q_enc(question, question_mask)

        # cq attention
        X = self.cq_att(content, content_mask, question, question_mask)  # (batch_size, d*4, c_len)

        # model encoder layer
        M0 = self.cq_resizer(X)  # (batch_size, d, c_len)
        for enc in self.model_enc_blks:
            M0 = enc(M0, content_mask)
        M1 = M0
        for enc in self.model_enc_blks:
            M1 = enc(M1, content_mask)
        M2 = M1
        for enc in self.model_enc_blks:
            M2 = enc(M2, content_mask)

        result = self.pointer(M0, M1, M2, content_mask)

        return result
Example No. 14
    def forward(self, batch):
        content = batch[:3]
        question = batch[3:6]

        # mask
        content_mask = utils.get_mask(content[0])
        question_mask = utils.get_mask(question[0])

        # embedding
        content_vec = self.embedding(content)
        question_vec = self.embedding(question)

        # encoder
        content_vec = self.encoder(content_vec, content_mask)
        question_vec = self.encoder(question_vec, question_mask)

        # aligner
        align_ct = content_vec
        for i in range(num_align_hops):
            qt_align_ct = self.aligner[i](align_ct, question_vec,
                                          question_mask)
            bar_ct = self.aligner_sfu[i](
                align_ct,
                torch.cat([
                    qt_align_ct, align_ct * qt_align_ct, align_ct - qt_align_ct
                ],
                          dim=2))

            ct_align_ct = self.self_aligner[i](bar_ct, content_mask)
            hat_ct = self.self_aligner_sfu[i](
                bar_ct,
                torch.cat(
                    [ct_align_ct, bar_ct * ct_align_ct, bar_ct - ct_align_ct],
                    dim=2))
            align_ct = self.aggregation[i](hat_ct, content_mask)

        # init state
        zs = self.init_state(question_vec, question_mask)

        # pointer
        for i in range(num_ptr_hops):
            ans_range, zs = self.ptr_net[i](align_ct, content_mask, zs)

        # add a tiny constant (1e-30) at padded positions
        content_mask = content_mask.float()
        new_mask = (content_mask - 1) * (-1e-30)
        ans_range = ans_range + new_mask.unsqueeze(0)

        return ans_range
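The `aligner_sfu` / `self_aligner_sfu` modules follow the semantic fusion unit (SFU) from the Mnemonic Reader family. As an illustration rather than the exact module used here, a minimal sketch:

    import torch
    import torch.nn as nn

    class SFU(nn.Module):
        def __init__(self, input_size, fusion_size):
            super().__init__()
            self.linear_r = nn.Linear(input_size + fusion_size, input_size)
            self.linear_g = nn.Linear(input_size + fusion_size, input_size)

        def forward(self, x, fusions):
            rf = torch.cat([x, fusions], dim=-1)
            r = torch.tanh(self.linear_r(rf))     # composed representation
            g = torch.sigmoid(self.linear_g(rf))  # fusion gate
            return g * r + (1 - g) * x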
Example No. 15
def test_model(dataloader, model, word_vocab, label_vocab, pred_file, use_gpu=False):
    model.eval()
    prediction = []
    for batch in dataloader:
        batch_text, seq_length, word_perm_idx = batch['text']
        batch_label, _, _ = batch['label']
        char_inputs = batch['char']
        char_inputs = char_inputs[word_perm_idx]
        char_dim = char_inputs.size(-1)
        char_inputs = char_inputs.contiguous().view(-1, char_dim)
        if use_gpu:
            batch_text = batch_text.cuda()
            batch_label = batch_label.cuda()
            char_inputs = char_inputs.cuda()
        mask = get_mask(batch_text)
        with torch.no_grad():
            tag_seq = model(batch_text, seq_length, char_inputs, batch_label, mask)
        for line_tensor, labels_tensor, predicts_tensor in zip(batch_text, batch_label, tag_seq):
            for word_tensor, label_tensor, predict_tensor in zip(line_tensor, labels_tensor, predicts_tensor):
                if word_tensor.item() == 0:
                    break
                line = ' '.join(
                    [word_vocab.id_to_word(word_tensor.item()), label_vocab.id_to_label(label_tensor.item()),
                     label_vocab.id_to_label(predict_tensor.item())])
                prediction.append(line)
            prediction.append('')

    with open(pred_file, 'w', encoding='utf-8') as f:
        f.write('\n'.join(prediction))
Example No. 16
    def forward(self, input, target, length):
        """compute the loss with output and the desired target

        Parameters:
            input: the output of the RNN model, a predicted embedding
            target: the supervised training label.

        Shape:
            - input: :math:`(N, E)` where `N = number of tokens, E = embedding size`
            - target: :math:`(N)`

        Return:
            the scalar Variable ready for backward
        """

        decoded = self.decoder(input).contiguous()
        mask = get_mask(length)
        loss = self.criterion(
            decoded.view(-1, decoded.size(2)), target.view(-1)
        ).view(decoded.size(0), decoded.size(1))
        loss = torch.masked_select(loss, mask)
        if self.size_average:
            return loss.mean()
        else:
            return loss.sum()
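Examples 16, 23, 27 and 28 all build the mask from a lengths tensor (the latter three pass `max_len` explicitly). A minimal sketch, assuming a (batch_size, max_len) boolean layout (an assumption):

    import torch

    def get_mask(length, max_len=None):
        max_len = max_len if max_len is not None else int(length.max())
        positions = torch.arange(max_len, device=length.device)
        # True up to each sequence's true length, False afterwards
        return positions.unsqueeze(0) < length.unsqueeze(1)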
Example No. 17
    def __getitem__(self, index):
        r"""
            Make a mask for original Image!
        """
        if self.phase == 'trainval':
            self.do_mask = bool(random.getrandbits(1))
            #self.do_mask = False
        else:
            self.do_mask = False

        file_name = self.path_list[index]
        person_img = Image.open(file_name)
        if self.do_mask and not ('none' in file_name.split('/')[-1]):
            key = str('/'.join(file_name.split('/')[-3:]))
            joints = self.joints_dict[key]
            # get mask (scale = 500 for test)
            if len(joints) > 8:
                mask = utils.get_mask(person_img, joints, scale=self.scale)
                person_img = utils.get_partial_body(person_img, mask)

        if self.data_transform is not None:
            person_img = self.data_transform(person_img)

        img = person_img

        target = self.labels_list[index]
        ###################################################
        #pos = self.pos_dict[self.frame_key_list[index]] # [, 2]
        #pos = np.resize(pos, (self.num_players,2))
        ##################################################

        return img, int(target)
Example No. 18
def save_masks_alphas(image_paths, root_dir, add_mask_paths=True):
    alpha_dir = os.path.join(root_dir, 'alphas')
    mask_dir = os.path.join(root_dir, 'masks')
    trimap_dir = os.path.join(root_dir, 'trimaps')
    if not os.path.exists(alpha_dir):
        os.mkdir(alpha_dir)
    if not os.path.exists(mask_dir):
        os.mkdir(mask_dir)
    if not os.path.exists(trimap_dir):
        os.mkdir(trimap_dir)
    new_image_paths = []
    for i, [clip, matting] in enumerate(image_paths):
        matting_img = cv2.imread(matting, -1)
        if matting_img is None:
            print("{} does not exist".format(matting))
            continue
        alpha = utils.get_alpha(matting_img)
        mask = utils.get_mask(alpha)
        trimap = utils.get_trimap(alpha)
        image_name = matting.split('/')[-1]
        alpha_path = os.path.join(alpha_dir, image_name)
        mask_path = os.path.join(mask_dir, image_name)
        trimap_path = os.path.join(trimap_dir, image_name)
        alpha_path = alpha_path.replace('\\', '/')
        mask_path = mask_path.replace('\\', '/')
        trimap_path = trimap_path.replace('\\', '/')
        cv2.imwrite(alpha_path, alpha)
        cv2.imwrite(mask_path, mask)
        cv2.imwrite(trimap_path, trimap)
        if add_mask_paths:
            new_image_paths.append(
                [clip, matting, alpha_path, mask_path, trimap_path])
        else:
            new_image_paths.append([clip, matting])
    return new_image_paths
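A minimal sketch of `utils.get_mask(alpha)` consistent with this pipeline, assuming a binary foreground mask thresholded from the alpha matte (both the threshold and the 255 scaling are assumptions):

    import numpy as np

    def get_mask(alpha, threshold=0):
        # hypothetical: foreground wherever alpha exceeds the threshold
        return ((alpha > threshold) * 255).astype(np.uint8)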
Example No. 19
def task_b(filepath: str, tokenizer, truncate=512):
    nums, ids, tweets, _, label_b, _ = read_file(filepath)
    # Only some of the tweets are labeled for task b

    useful = label_b != 'NULL'
    ids = ids[useful]
    tweets = tweets[useful]
    label_b = label_b[useful]

    nums = len(label_b)
    # Tokenize
    # tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    token_ids = [
        tokenizer.encode(text=tweets[i],
                         add_special_tokens=True,
                         max_length=truncate) for i in range(nums)
    ]
    # Get mask
    mask = np.array(get_mask(token_ids))
    # Get lengths
    lens = get_lens(token_ids)
    # Pad tokens
    token_ids = np.array(pad_sents(token_ids, tokenizer.pad_token_id))

    return ids, token_ids, lens, mask, label_b
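Here `get_mask` runs before padding, so it must accept ragged Python lists of token ids. A minimal sketch under that assumption:

    def get_mask(token_ids):
        # 1 for real tokens, 0 for padding, padded to the longest sequence
        max_len = max(len(sent) for sent in token_ids)
        return [[1] * len(sent) + [0] * (max_len - len(sent)) for sent in token_ids]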
Example No. 20
def load_dataset(args, traindata_transform, testdata_transform):

    Images = []
    labels = []

    for i in os.listdir("new_results/images"):

        if (
            i != "annotations"
            and i != ".DS_Store"
            and i != "res"
            and i != "images"
            and i != "results"
        ):

            img = Image.open("new_results/images/" + i)
            # display(img)
            Images.append(img.resize((250, 250)))
            with open("new_results/annotations/" + i[:-4] + ".yaml", "r") as fp:
                mask = get_mask(yaml.safe_load(fp))
            # plt.imshow(mask, cmap='hot', interpolation='nearest')
            # plt.show()
            labels.append(mask)

    X_train, X_test, y_train, y_test = train_test_split(
        Images, labels, test_size=0.3, random_state=42
    )

    traindataset = CharsDataset(X_train, y_train, transform=traindata_transform)
    trainloader = shuffle_loader(traindataset, args.batchsize_trainloader)

    testdataset = CharsDataset(X_test, y_test, transform=testdata_transform)
    testloader = shuffle_loader(testdataset, args.batchsize_testloader)

    return trainloader, testloader
Example No. 21
def train_model(dataloader,
                model,
                optimizer,
                batch_num,
                writer,
                use_gpu=False):
    model.train()
    for batch in dataloader:
        batch_num += 1
        model.zero_grad()
        batch_text, seq_length, word_perm_idx = batch['text']
        batch_label, _, _ = batch['label']
        char_inputs = batch['char']
        char_inputs = char_inputs[word_perm_idx]
        char_dim = char_inputs.size(-1)
        char_inputs = char_inputs.contiguous().view(-1, char_dim)
        if use_gpu:
            batch_text = batch_text.cuda()
            batch_label = batch_label.cuda()
            char_inputs = char_inputs.cuda()
        mask = get_mask(batch_text)
        loss = model.neg_log_likelihood_loss(batch_text, seq_length,
                                             char_inputs, batch_label, mask)
        writer.add_scalar('loss', loss, batch_num)
        loss.backward()
        clip_grad_norm_(model.parameters(), 5.0)
        optimizer.step()

    return batch_num
Example No. 22
def read_test_file_all(tokenizer, truncate=512):
    df = pd.read_csv(os.path.join(OLID_PATH, 'testset-levela.tsv'), sep='\t')
    df_a = pd.read_csv(os.path.join(OLID_PATH, 'labels-levela.csv'), sep=',')
    ids = np.array(df['id'].values)
    tweets = np.array(df['tweet'].values)
    label_a = np.array(df_a['label'].values)
    nums = len(df)

    # Process tweets
    tweets = process_tweets(tweets)

    df_b = pd.read_csv(os.path.join(OLID_PATH, 'labels-levelb.csv'), sep=',')
    df_c = pd.read_csv(os.path.join(OLID_PATH, 'labels-levelc.csv'), sep=',')
    label_data_b = dict(zip(df_b['id'].values, df_b['label'].values))
    label_data_c = dict(zip(df_c['id'].values, df_c['label'].values))
    label_b = [label_data_b.get(id, 'NULL') for id in ids]
    label_c = [label_data_c.get(id, 'NULL') for id in ids]

    token_ids = [
        tokenizer.encode(text=tweets[i],
                         add_special_tokens=True,
                         max_length=truncate) for i in range(nums)
    ]
    mask = np.array(get_mask(token_ids))
    lens = get_lens(token_ids)
    token_ids = np.array(pad_sents(token_ids, tokenizer.pad_token_id))

    return ids, token_ids, lens, mask, label_a, label_b, label_c
Example No. 23
    def forward_slow(self, input, target, length):

        mask = get_mask(length.data, max_len=input.size(1))
        rnn_output = self._rnn(input)
        l1 = self.criterion.forward_slow(target, rnn_output)
        l1 = torch.masked_select(l1, mask)
        return l1
Example No. 24
def evaluate(sentence, max_length=10):
    sentence = tf.convert_to_tensor([sentence])
    sentence = tokenizer.pt.tokenize(sentence).to_tensor()

    encoder_input = sentence

    start, end = tokenizer.en.tokenize([''])[0]
    output = tf.convert_to_tensor([start])
    output = tf.expand_dims(output, 0)

    for i in range(max_length):
        enc_padding_mask, combined_mask, dec_padding_mask = get_mask(
            encoder_input, output)
        predictions, attention_weights = transformer(encoder_input, output,
                                                     False, enc_padding_mask,
                                                     combined_mask,
                                                     dec_padding_mask)
        predictions = predictions[:, -1:, :]  # batch_size, 1, vocab_size

        predicted_id = tf.argmax(predictions, axis=-1)
        output = tf.concat([output, predicted_id], axis=-1)

        if predicted_id == end:
            break

    text = tokenizer.en.detokenize(output)[0]

    tokens = tokenizer.en.lookup(output)[0]

    return text, tokens, attention_weights
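A minimal sketch of `get_mask(encoder_input, output)` following the standard TensorFlow transformer recipe (encoder/decoder padding masks plus a look-ahead mask); the helper used here may differ in detail:

    import tensorflow as tf

    def get_mask(inp, tar):
        def padding_mask(seq):
            pad = tf.cast(tf.math.equal(seq, 0), tf.float32)
            return pad[:, tf.newaxis, tf.newaxis, :]  # (batch, 1, 1, seq_len)

        enc_padding_mask = padding_mask(inp)
        dec_padding_mask = padding_mask(inp)
        size = tf.shape(tar)[1]
        look_ahead_mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
        combined_mask = tf.maximum(padding_mask(tar), look_ahead_mask)
        return enc_padding_mask, combined_mask, dec_padding_mask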
Example No. 25
    def __getitem__(self, idx):
        # load images and masks

        img_path = os.path.join(self.path_images,
                                self.df_gatitos['file_name'][idx])

        img = Image.open(img_path).convert("RGB")
        # note that we haven't converted the mask to RGB,
        # because each color corresponds to a different instance
        # with 0 being background
        mask = get_mask(coordenates=self.df_gatitos['bbox'][idx],
                        height=self.df_gatitos['height'][idx],
                        width=self.df_gatitos['width'][idx])

        mask = np.array(mask)
        # instances are encoded as different colors
        obj_ids = np.unique(mask)
        # first id is the background, so remove it
        obj_ids = obj_ids[1:]

        # split the color-encoded mask into a set
        # of binary masks
        masks = mask == obj_ids[:, None, None]

        # get bounding box coordinates for each mask
        num_objs = len(obj_ids)
        boxes = []
        for i in range(num_objs):
            pos = np.where(masks[i])
            xmin = np.min(pos[1])
            xmax = np.max(pos[1])
            ymin = np.min(pos[0])
            ymax = np.max(pos[0])
            boxes.append([xmin, ymin, xmax, ymax])
        # print(boxes)
        # print(idx)
        boxes = torch.as_tensor(boxes, dtype=torch.float32)
        # there is only one class
        labels = torch.ones((num_objs, ), dtype=torch.int64)
        masks = torch.as_tensor(masks, dtype=torch.uint8)

        image_id = torch.tensor([idx])
        area = (boxes[:, 3] - boxes[:, 1]) * (
            boxes[:, 2] - boxes[:, 0])  # self.df_gatitos['area'][idx]#
        # suppose all instances are not crowd
        iscrowd = torch.zeros((num_objs, ), dtype=torch.int64)

        target = {}
        target["boxes"] = boxes
        target["labels"] = labels
        target["masks"] = masks
        target["image_id"] = image_id
        target["area"] = area
        target["iscrowd"] = iscrowd

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target
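Given how the result is consumed (instance ids via `np.unique`, background 0), a minimal sketch of `get_mask(coordenates, height, width)`, assuming COCO-style [x, y, w, h] boxes (an assumption):

    import numpy as np

    def get_mask(coordenates, height, width):
        # hypothetical: paint each box with a distinct instance id on a blank canvas
        mask = np.zeros((height, width), dtype=np.uint8)
        for instance_id, (x, y, w, h) in enumerate(coordenates, start=1):
            mask[int(y):int(y + h), int(x):int(x + w)] = instance_id
        return mask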
Example No. 26
    def space_carve(self, im):
        mask = ut.get_mask(im)
        rt = json.loads(urllib.request.urlopen(
            self.vscan.localhost + 'camera_extrinsics').read().decode('utf-8'))
        rot = sum(rt['R'], [])
        tvec = rt['T']
        if self.n_dilation:
            for k in range(self.n_dilation):
                mask = binary_dilation(mask)
        self.sc.process_view(self.intrinsics, rot, tvec, mask)
Example No. 27
    def loss_and_norm_term(self, input, target, length):

        mask = get_mask(length.data, max_len=input.size(1))
        rnn_output = self._rnn(input)
        loss = self.criterion(target, rnn_output)
        loss = torch.masked_select(loss, mask)

        return loss.sum()
Example No. 28
    def forward(self, input, target, length):

        mask = get_mask(length.data, max_len=input.size(1))
        rnn_output = self._rnn(input)
        loss = self.criterion(target, rnn_output)
        loss = torch.masked_select(loss, mask)

        return loss.mean()
Example No. 29
    def collate_fn(self, batch):
        u_ids, i_ids, ratings, u_revs, i_revs, u_rids, i_rids = zip(*batch)
        
        u_ids = LongTensor(u_ids)
        i_ids = LongTensor(i_ids)
        ratings = FloatTensor(ratings)
        u_revs = LongTensor(u_revs)
        i_revs = LongTensor(i_revs)
        u_rids = LongTensor(u_rids)
        i_rids = LongTensor(i_rids)

        u_rev_word_masks = get_mask(u_revs)
        i_rev_word_masks = get_mask(i_revs)
        u_rev_masks = self.get_rev_mask(u_revs)
        i_rev_masks = self.get_rev_mask(i_revs)

        return u_revs, i_revs, u_rev_word_masks, i_rev_word_masks, u_rev_masks, i_rev_masks, u_ids, i_ids, ratings
Example No. 30
def get_sent_mask(batch_reviews):
    """
    NOTE: only handles the special case where
    batch_reviews: [bz, rn, sn, wn]
    sent_mask: [bz, rn, sn]
    """
    batch_sents = batch_reviews.sum(dim=-1)  # [bz, rn, sn]
    sent_mask = get_mask(batch_sents)
    return sent_mask
Example No. 31
    def stream_chips(self):
        for (idx, img_fn) in self.stream_tile_fns():
            num_skipped_chips = 0
            
            img_path = os.path.join(self.data_root_dir, img_fn)
            
            # Read images
            img_fp = imread(img_path)
            
            # Read masks
            if img_fn in self.segmentation_df.index:
                mask_fp = get_mask(img_fn, self.segmentation_df)
            else:
                mask_fp = np.zeros((768, 768), dtype=np.uint8)
            
            height, width, channel = img_fp.shape
            l_height, l_width = mask_fp.shape

            assert height == l_height and width == l_width
            
            # Randomly sample NUM_PATCHES from image
            for i in range(self.num_patches):
                # Select the top left pixel of our chip randomly
                x = np.random.randint(0, width-self.large_chip_size)
                y = np.random.randint(0, height-self.large_chip_size)

                # Read imagery / labels
                p_img = img_fp[y:y+self.large_chip_size, x:x+self.large_chip_size, :]
                p_mask = mask_fp[y:y+self.large_chip_size, x:x+self.large_chip_size]
                
                angles = [0,60,120,180,240,300]
                                
                for rotate_amount in angles:
                    temp_p_img = rotate(p_img, rotate_amount)
                    temp_p_mask = rotate(p_mask, rotate_amount, order=0)
                    temp_p_mask = (temp_p_mask * 255).astype(np.uint8)

                    temp_p_img = temp_p_img[CROP_POINT:CROP_POINT+CHIP_SIZE, CROP_POINT:CROP_POINT+CHIP_SIZE]
                    temp_p_mask = temp_p_mask[CROP_POINT:CROP_POINT+CHIP_SIZE, CROP_POINT:CROP_POINT+CHIP_SIZE]

                    temp_p_img = np.rollaxis(temp_p_img, 2, 0).astype(np.float32)
                    temp_p_img = torch.from_numpy(temp_p_img).squeeze()

                    temp_p_mask = temp_p_mask.astype(np.int64)
                    temp_p_mask = torch.from_numpy(temp_p_mask).unsqueeze(0)

                    yield temp_p_img, temp_p_mask, rotate_amount


            if num_skipped_chips > 0 and self.verbose:
                print("We skipped %d chips on %s" % (num_skipped_chips, img_fn))
Example No. 32
regfile = '/scr/ilz1/Data/results/func2anat_transform/_session_session1/_subject_id_9630905/_register0/FREESURFER.mat'

# labels
sourcelabels = [12114, 12113] #ctx_rh_G_front_inf-Triangul, ctx_rh_G_front_inf-Orbital
targetlabels = [11114] #ctx_lh_G_front_inf-Triangul
inputlabels = sourcelabels + targetlabels

# invert transform matrix
invt = fsl.ConvertXFM()
invt.inputs.in_file = regfile
invt.inputs.invert_xfm = True
invt.inputs.out_file = regfile + '_inv.mat'
invt_result = invt.run()

# define source mask (surface, volume)
sourcemask = get_mask(inputlabels)
sourcemaskfile = os.path.join(workingdir, 'masks/', 'sourcemask.nii')
sourceImg = nb.Nifti1Image(sourcemask, None)
nb.save(sourceImg, sourcemaskfile)

# transform anatomical mask to functional space
sourcexfm = fsl.ApplyXfm()
sourcexfm.inputs.in_file = sourcemaskfile
sourcexfm.inputs.in_matrix_file = invt_result.outputs.out_file
_, base, _ = split_filename(sourcemaskfile)
sourcexfm.inputs.out_file = base + '_xfm.nii.gz'
sourcexfm.inputs.reference = preprocessedfile
sourcexfm.inputs.interp = 'nearestneighbour'
sourcexfm.inputs.apply_xfm = True
sourcexfm_result = sourcexfm.run()