Example 1
    def infer(self, inputs_per_batch, outputs_per_batch, output_file):
        # this function assumes it is run on 1 gpu with batch size of 1
        with codecs.open(output_file, 'w', 'utf-8') as fout:
            for step in range(len(inputs_per_batch)):
                input_values = inputs_per_batch[step][0][0]
                output_values = outputs_per_batch[step][0]
                output_string = text_ids_to_string(
                    output_values[0],
                    self.data_layer.params['target_idx2seq'],
                    S_ID=self.decoder.params['GO_SYMBOL'],
                    EOS_ID=self.decoder.params['END_SYMBOL'],
                    PAD_ID=self.decoder.params['PAD_SYMBOL'],
                    ignore_special=True,
                    delim=' ',
                )
                input_string = text_ids_to_string(
                    input_values[0],
                    self.data_layer.params['source_idx2seq'],
                    S_ID=self.decoder.params['GO_SYMBOL'],
                    EOS_ID=self.decoder.params['END_SYMBOL'],
                    PAD_ID=self.decoder.params['PAD_SYMBOL'],
                    ignore_special=True,
                    delim=' ',
                )
                fout.write(output_string + "\n")
                if step % 200 == 0:
                    if six.PY2:
                        input_string = input_string.encode('utf-8')
                        output_string = output_string.encode('utf-8')

                    deco_print("Input sequence:  {}".format(input_string))
                    deco_print("Output sequence: {}".format(output_string))
                    deco_print("")
Example 2
 def infer(self, input_values, output_values):
     input_strings, output_strings = [], []
     input_values = input_values['source_tensors']
     for input_sample, output_sample in zip(input_values, output_values):
         output_strings.append(
             text_ids_to_string(
                 output_sample[0],
                 self.get_data_layer().params['target_idx2seq'],
                 S_ID=self.decoder.params['GO_SYMBOL'],
                 EOS_ID=self.decoder.params['END_SYMBOL'],
                 PAD_ID=self.decoder.params['PAD_SYMBOL'],
                 ignore_special=True,
                 delim=' ',
             ))
         input_strings.append(
             text_ids_to_string(
                 input_sample[0],
                 self.get_data_layer().params['source_idx2seq'],
                 S_ID=self.decoder.params['GO_SYMBOL'],
                 EOS_ID=self.decoder.params['END_SYMBOL'],
                 PAD_ID=self.decoder.params['PAD_SYMBOL'],
                 ignore_special=True,
                 delim=' ',
             ))
     return input_strings, output_strings
Example 3
 def infer(self, input_values, output_values):
     input_strings, output_strings = [], []
     input_values = input_values['source_tensors']
     for input_sample, output_sample in zip(input_values, output_values):
         for i in range(
                 0, input_sample.shape[0]):  # iterate over batch dimension
             output_strings.append(
                 text_ids_to_string(
                     output_sample[i],
                     self.get_data_layer().params['target_idx2seq'],
                     S_ID=self.decoder.params.get(
                         'GO_SYMBOL', SpecialTextTokens.S_ID.value),
                     EOS_ID=self.decoder.params.get(
                         'END_SYMBOL', SpecialTextTokens.EOS_ID.value),
                     PAD_ID=self.decoder.params.get(
                         'PAD_SYMBOL', SpecialTextTokens.PAD_ID.value),
                     ignore_special=True,
                     delim=' ',
                 ))
             input_strings.append(
                 text_ids_to_string(
                     input_sample[i],
                     self.get_data_layer().params['source_idx2seq'],
                     S_ID=self.decoder.params.get(
                         'GO_SYMBOL', SpecialTextTokens.S_ID.value),
                     EOS_ID=self.decoder.params.get(
                         'END_SYMBOL', SpecialTextTokens.EOS_ID.value),
                     PAD_ID=self.decoder.params.get(
                         'PAD_SYMBOL', SpecialTextTokens.PAD_ID.value),
                     ignore_special=True,
                     delim=' ',
                 ))
     return input_strings, output_strings
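
Example 3 differs from Example 2 mainly in the inner loop over `input_sample.shape[0]`, so it handles batch sizes larger than one. A sketch of how a caller might consume the returned strings, assuming a `model` object whose `infer` has the Example 3 signature and a `fetches` list of (input_values, output_values) pairs; both names and the output file name are assumptions, not part of the examples above:

import codecs

# Hypothetical driver loop: write every decoded output sequence to a file.
with codecs.open('inference_output.txt', 'w', 'utf-8') as fout:
    for input_values, output_values in fetches:
        _, output_strings = model.infer(input_values, output_values)
        for line in output_strings:
            fout.write(line + '\n')
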
Example 4
 def infer(self, input_values, output_values):
   input_strings, output_strings = [], []
   input_values = input_values['source_tensors']
   for input_sample, output_sample in zip(input_values, output_values):
     output_strings.append(text_ids_to_string(
       output_sample[0],
       self.get_data_layer().params['target_idx2seq'],
       S_ID=self.decoder.params['GO_SYMBOL'],
       EOS_ID=self.decoder.params['END_SYMBOL'],
       PAD_ID=self.decoder.params['PAD_SYMBOL'],
       ignore_special=True, delim=' ',
     ))
     input_strings.append(text_ids_to_string(
       input_sample[0],
       self.get_data_layer().params['source_idx2seq'],
       S_ID=self.decoder.params['GO_SYMBOL'],
       EOS_ID=self.decoder.params['END_SYMBOL'],
       PAD_ID=self.decoder.params['PAD_SYMBOL'],
       ignore_special=True, delim=' ',
     ))
   return input_strings, output_strings
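
All four examples assume that the data layer's `source_idx2seq` / `target_idx2seq` parameters map integer token ids to token strings, and that the decoder's `GO_SYMBOL`, `END_SYMBOL` and `PAD_SYMBOL` parameters are plain integer ids. A small illustration of a call in that style; the `decoder_params` name and the concrete ids and tokens below are invented for the example:

# Hypothetical vocabulary and special-symbol ids, chosen only to
# illustrate the expected types of the parameters used above.
target_idx2seq = {0: '<pad>', 1: '</s>', 2: '<s>', 3: 'hello', 4: 'world'}
decoder_params = {'GO_SYMBOL': 2, 'END_SYMBOL': 1, 'PAD_SYMBOL': 0}

print(text_ids_to_string(
    [2, 3, 4, 1, 0, 0],  # <s> hello world </s> <pad> <pad>
    target_idx2seq,
    S_ID=decoder_params['GO_SYMBOL'],
    EOS_ID=decoder_params['END_SYMBOL'],
    PAD_ID=decoder_params['PAD_SYMBOL'],
    ignore_special=True,
    delim=' ',
))  # prints: hello world
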