Example #1
0
 def _create_examples(self, dicts, set_type):
   """Creates `InputExample`s for the training and dev sets.

   Args:
     dicts: iterable of dicts, each holding the input text under key 'X'
       and its label under key 'y'.
     set_type: string split name ("train"/"dev"/...) used as the guid prefix.

   Returns:
     A list of single-sentence `InputExample`s (text_b is left unset).
   """
   examples = []
   # NOTE: loop variable renamed from `dict` — it shadowed the builtin type.
   for (i, record) in enumerate(dicts):
     guid = "%s-%s" % (set_type, str(i))
     text_a = tokenization.convert_to_unicode(record['X'])
     label = tokenization.convert_to_unicode(record['y'])
     examples.append(
         InputExample(guid=guid, text_a=text_a, label=label))
   return examples
Example #2
0
 def _create_examples(self, lines, set_type):
   """Creates examples for the training and dev sets."""
   to_unicode = tokenization.convert_to_unicode
   examples = []
   for idx, row in enumerate(lines):
     if idx == 0:
       continue  # first row is the TSV header
     guid = "%s-%s" % (set_type, to_unicode(row[0]))
     # Test rows carry no gold label; use a fixed placeholder.
     if set_type == "test":
       label = "entailment"
     else:
       label = to_unicode(row[3])
     examples.append(
         InputExample(guid=guid,
                      text_a=to_unicode(row[1]),
                      text_b=to_unicode(row[2]),
                      label=label))
   return examples
Example #3
0
 def _create_examples(self, lines, set_type):
   """Creates examples for the training and dev sets."""
   examples = []
   is_test = (set_type == 'test')
   for row_idx, row in enumerate(lines):
     if row_idx == 0:
       continue  # skip the header row
     guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(row[0]))
     if is_test:
       # Test rows have no trailing score column; use a dummy 0.0.
       sent_a, sent_b, score = row[-2], row[-1], 0.0
     else:
       sent_a, sent_b, score = row[-3], row[-2], float(row[-1])
     examples.append(InputExample(
         guid=guid,
         text_a=tokenization.convert_to_unicode(sent_a),
         text_b=tokenization.convert_to_unicode(sent_b),
         label=score))
   return examples
Example #4
0
 def _create_examples(self, lines, set_type):
   """Creates examples for the training and dev sets."""
   is_test = (set_type == "test")
   examples = []
   for idx, row in enumerate(lines):
     # Only the test set has a header
     if is_test and idx == 0:
       continue
     guid = "%s-%s" % (set_type, idx)
     if is_test:
       # Test rows have no gold label; use a fixed placeholder.
       raw_text, label = row[1], "0"
     else:
       raw_text, label = row[3], tokenization.convert_to_unicode(row[1])
     examples.append(
         InputExample(guid=guid,
                      text_a=tokenization.convert_to_unicode(raw_text),
                      text_b=None,
                      label=label))
   return examples
Example #5
0
 def get_dev_examples(self, data_dir):
   """See base class."""
   convert = tokenization.convert_to_unicode
   target_language = convert(self.language)
   rows = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
   examples = []
   for idx, row in enumerate(rows):
     if idx == 0:
       continue  # header row
     if convert(row[0]) != target_language:
       continue  # keep only rows matching the configured language
     examples.append(InputExample(
         guid="dev-%d" % idx,
         text_a=convert(row[6]),
         text_b=convert(row[7]),
         label=convert(row[1])))
   return examples
Example #6
0
 def get_train_examples(self, data_dir):
   """See base class."""
   convert = tokenization.convert_to_unicode
   path = os.path.join(data_dir, "multinli",
                       "multinli.train.%s.tsv" % self.language)
   examples = []
   for idx, row in enumerate(self._read_tsv(path)):
     if idx == 0:
       continue  # skip header
     label = convert(row[2])
     # Normalize the alternate spelling used in some translated files.
     if label == convert("contradictory"):
       label = convert("contradiction")
     examples.append(InputExample(
         guid="train-%d" % idx,
         text_a=convert(row[0]),
         text_b=convert(row[1]),
         label=label))
   return examples
Example #7
0
 def _create_examples(self, lines, set_type):
   """Creates examples for the training and dev sets."""
   to_unicode = tokenization.convert_to_unicode
   examples = []
   for idx, row in enumerate(lines):
     if idx == 0:
       continue  # skip the header row
     guid = "%s-%s" % (set_type, to_unicode(row[0]))
     if set_type == "test":
       text_a = to_unicode(row[1])
       text_b = to_unicode(row[2])
       label = "0"
     else:
       if len(row) != 6:
         # there is a problematic line
         print(row)
         continue
       text_a = to_unicode(row[3])
       text_b = to_unicode(row[4])
       label = to_unicode(row[5])
     examples.append(
         InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
   return examples