def __init__(self):
    """MultiRC task: sentence-pair binary classification scored with micro-averaged F1."""
    is_pair = True
    class_labels = ['0', '1']
    # Single child metric wrapped in a composite for a uniform metric interface.
    metric = CompositeEvalMetric()
    metric.add(F1(average='micro'))
    super(MultiRCTask, self).__init__(class_labels, metric, is_pair,
                                      output_format="jsonl")
def __init__(self):
    """QQP task: sentence-pair binary classification scored with F1 and Accuracy."""
    is_pair = True
    class_labels = ['0', '1']
    # Composite metric: F1 first, then Accuracy (order preserved for reporting).
    metric = CompositeEvalMetric()
    metric.add(F1())
    metric.add(Accuracy())
    super(QQPTask, self).__init__(class_labels, metric, is_pair)
def __init__(self):
    """ReCoRD task: sentence-pair binary classification with F1 and Accuracy,
    emitting predictions in JSONL format."""
    is_pair = True
    class_labels = ['0', '1']
    # Composite metric: F1 first, then Accuracy (order preserved for reporting).
    metric = CompositeEvalMetric()
    metric.add(F1())
    metric.add(Accuracy())
    super(ReCoRDTask, self).__init__(class_labels, metric, is_pair,
                                     output_format="jsonl")
def evaluate(net, dataloader, context):
    """Compute the F1 score of ``net`` over all batches in ``dataloader``.

    Every array in a batch is cast to float32, reshaped to a column vector
    and moved to ``context``. The last array of each batch is treated as the
    label; the remaining arrays are the network inputs.

    Parameters
    ----------
    net : callable
        Model returning ``(output, decoded)``; only ``output`` is scored.
    dataloader : iterable
        Yields batches (sequences of arrays), label last.
    context : mx.Context
        Device the arrays are moved to before the forward pass.

    Returns
    -------
    float
        The accumulated F1 value.
    """
    f1 = F1()
    # The original enumerate() index was never used; iterate batches directly.
    for data in dataloader:
        # Comprehension replaces the index-mutation loop over range(len(data)).
        data = [
            x.astype(np.float32).reshape((-1, 1)).as_in_context(context)
            for x in data
        ]
        output, decoded = net(*data[:-1])
        # data[-1] is the label (was written as data[len(data) - 1]).
        f1.update(data[-1], output)
    return float(f1.get()[1])
def __init__(self, *args, **kwargs):
    """Binary classification task backed by a TSV file.

    All positional and keyword arguments are passed through unchanged to
    ``nlp.data.TSVDataset``, e.g.::

        (filename, field_separator=nlp.data.Splitter(','),
         num_discard_samples=1, field_indices=[2, 1])

    The loaded dataset is split 80/20 into ``self.trainset`` and
    ``self.valset``.
    """
    self.args = args
    self.kwargs = kwargs
    is_pair = False
    class_labels = ['0', '1']
    # Composite metric: F1 first, then Accuracy (order preserved for reporting).
    metric = CompositeEvalMetric()
    metric.add(F1())
    metric.add(Accuracy())
    super(TSVClassificationTask, self).__init__(class_labels, metric, is_pair)
    dataset = nlp.data.TSVDataset(*self.args, **self.kwargs)
    # Split the single TSV dataset into train/validation samplers (80/20).
    train_sampler, val_sampler = get_split_samplers(dataset, split_ratio=0.8)
    self.trainset = SampledDataset(dataset, train_sampler)
    self.valset = SampledDataset(dataset, val_sampler)
def get_metric():
    """Get metrics Accuracy and F1.

    Returns a ``CompositeEvalMetric`` containing Accuracy then F1,
    in that order.
    """
    metric = CompositeEvalMetric()
    # Unrolled: same two child metrics, same order as the original loop.
    metric.add(Accuracy())
    metric.add(F1())
    return metric
def get_metric(cls):
    """Get metrics Accuracy and F1.

    Returns a ``CompositeEvalMetric`` containing Accuracy followed by a
    micro-averaged F1.
    """
    metric = CompositeEvalMetric()
    # Unrolled: same two child metrics, same order as the original loop.
    metric.add(Accuracy())
    metric.add(F1(average='micro'))
    return metric