Example #1
    def eval(self,
             exp_dir,
             predictions,
             runtimes,
             label=None,
             only_metrics=False):
        # Save the predictions in the TuSimple submission format, then score
        # them with either the official benchmark script or the custom metric
        pred_filename = 'jst_predictions_{}.csv'.format(label)
        self.save_tusimple_predictions(predictions, runtimes, pred_filename)
        if self.metric == 'default':
            result = json.loads(
                LaneEval.bench_one_submit(pred_filename, self.anno_files[0]))
        elif self.metric == 'ours':
            result = json.loads(
                eval_json(pred_filename,
                          self.anno_files[0],
                          json_type='tusimple'))
        # Collect each metric into a single-column table for readable output
        table = {}
        for metric in result:
            table[metric['name']] = [metric['value']]
        table = tabulate(table, headers='keys')

        # Optionally persist the full result JSON in the experiment directory
        if not only_metrics:
            filename = 'tusimple_{}_eval_result_{}.json'.format(
                self.split, label)
            with open(os.path.join(exp_dir, filename), 'w') as out_file:
                json.dump(result, out_file)

        return table, result

    def get_metrics(self, lanes, idx):
        # Score a single image: map the predicted lanes back onto the original
        # annotation's y samples, then run the TuSimple benchmark on that image
        label = self.annotations[idx]
        org_anno = label['old_anno']
        pred = self.pred2lanes(org_anno['path'], lanes, org_anno['y_samples'])
        _, _, _, matches, accs, dist = LaneEval.bench(pred,
                                                      org_anno['org_lanes'],
                                                      org_anno['y_samples'],
                                                      0, True)

        return matches, accs, dist
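
For context, the file written by save_tusimple_predictions is expected to follow the TuSimple submission format: one JSON record per image, giving the lane x-coordinates sampled at the benchmark's fixed y positions. A minimal sketch of producing such a file (the field names come from the public TuSimple benchmark; the sample values and the filename are illustrative assumptions):

import json

# One record per image; -2 marks y positions where the lane is absent,
# and run_time is the per-image inference time in milliseconds.
predictions = [
    {
        'raw_file': 'clips/0530/1492626047222176976_0/20.jpg',  # example path
        'lanes': [[-2, -2, 632, 625, 617], [717, 734, 748, 762, 777]],
        'run_time': 20.5,
    },
]

with open('jst_predictions_test.csv', 'w') as f:  # name mirrors the method above
    for record in predictions:
        f.write(json.dumps(record) + '\n')
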
Example #3
# Print the durations
print('The forward pass time for one image is: {}ms'.format(forward_avg *
                                                            1000))
print('The clustering time for one image is: {}ms'.format(cluster_avg * 1000))
print('The total time for one image is: {}ms'.format(
    (cluster_avg + forward_avg) * 1000))

print('The speed for forward pass is: {}fps'.format(1 / forward_avg))
print('The speed for clustering is: {}fps'.format(1 / cluster_avg))
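
The averages printed above are assumed to have been collected earlier in the script by timing the network forward pass and the embedding clustering separately over the test set. A minimal sketch of gathering such averages (test_images, model, and cluster_embeddings are assumptions standing in for the script's actual names):

import time

forward_times, cluster_times = [], []
for image in test_images:            # assumed iterable of preprocessed inputs
    start = time.time()
    embeddings = model(image)        # assumed network forward pass
    forward_times.append(time.time() - start)

    start = time.time()
    lanes = cluster_embeddings(embeddings)  # assumed post-processing step
    cluster_times.append(time.time() - start)

forward_avg = sum(forward_times) / len(forward_times)
cluster_avg = sum(cluster_times) / len(cluster_times)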

import json  # needed for json.dump below; assumed imported earlier in the full script

# Write the results to pred.json, one JSON record per line.
# If you used the model trained on augmented data, write to
# pred_aug.json instead by swapping in the commented-out line.
# with open('TUSIMPLE/pred_aug.json', 'w') as f:
with open('TUSIMPLE/pred.json', 'w') as f:
    for res in json_pred:
        json.dump(res, f)
        f.write('\n')

# Evaluate our results using the TuSimple benchmark script
# and its ground-truth labels
from utils.lane import LaneEval

# Swap in the pred_aug.json line below if you evaluated
# the model trained on augmented data
result = LaneEval.bench_one_submit('TUSIMPLE/pred.json',
                                   'TUSIMPLE/test_set/test_label.json')
# result = LaneEval.bench_one_submit('TUSIMPLE/pred_aug.json',
#                                    'TUSIMPLE/test_set/test_label.json')

print(result)
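
As Example #1 shows, bench_one_submit returns its metrics as a JSON string of entries with 'name' and 'value' keys, so the raw printout above can be turned into labeled values with a few lines:

import json

# Structure inferred from how Example #1 consumes the same result
for metric in json.loads(result):
    print('{}: {}'.format(metric['name'], metric['value']))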