Пример #1
0
def evaluateFile(groundTruthFile, submissionFile, options):
    """Evaluate a submission file against ground-truth annotations.

    Parameters:
        groundTruthFile: path to the YACAST ground-truth annotation file.
        submissionFile: path to the participant submission file.
        options: parsed options object; `limitedhours` and `path2xml_ts`
            are read, and `startH/M/S` / `endH/M/S` are written when the
            limited-hours restriction is enabled.

    Returns:
        dict mapping each event type to its metric result (annotated with
        eventType, participant and submission ID), or None when the
        limited-hours time file is not formatted as HH:MM:SS.
    """

    # Evaluation result (dictionary, one entry per event type)
    result = {}

    # Annotations (dictionary, one entry per event type)
    groundtruth = yacastIO.YacastAnnotations(groundTruthFile)

    # Read start time and end time day restriction
    if options.limitedhours:
        # Read the start and end values and pass them along via options.
        # The context manager guarantees the file is closed even if a
        # read fails (the original open/close pair leaked on exception).
        with open(options.path2xml_ts, 'r') as f:
            string_start = f.readline().strip()
            string_end = f.readline().strip()

        # Check format: exactly HH:MM:SS. The pattern is anchored at the
        # end so trailing garbage (e.g. "12:34:5678") is rejected, which
        # a bare re.match would silently accept.
        time_format = r'\d{2}:\d{2}:\d{2}$'
        if re.match(time_format, string_start) and re.match(time_format, string_end):
            list_start = string_start.split(':')
            list_end = string_end.split(':')
        else:
            print "%s > ERROR - Start time (or End time) should be formated as HH:MM:SS" % (
                options.path2xml_ts)
            return None

        # Write data in options
        options.startH = int(list_start[0])
        options.startM = int(list_start[1])
        options.startS = int(list_start[2])
        options.endH = int(list_end[0])
        options.endM = int(list_end[1])
        options.endS = int(list_end[2])

    # Detections (dictionary, one entry per event type)
    submission = submissionIO.Submission(submissionFile)

    # For each event type, evaluate against the matching detections,
    # or against an empty detection list when the type was not submitted.
    for eventType in groundtruth.eventList.keys():
        if eventType in submission.detectionList.keys():
            result[eventType] = metric.compute_metric(
                groundtruth.eventList[eventType],
                submission.detectionList[eventType], options)
        else:
            result[eventType] = metric.compute_metric(
                groundtruth.eventList[eventType], [], options)

        # Store event type, participant ID and submission ID
        result[eventType].eventType = eventType
        result[eventType].participant = submission.participant
        result[eventType].submission = submission.ID

    return result
Пример #2
0
def evaluateFile( groundTruthFile, submissionFile, options):
    """Evaluate one submission file against ground-truth annotations.

    Returns a dict mapping each event type to its metric result (annotated
    with eventType, participant and submission ID), or None when the
    limited-hours time file is not formatted as HH:MM:SS.
    """

    # Evaluation result (dictionary, one entry per event type)
    result = {}

    # Annotations (dictionary, one entry per event type)
    groundtruth = yacastIO.YacastAnnotations(groundTruthFile)

    # Read start time and end time day restriction
    if options.limitedhours:
            # Read the start and end values and pass them along via options
            # Read the first two lines: start time, then end time
            f = open(options.path2xml_ts, 'r')
            string_start=f.readline()
            string_end=f.readline()
            f.close()

            # Check format (HH:MM:SS at the start of each line; readline()
            # leaves a trailing newline, which re.match tolerates here)
            if (re.match(r'\d{2}:\d{2}:\d{2}',string_start)) and (re.match(r'\d{2}:\d{2}:\d{2}',string_end)):
                list_start=re.split(r':',string_start)
                list_end=re.split(r':',string_end)
            else:
                print "%s > ERROR - Start time (or End time) should be formated as HH:MM:SS" % (options.path2xml_ts)
                return None

            # Write data in options.
            # NOTE: int() tolerates the trailing newline left on the
            # seconds field by readline(), so no explicit strip is needed.
            options.startH=int(list_start[0])
            options.startM=int(list_start[1])
            options.startS=int(list_start[2])
            options.endH=int(list_end[0])
            options.endM=int(list_end[1])
            options.endS=int(list_end[2])

    # Detections (dictionary, one entry per event type)
    submission  = submissionIO.Submission(submissionFile)

    # For each event type
    for eventType in groundtruth.eventList.keys():
        # Perform the evaluation (empty detection list when the event
        # type is absent from the submission)
        if eventType in submission.detectionList.keys():
            result[eventType] = metric.compute_metric(groundtruth.eventList[eventType], submission.detectionList[eventType], options)
        else:
            result[eventType] = metric.compute_metric(groundtruth.eventList[eventType], [], options)

        # Store event type, participant ID and submission ID
        result[eventType].eventType = eventType
        result[eventType].participant = submission.participant
        result[eventType].submission = submission.ID

    return result
Пример #3
0
def track_convergence(data, ref, test_dirs, metrics, eps=1e-2):
    """Track error convergence over partial renders.

    For every '<name>_partial' directory, loads each numbered partial image,
    computes every metric against `ref`, then appends the (timesteps, errors)
    sequences to the matching series inside `data['stats'][0]['series']`.

    Parameters:
        data: data.js-style dictionary; mutated in place.
        ref: reference image forwarded to compute_metric.
        test_dirs: list of partial-render directories (one per algorithm).
        metrics: metric names, matched case-insensitively against series labels.
        eps: numerical epsilon forwarded to compute_metric.
    """

    # Named functions instead of lambda assignments (PEP 8 E731).
    def num_order(path):
        # Sort key: the trailing integer index of a partial filename.
        return int(path.split('_')[-1].split('.')[0])

    def round_10(x):
        # NOTE(review): despite the comment at the call site, this rounds to
        # the nearest integer, not the nearest ten -- confirm intent.
        return int(round(x))

    # All partial directories (one per algorithm)
    all_stats = []
    for partial_dir in test_dirs:
        # Determine extension by checking first partial file
        name = partial_dir.split(os.path.sep)[-1].replace('_partial', '')
        ext = detect_extension(os.path.join(partial_dir, '{}_1'.format(name)))

        # List all partial files in numeric order
        glob_expr = os.path.join(partial_dir, '{}_[0-9]*.{}'.format(name, ext))
        partial_files = sorted(glob.glob(glob_expr), key=num_order)

        # All partial images within a directory
        dir_stat = []
        for partial_f in partial_files:
            test = load_img(partial_f)

            # Compute all metrics on (ref, test) pair
            metric_dict = {}
            for metric in metrics:
                err_img = compute_metric(ref, test, metric.lower(), eps)
                metric_dict[metric] = '{:.6f}'.format(np.mean(err_img))
            dir_stat.append(metric_dict)

        all_stats.append(dir_stat)

    # Regroup per metric: one error sequence per test directory.
    all_metrics = {metric: [] for metric in metrics}
    for p, _ in enumerate(test_dirs):
        for metric in metrics:
            all_metrics[metric].append(
                [float(stat[metric]) for stat in all_stats[p]])

    # Insert into dictionary (the ugliness of this is an artefact of using JSON...)
    for t, test_dir in enumerate(test_dirs):
        time_file = os.path.basename('{}_time.csv'.format('_'.join(
            test_dir.split('_')[:-1])))
        with open(os.path.join(test_dir, time_file)) as fp:
            timesteps = [item for row in csv.reader(fp) for item in row]

        # Round to nearest ten, assuming frequency % 10 = 0
        # (empty CSV cells are dropped before conversion)
        timesteps = [round_10(float(ts)) for ts in timesteps if ts]

        for metric in metrics:
            for entry in data['stats'][0]['series']:
                if entry['label'] == metric.upper():
                    entry['track']['x'].append(timesteps)
                    entry['track']['y'].append(all_metrics[metric][t])
Пример #4
0
def update_stats(path_dir, data, ref, tests, metrics, clip, eps=1e-2):
    """Update some entries of data.js; assumes it was already created.

    For each test, (re)generates its LDR image, recomputes every metric and
    its false-color heatmap, and inserts (new test) or overwrites (known
    test) the corresponding entries of `data`.

    Returns:
        The mutated `data` dictionary.
    """

    # Named function instead of a lambda assignment (PEP 8 E731).
    def find_idx(test, d):
        # Position of this test's label within the stats labels list.
        return list(d['stats'][0]['labels']).index(test['name'])

    for test in tests:
        # Check if entry exists
        is_new = test['name'] not in data['stats'][0]['labels']

        # Update dictionary
        if is_new:
            data['imageBoxes'][0]['elements'].append(hdr_to_ldr(
                path_dir, test))
            data['stats'][0]['labels'].append(test['name'])
        else:
            # Entry already listed: remember its slot and only regenerate
            # the LDR image on disk (return value intentionally unused).
            idx = find_idx(test, data)
            hdr_to_ldr(path_dir, test)

        # Compute desired metrics
        for m, metric in enumerate(metrics):
            # Recompute error
            err_img = compute_metric(ref, test['data'], metric.lower(), eps)
            err_mean = '{:.6f}'.format(np.mean(err_img))
            if is_new:
                data['stats'][0]['series'][m]['data'].append(err_mean)
            else:
                data['stats'][0]['series'][m]['data'][idx] = err_mean

            # Recompute false color heatmap and save to files
            fc = falsecolor(err_img, clip, eps)
            fc_fname = '{}-{}.png'.format(test['name'], metric.upper())
            plt.imsave(os.path.join(path_dir, fc_fname), fc)

            if is_new:
                fc_entry = {
                    'title': test['name'],
                    'version': '-',
                    'image': fc_fname
                }
                data['imageBoxes'][m + 1]['elements'].append(fc_entry)

    # TODO: Update stats.json
    return data
Пример #5
0
def compute_stats(path_dir, ref, tests, metrics, clip, negpos, eps=1e-2):
    """Generate all false color LDR maps and dictionary for JS.

    Assumes tests = {'name': 'my_alg', 'data': ...}.

    Parameters:
        path_dir: output directory for generated PNG/LDR files.
        ref: reference image; also written out as the 'Reference' entry.
        tests: list of test dicts with 'name' and 'data' keys.
        metrics: metric names understood by compute_metric.
        clip: clipping range forwarded to falsecolor.
        negpos: if truthy, also emit negative/positive SMAPE images.
        eps: numerical epsilon forwarded to compute_metric/falsecolor.

    Returns:
        The freshly built data.js-style dictionary.
    """

    data = {}
    data['imageBoxes'] = [{'title': 'Images', 'elements': []}]
    data['stats'] = [{'title': 'Stats', 'labels': [], 'series': []}]
    ref_entry = hdr_to_ldr(path_dir, {'name': 'Reference', 'data': ref})
    data['imageBoxes'][0]['elements'].append(ref_entry)

    # Generate images and compute stats
    # Couldn't find a way to do it all in only two loops
    stats = []
    for t, test in enumerate(tests):
        # Update dictionary
        data['imageBoxes'][0]['elements'].append(hdr_to_ldr(path_dir, test))
        data['stats'][0]['labels'].append(test['name'])

        # Compute all metrics
        stat_entry = {test['name']: {}}
        stats.append(stat_entry)
        for metric in metrics:
            # Compute error. The metric name is lowercased for
            # compute_metric, consistent with update_stats() and
            # track_convergence(), which both pass metric.lower().
            err_img = compute_metric(ref, test['data'], metric.lower(), eps)
            err_mean = '{:.6f}'.format(np.mean(err_img))

            # Compute false color heatmap and save to files
            fc = falsecolor(err_img, clip, eps)
            fc_fname = '{}-{}.png'.format(test['name'], metric.upper())
            plt.imsave(os.path.join(path_dir, fc_fname), fc)

            # Save stats, if necessary
            stats[t][test['name']][metric.upper()] = {
                'val': err_mean,
                'fc': fc_fname
            }

    # Write dictionary
    for metric in metrics:
        fc_entry = {'title': metric.upper(), 'elements': []}
        metric_entry = {
            'label': metric.upper(),
            'data': [],
            'track': {
                'x': [],
                'y': []
            }
        }

        for t, test in enumerate(tests):
            # Add false color filenames to dict
            fc_fname = stats[t][test['name']][metric.upper()]['fc']
            entry = {'title': test['name'], 'version': '-', 'image': fc_fname}
            fc_entry['elements'].append(entry)

            # Add metric value to dict
            err_mean = stats[t][test['name']][metric.upper()]['val']
            metric_entry['data'].append(err_mean)

        # Update dictionary with false color filenames and metrics
        data['imageBoxes'].append(fc_entry)
        data['stats'][0]['series'].append(metric_entry)

    # Write negative/positive image if requested
    if negpos:
        fc_entry = {'title': 'NP SMAPE', 'elements': []}
        for t, test in enumerate(tests):
            # Compute the N/P false color image
            fc = falsecolor_np(ref, test['data'], eps)
            fc_fname = '{}-NP.png'.format(test['name'])
            plt.imsave(os.path.join(path_dir, fc_fname), fc)

            # Save the fcname inside JSON
            entry = {'title': test['name'], 'version': '-', 'image': fc_fname}
            fc_entry['elements'].append(entry)

        # Update dictionary with false color filenames
        data['imageBoxes'].append(fc_entry)

    generate_thumbnail(path_dir, ref)
    return data
Пример #6
0
def track_convergence(data, ref, test_dirs, metrics, time, eps=1e-2, exposure=1, sametime=False):
    """Track error convergence over partial renders.

    For every '<name>_partial' directory, loads each numbered partial image
    (scaled by `exposure`), computes every metric against `ref`, and appends
    the (timesteps, errors) sequences to the matching series inside
    `data['stats'][0]['series']`.

    Parameters:
        data: data.js-style dictionary; mutated in place.
        ref: reference image forwarded to compute_metric.
        test_dirs: list of partial-render directories (one per algorithm).
        metrics: metric names, matched case-insensitively against series labels.
        time: if truthy, also append a 'time' series with per-iteration
            durations derived from the cumulative CSV timestamps.
            (NOTE: this parameter shadows the stdlib `time` module name.)
        eps: numerical epsilon forwarded to compute_metric.
        exposure: scale factor applied to each loaded partial image.
        sametime: if True, use iteration indices instead of recorded timestamps.
    """

    # Named function instead of a lambda assignment (PEP 8 E731).
    def num_order(path):
        # Sort key: the trailing integer index of a partial filename.
        return int(path.split('_')[-1].split('.')[0])

    # All partial directories (one per algorithm)
    all_stats = []
    for partial_dir in test_dirs:
        # Determine extension by checking first partial file
        name = partial_dir.split(os.path.sep)[-1].replace('_partial', '')
        ext = detect_extension(os.path.join(partial_dir, '{}_1'.format(name)))

        # List all partial files in numeric order
        glob_expr = os.path.join(partial_dir, '{}_[0-9]*.{}'.format(name, ext))
        partial_files = sorted(glob.glob(glob_expr), key=num_order)

        # All partial images within a directory
        dir_stat = []
        for partial_f in partial_files:
            test = load_img(partial_f) * exposure

            # Compute all metrics on (ref, test) pair
            metric_dict = {}
            for metric in metrics:
                err_img = compute_metric(ref, test, metric.lower(), eps)
                metric_dict[metric] = '{:.6f}'.format(np.mean(err_img))
            dir_stat.append(metric_dict)

        all_stats.append(dir_stat)

    # Regroup per metric: one error sequence per test directory.
    all_metrics = {metric: [] for metric in metrics}
    for p, _ in enumerate(test_dirs):
        for metric in metrics:
            all_metrics[metric].append(
                [float(stat[metric]) for stat in all_stats[p]])

    if time:
        time_entry = {'label': "time", 'data': [], 'track': {"x": [], "y": []}}
        data['stats'][0]['series'].append(time_entry)

    # Insert into dictionary (the ugliness of this is an artefact of using JSON...)
    for t, test_dir in enumerate(test_dirs):
        time_file = os.path.basename('{}_time.csv'.format(test_dir))
        with open(os.path.join(test_dir, time_file)) as fp:
            timesteps = [item for row in csv.reader(fp) for item in row]
        if sametime:
            timesteps = [float(i) for i in range(len(timesteps))]
        else:
            timesteps = [float(ts) for ts in timesteps if ts]

        for metric in metrics:
            for entry in data['stats'][0]['series']:
                if entry['label'] == metric.upper():
                    entry['track']['x'].append(timesteps)
                    entry['track']['y'].append(all_metrics[metric][t])

        # Write the time per iteration: the CSV stores cumulative
        # timestamps, so each duration is the difference of consecutive
        # entries (first entry is its own duration). Computed once instead
        # of twice; the original's bare `time_entry['track']['x']`
        # no-op statement is removed.
        if time:
            deltas = [timesteps[i] if i == 0 else timesteps[i] - timesteps[i - 1]
                      for i in range(len(timesteps))]
            time_entry['track']['x'].append([i for i in range(len(timesteps))])
            time_entry['track']['y'].append(deltas)
            time_entry['data'].append('{:.6f}'.format(np.mean(deltas)))