Example #1
    def post(self):
        assert util.development() or oauth.is_current_user_admin()
        util.log_upload_data(self.request.path, self.request.get("data"))
        data = StringIO.StringIO(self.request.get("data"))
        for line in data:
            metric_info = json.loads(line)

            # Each line is a JSON-encoded metric definition; store it in the
            # datastore, keyed by the metric's name.
            m = model.Metric(key_name=metric_info["name"],
                             display_name=metric_info["display name"],
                             distortion=metric_info["distortion"],
                             yaxis=metric_info.get("yaxis", None))
            m.put()
        model.metrics().invalidate()
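
This upload handler expects newline-delimited JSON, one metric definition per line, with the keys the handler reads ("name", "display name", "distortion", and an optional "yaxis"). A minimal sketch of a payload it could parse; the metric names and values here are invented for illustration, not taken from the project:

import json

payload = "\n".join([
    json.dumps({"name": "psnr", "display name": "Average PSNR",
                "distortion": True, "yaxis": "dB"}),
    json.dumps({"name": "time", "display name": "Encode Time (s)",
                "distortion": False}),
])

for line in payload.splitlines():
    metric_info = json.loads(line)
    print(metric_info["name"], metric_info.get("yaxis", None))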
Example #2
def get_adhoc_improvement(metrics, config, filenames, commit):
    # Mostly copied from main.py with some notable changes
    response = []

    # Find the baseline based on the raw URL variables
    parent = main.find_baseline(",".join(metrics), config, ",".join(filenames),
                                commit)
    result = []

    for m in metrics:
        if model.metrics()[m].distortion:
            improvement = main.rd_improvement
        else:
            improvement = main.mean_improvement

        if parent:
            baseline_data = main.fetch_metric_for_fileset(
                m, config, filenames, parent)
            average, results = main.calculate_improvement(
                m, config, filenames, commit, baseline_data, improvement)
        else:
            results = dict([f, 0.0] for f in filenames)

        for f, composite in results.iteritems():
            response.append({
                'metric': m,
                'config': config,
                'baseline': parent,
                'filename': f,
                'value': composite
            })
    return response
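
A sketch of one record in the list this helper returns (values invented); every metric/filename combination yields one flat entry:

example_entry = {
    'metric': 'psnr',
    'config': 'realtime',
    'baseline': '0123456789abcdef',
    'filename': 'akiyo_cif.y4m',
    'value': 1.8,
}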
Example #3
def fetch_config_info(metric, config, filename, commit):
    '''This function fetches the data for a given metric, config, filename,
    commit tuple. This functionality is used multiple places, such as
    CodecMetricHandler and AverageImprovementHandler.'''
    indexes = model.CodecMetricIndex.all(keys_only = True)
    indexes = indexes.filter('metrics =', metric)
    indexes = indexes.filter('config_name =', config)
    indexes = indexes.filter('files =', filename)
    indexes = indexes.filter('commit =', commit)
    keys = [k.parent() for k in indexes]

    if len(keys) == 0:
        return None

    metric_data = model.metrics()[metric]
    result=[]
    for cm in db.get(keys): # cm = codec metric

        # we get the runtime and config flags
        config_flags = cm.config_flags
        runtime_flags = cm.runtime_flags
        commit = cm.commit

        result.append((commit, config_flags, runtime_flags))

    # Sanity checks - we only want one runtime configuration
    assert len(result) == 1

    # We go ahead and return the tuple
    result = result[0]
    return result
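
A possible call site (argument values are placeholders, not from the project); fetch_config_info() returns a single (commit, config_flags, runtime_flags) tuple, or None when no index matches:

info = fetch_config_info("psnr", "realtime", "akiyo_cif.y4m", "deadbeef1")
if info is not None:
    commit, config_flags, runtime_flags = info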
Example #4
    def get(self, metric, config, filename, commit):
        """Fetches the requested metric data as JSON"""
        if not metric or not config or not filename or not commit:
            self.error(404)
            return

        filename = urllib.unquote(filename)
        commit = urllib.unquote(commit)

        if commit[0] == "~":
            result = {
                'yaxis':
                "Percent Improvement",
                'data':
                fetch_time_series(metric, config, filename, commit)[filename],
            }
        else:
            result = {
                'yaxis': model.metrics()[metric].yaxis,
                'data': fetch_codec_metric(metric, config, filename, commit),
            }

        # Return the result
        if result['data']:
            self.response.headers['Content-Type'] = 'application/json'
            self.response.out.write(pretty_json(result))
        else:
            self.error(404)
Example #5
    def get(self, metric, config, filename, commit):
        """Fetches the requested metric data as JSON"""
        if not metric or not config or not filename or not commit:
            self.error(404)
            return

        filename = urllib.unquote(filename)
        commit = urllib.unquote(commit)

        if commit[0] == "~":
            result = {'yaxis': "Percent Improvement",
                      'data': fetch_time_series(metric, config, filename,
                                                commit)[filename],
                     }
        else:
            result = {'yaxis': model.metrics()[metric].yaxis,
                      'data': fetch_codec_metric(metric, config, filename,
                                                 commit),
                      }

        # Return the result
        if result['data']:
            self.response.headers['Content-Type'] = 'application/json'
            self.response.out.write(pretty_json(result))
        else:
            self.error(404)
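
A rough sketch of the JSON body this handler would write for a plain commit hash (numbers invented). The yaxis comes from model.metrics()[metric].yaxis, and for a distortion-style metric each data row returned by fetch_codec_metric() is [Bitrate, target_bitrate, metric value]:

example_response = {
    'yaxis': 'dB',
    'data': [[128.4, 128, 36.2],
             [256.1, 256, 39.0],
             [512.9, 512, 41.7]],
}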
Example #6
def fetch_config_info(metric, config, filename, commit):
    '''This function fetches the data for a given metric, config, filename,
    commit tuple. This functionality is used multiple places, such as
    CodecMetricHandler and AverageImprovementHandler.'''
    indexes = model.CodecMetricIndex.all(keys_only=True)
    indexes = indexes.filter('metrics =', metric)
    indexes = indexes.filter('config_name =', config)
    indexes = indexes.filter('files =', filename)
    indexes = indexes.filter('commit =', commit)
    keys = [k.parent() for k in indexes]

    if len(keys) == 0:
        return None

    metric_data = model.metrics()[metric]
    result = []
    for cm in db.get(keys):  # cm = codec metric

        # we get the runtime and config flags
        config_flags = cm.config_flags
        runtime_flags = cm.runtime_flags
        commit = cm.commit

        result.append((commit, config_flags, runtime_flags))

    # Sanity checks - we only want one runtime configuration
    assert len(result) == 1

    # We go ahead and return the tuple
    result = result[0]
    return result
Example #7
def valid_cost(ep):
    metrics = []
    L_v_all = 0.
    all_keys = valid_image.keys()
    metric_per = np.zeros((6, 4))
    for k in all_keys:
        output_prob_np = np.zeros(valid_gt[k].shape)
        for i in range(0, output_prob_np.shape[0], batch_size):
            output_prob_np[i:i+batch_size] = sess.run(
                out_bin,
                feed_dict={x: valid_image[k][i:i+batch_size],
                           y_: valid_gt[k][i:i+batch_size],
                           is_train: False})
        if plane == 'A1':
            output_prob_np = np.swapaxes(output_prob_np, 0, 1)
            output_prob_np = np.swapaxes(output_prob_np, 2, 1)
            valid_gt[k] = np.swapaxes(valid_gt[k], 0, 1)
            valid_gt[k] = np.swapaxes(valid_gt[k], 2, 1)
        if plane == 'A2':
            output_prob_np = np.swapaxes(output_prob_np, 0, 1)
            valid_gt[k] = np.swapaxes(valid_gt[k], 0, 1)
        for nn in range(6):
            class_met = model.metrics(output_prob_np[..., nn + 1],
                                      valid_gt[k][..., nn + 1])
            metric_per[nn, :] = metric_per[nn, :] + class_met.metrics_cal_cpu()
    metric_per = metric_per / (len(all_keys))
    print("Ave. Dice=", np.mean(metric_per, axis=0)[3])
    save(output_prob_np,
         directory + '/out_prob_' + str(ep).zfill(3) + '_' + str(k) + '.nii')
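
In this project, model.metrics appears to be a per-class segmentation metric helper whose fourth value is a Dice score (only that value is printed above). A standalone numpy sketch of a Dice coefficient, included for reference and not taken from the project:

import numpy as np

def dice_coefficient(pred, gt, eps=1e-7):
    # Dice = 2 * |pred AND gt| / (|pred| + |gt|), for binary masks.
    pred = pred.astype(bool)
    gt = gt.astype(bool)
    return (2.0 * np.logical_and(pred, gt).sum() + eps) / (pred.sum() + gt.sum() + eps)

print(dice_coefficient(np.ones((4, 4)), np.ones((4, 4))))  # 1.0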
Example #8
 def query(self, metric, config, filename, commit):
     result = None
     metric_cache = model.metrics()
     for m in _split_field(metric):
         for cfg in _split_field(config):
             for f in _split_filename(filename):
                 for cm in _split_field(commit):
                     if not result:
                         result = self.query_(metric_cache, m,cfg,f,cm)
                     else:
                         r = self.query_(metric_cache, m,cfg,f,cm)
                         for idx in range(4):
                             result[idx] = result[idx].intersection(r[idx])
     return result
Example #9
 def query(self, metric, config, filename, commit):
     result = None
     metric_cache = model.metrics()
     for m in _split_field(metric):
         for cfg in _split_field(config):
             for f in _split_filename(filename):
                 for cm in _split_field(commit):
                     if not result:
                         result = self.query_(metric_cache, m, cfg, f, cm)
                     else:
                         r = self.query_(metric_cache, m, cfg, f, cm)
                         for idx in range(4):
                             result[idx] = result[idx].intersection(r[idx])
     return result
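
The core of the loop above is an element-wise set intersection across every metric/config/filename/commit combination. Isolated as a sketch, assuming (as the range(4) loop suggests) that query_() returns four sets:

def combine(acc, r):
    # The first result seeds the accumulator; later results narrow it.
    if acc is None:
        return list(r)
    return [acc[idx].intersection(r[idx]) for idx in range(4)]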
Example #10
def _metrics(cfg: dict):
    top_n = cfg['top_n']
    cfg_model, cfg_dataset = cfg['model'], cfg['dataset']
    cfg_results = cfg['results']

    _, col_files, col_reviewers = cfg_dataset['cols']
    x_df = utils \
        .read_csv(cfg_dataset['path'], usecols=cfg_dataset['cols']) \
        .pipe(utils.to_list_of_strings, col=col_files) \
        .pipe(utils.to_list_of_strings, col=col_reviewers)
    model_recommender = model.deserialize(cfg_model['path'])

    metrics_df = model.metrics(model_recommender,
                               x_df,
                               cfg_dataset['cols'],
                               top_n=top_n)

    if cfg_results.get('save', True):
        utils.save_csv(cfg_results['out'], metrics_df, index=True)
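
A hypothetical cfg dict with the keys _metrics() reads above (paths and column names invented for illustration):

cfg = {
    'top_n': 5,
    'model': {'path': 'artifacts/recommender.pkl'},
    'dataset': {
        'path': 'data/reviews.csv',
        # exactly three columns: presumably an id column, the file-list
        # column, and the reviewer-list column, in that order
        'cols': ['change_id', 'files', 'reviewers'],
    },
    'results': {'save': True, 'out': 'artifacts/metrics.csv'},
}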
Example #11
def fetch_metric_for_fileset(metric, config, files, commit):
    """This function is a bulk version of fetch_codec_metric()"""
    indexes = model.CodecMetricIndex.all(keys_only = True)
    indexes = indexes.filter('metrics =', metric)
    indexes = indexes.filter('config_name =', config)
    indexes = indexes.filter('commit =', commit)
    keys = [k.parent() for k in indexes]

    if len(keys) == 0:
        return None

    metric_data = model.metrics()[metric]
    results_by_file = {}
    for cm in db.get(keys):
        for filename, runs in cm.data.iteritems():
            if filename not in files:
                continue
            result = results_by_file.get(filename, [])
            for run in runs:
                this_run_data = []

                if metric_data.distortion:
                    this_run_data.append(run["Bitrate"])

                this_run_data.append(run[metric])

                result.append(this_run_data)
            results_by_file[filename] = result

    # Sanity checks
    for filename, result in results_by_file.iteritems():
        for r in result[1:]:
            assert len(r) == len(result[0])

        # Result is a list of lists. Sort by the first element of the nested
        # list.
        results_by_file[filename] = sorted(result, key=lambda x:x[0])
    return results_by_file
Example #12
def fetch_metric_for_fileset(metric, config, files, commit):
    """This function is a bulk version of fetch_codec_metric()"""
    indexes = model.CodecMetricIndex.all(keys_only=True)
    indexes = indexes.filter('metrics =', metric)
    indexes = indexes.filter('config_name =', config)
    indexes = indexes.filter('commit =', commit)
    keys = [k.parent() for k in indexes]

    if len(keys) == 0:
        return None

    metric_data = model.metrics()[metric]
    results_by_file = {}
    for cm in db.get(keys):
        for filename, runs in cm.data.iteritems():
            if filename not in files:
                continue
            result = results_by_file.get(filename, [])
            for run in runs:
                this_run_data = []

                if metric_data.distortion:
                    this_run_data.append(run["Bitrate"])

                this_run_data.append(run[metric])

                result.append(this_run_data)
            results_by_file[filename] = result

    # Sanity checks
    for filename, result in results_by_file.iteritems():
        for r in result[1:]:
            assert len(r) == len(result[0])

        # Result is a list of lists. Sort by the first element of the nested
        # list.
        results_by_file[filename] = sorted(result, key=lambda x: x[0])
    return results_by_file
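
A sketch of the mapping fetch_metric_for_fileset() builds for a distortion metric (filenames and numbers invented): each file maps to its runs as [Bitrate, metric value] rows, sorted by the bitrate in the first column:

results_by_file = {
    'akiyo_cif.y4m': [[128.4, 36.2], [256.1, 39.0], [512.9, 41.7]],
    'foreman_cif.y4m': [[130.2, 34.8], [255.7, 37.5], [514.3, 40.1]],
}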
Example #13
def fetch_codec_metric(metric, config, filename, commit):
    '''This function fetches the data for a given metric, config, filename,
    commit tuple. This functionality is used multiple places, such as
    CodecMetricHandler and AverageImprovementHandler.'''
    indexes = model.CodecMetricIndex.all(keys_only = True)
    indexes = indexes.filter('metrics =', metric)
    indexes = indexes.filter('config_name =', config)
    indexes = indexes.filter('files =', filename)
    indexes = indexes.filter('commit =', commit)
    keys = [k.parent() for k in indexes]

    if len(keys) == 0:
        return None

    metric_data = model.metrics()[metric]
    result=[]
    for cm in db.get(keys):
        for run in cm.data[filename]:
            this_run_data = []

            if metric_data.distortion:
                this_run_data.append(run["Bitrate"])
                this_run_data.append(run["target_bitrate"])

            this_run_data.append(run[metric])

            result.append(this_run_data)

    # Sanity checks
    for r in result[1:]:
        assert len(r) == len(result[0])

    # Result is a list of lists. Sort by the first element of the nested
    # list.
    result = sorted(result, key=lambda x:x[0])
    return result
Example #14
def fetch_codec_metric(metric, config, filename, commit):
    '''This function fetches the data for a given metric, config, filename,
    commit tuple. This functionality is used multiple places, such as
    CodecMetricHandler and AverageImprovementHandler.'''
    indexes = model.CodecMetricIndex.all(keys_only=True)
    indexes = indexes.filter('metrics =', metric)
    indexes = indexes.filter('config_name =', config)
    indexes = indexes.filter('files =', filename)
    indexes = indexes.filter('commit =', commit)
    keys = [k.parent() for k in indexes]

    if len(keys) == 0:
        return None

    metric_data = model.metrics()[metric]
    result = []
    for cm in db.get(keys):
        for run in cm.data[filename]:
            this_run_data = []

            if metric_data.distortion:
                this_run_data.append(run["Bitrate"])
                this_run_data.append(run["target_bitrate"])

            this_run_data.append(run[metric])

            result.append(this_run_data)

    # Sanity checks
    for r in result[1:]:
        assert len(r) == len(result[0])

    # Result is a list of lists. Sort by the first element of the nested
    # list.
    result = sorted(result, key=lambda x: x[0])
    return result
Example #15
    def get_adhoc_improvement(self, metrics, configs, filenames, commits):
        """Calculates the requested composite metrics and outputs as JSON"""
        # Find the baseline based on the raw URL variables
        parent = find_baseline(metrics, configs, filenames, commits)
        # We format the end of the table with extra info
        if parent:
            parent_str = parent[:9]
        else:
            parent_str = "None found"

        result = []

        metrics = util.field_list(metrics)
        configs = util.field_list(configs)
        filenames = util.filename_list(filenames)
        commits = util.field_list(commits)

        # Fix for the case that a commit in commits has no parent
        # In this case we choose the oldest commit as the parent, ie the one
        # without a parent.
        if not parent:
            parent = commits[-1]

        metrics_cache = model.metrics()
        for m in metrics:
            if metrics_cache[m].distortion:
                improvement = rd_improvement
            else:
                improvement = mean_improvement

            for cfg in configs:
                baseline_data = fetch_metric_for_fileset(
                    m, cfg, filenames, parent)
                for cm in commits:
                    col = []  # Each m, cfg, cm combination will be a column in
                    # the table
                    average, results = calculate_improvement(
                        m, cfg, filenames, cm, baseline_data, improvement)
                    for f, composite in results.iteritems():
                        col.append([f, composite])

                    # Build the column name
                    col_name = []
                    if len(metrics) > 1:
                        col_name.append(m)
                    if len(configs) > 1:
                        col_name.append(cfg)
                    if len(col_name) == 0 or len(commits) > 1:
                        col_name.append(cm[:9])
                    col_name = "/".join(col_name)

                    col.append(['OVERALL: (' + parent_str + ')', average])
                    result.append({'col': col_name, 'data': col})
        # return the results
        result = {
            'baseline': parent,
            'data': result,
            'commits': ','.join(commits)
        }
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(pretty_json(result))
Example #16
    def get_adhoc_improvement(self, metrics, configs, filenames, commits):
        """Calculates the requested composite metrics and outputs as JSON"""
        # Find the baseline based on the raw URL variables
        parent = find_baseline(metrics, configs, filenames, commits)
        # We format the end of the table with extra info
        if parent:
            parent_str = parent[:9]
        else:
            parent_str = "None found"

        result = []

        metrics = util.field_list(metrics)
        configs = util.field_list(configs)
        filenames = util.filename_list(filenames)
        commits = util.field_list(commits)

        # Fix for the case that a commit in commits has no parent
        # In this case we choose the oldest commit as the parent, ie the one
        # without a parent.
        if not parent:
            parent = commits[-1]

        metrics_cache = model.metrics()
        for m in metrics:
            if metrics_cache[m].distortion:
                improvement = rd_improvement
            else:
                improvement = mean_improvement

            for cfg in configs:
                baseline_data = fetch_metric_for_fileset(m, cfg, filenames,
                                                         parent)
                for cm in commits:
                    col = [] # Each m, cfg, cm combination will be a column in
                             # the table
                    average, results = calculate_improvement(
                        m, cfg, filenames, cm, baseline_data, improvement)
                    for f, composite in results.iteritems():
                        col.append([f, composite])

                    # Build the column name
                    col_name = []
                    if len(metrics) > 1:
                        col_name.append(m)
                    if len(configs) > 1:
                        col_name.append(cfg)
                    if len(col_name) == 0 or len(commits) > 1:
                        col_name.append(cm[:9])
                    col_name = "/".join(col_name)

                    col.append(['OVERALL: (' + parent_str + ')', average])
                    result.append({'col': col_name,
                                   'data': col})
        # return the results
        result = {'baseline': parent,
                  'data': result,
                  'commits': ','.join(commits)
                  }
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(pretty_json(result))
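
A rough sketch of the JSON structure this handler emits (commit hashes, filenames, and numbers invented): one column per metric/config/commit combination, each column ending with the OVERALL row computed against the baseline:

example_output = {
    'baseline': '0123456789abcdef0000000000000000',
    'commits': 'deadbeef1,deadbeef2',
    'data': [
        {'col': 'deadbeef1',
         'data': [['akiyo_cif.y4m', 1.8],
                  ['foreman_cif.y4m', -0.4],
                  ['OVERALL: (012345678)', 0.7]]},
    ],
}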