Пример #1
0
def make_styler(col_map):
    """Return a style object for *col_map*.

    Prefers a FieldStyle keyed on field values; when the map has too
    much variety for that, falls back to one style per combination of
    field values (noticeably harder to read).
    """
    try:
        styler = FieldStyle(col_map.get_values())
    except ExcessVarietyException:
        # Fallback: too many fields/values for per-field styling, so
        # every combination of properties gets its own style instead.
        warning = ("Too many columns and/or column values to create pretty "
                   "and simple graphs!\nGiving each combination of properties "
                   "its own line.")
        log_once(warning)
        styler = CombinationStyle(col_map)
    return styler
Пример #2
0
def make_styler(col_map):
    """Build the graph styler for *col_map*, with a fallback.

    Normally styles by field values (FieldStyle); if the column map is
    too varied, logs a warning once and styles each combination of
    field values separately (CombinationStyle).
    """
    try:
        # Preferred path: one style dimension per field value.
        return FieldStyle(col_map.get_values())
    except ExcessVarietyException:
        # Degraded path — harder to visually parse, but always possible.
        log_once("Too many columns and/or column values to create pretty "
                 "and simple graphs!\nGiving each combination of properties "
                 "its own line.")
        return CombinationStyle(col_map)
Пример #3
0
def extract_sched_data(result, data_dir, work_dir):
    """Collect per-task scheduling statistics and summarize into *result*.

    Builds a task dictionary from data_dir/work_dir, accumulates one
    list of values per metric name, then stores a Measurement per
    non-empty, non-all-zero metric in *result* (dict-like). Tasks with
    no params are skipped entirely; tasks whose record loss exceeds
    conf.MAX_RECORD_LOSS contribute only to "record-loss".
    """
    task_dict = create_task_dict(data_dir, work_dir)
    stat_data = defaultdict(list)

    # Group per-task values
    for tdata in task_dict.itervalues():
        if not tdata.params:
            # Currently unknown where these invalid tasks come from...
            continue

        miss = tdata.misses

        # NOTE(review): assumes matches + disjoints > 0 for every task;
        # otherwise this raises ZeroDivisionError — confirm upstream.
        record_loss = float(miss.disjoints)/(miss.matches + miss.disjoints)
        stat_data["record-loss"].append(record_loss)

        if record_loss > conf.MAX_RECORD_LOSS:
            log_once(LOSS_MSG)
            continue

        miss_ratio = float(miss.num) / miss.matches
        avg_tard = miss.avg * miss_ratio

        stat_data["miss-ratio"].append(miss_ratio)

        # float() avoids Python 2 integer truncation when miss.max and
        # the period are both ints (avg_tard is already a float via
        # miss_ratio, so the line below it is safe as-is).
        stat_data["max-tard"].append(float(miss.max) / tdata.params.period)
        stat_data["avg-tard"].append(avg_tard / tdata.params.period)

        stat_data["avg-block"].append(tdata.blocks.avg / NSEC_PER_MSEC)
        stat_data["max-block"].append(tdata.blocks.max / NSEC_PER_MSEC)

    # Summarize value groups
    for name, data in stat_data.iteritems():
        # Skip metrics that are empty or sum to zero (nothing to report).
        if not data or not sum(data):
            log_once(SKIP_MSG, SKIP_MSG % name)
            continue
        result[name] = Measurement(str(name)).from_array(data)
Пример #4
0
def extract_sched_data(result, data_dir, work_dir):
    """Collect per-task scheduling and preemption statistics into *result*.

    Builds a task dictionary from data_dir/work_dir, accumulates one
    list of values per metric name (miss/tardiness/blocking plus
    preemption and migration counts), then stores a Measurement per
    non-empty metric in *result* (dict-like). Tasks with no params are
    skipped; tasks whose record loss exceeds conf.MAX_RECORD_LOSS get a
    diagnostic log line and contribute only to "record-loss".
    """
    task_dict = create_task_dict(data_dir, work_dir)
    stat_data = defaultdict(list)

    # Group per-task values
    for tdata in task_dict.itervalues():
        if not tdata.params:
            # Currently unknown where these invalid tasks come from...
            continue

        miss = tdata.misses

        # Hoisted: disjoints() was previously evaluated three times.
        disjoints = miss.disjoints()
        # NOTE(review): assumes matches + disjoints > 0 for every task;
        # otherwise this raises ZeroDivisionError — confirm upstream.
        record_loss = float(disjoints)/(miss.matches + disjoints)
        stat_data["record-loss"].append(record_loss)

        # Merged the two identical record-loss guards into one branch:
        # log the diagnostic, log the generic message, skip the task.
        if record_loss > conf.MAX_RECORD_LOSS:
            log_once("dir = {2}, miss.disjoints = {0}, miss.matches = {1} ratio= {3}%".format(unicode(disjoints), unicode(miss.matches), unicode(data_dir), unicode(100*record_loss)))
            log_once(LOSS_MSG)
            continue

        miss_ratio = float(miss.num) / miss.matches
        avg_tard = miss.avg * miss_ratio

        stat_data["miss-ratio"].append(miss_ratio)

        # float() guards against Python 2 integer truncation.
        stat_data["tard-max"].append(float(miss.max) / tdata.params.period)
        stat_data["tard-avg"].append(avg_tard / tdata.params.period)

        stat_data["block-avg"].append(tdata.blocks.avg / NSEC_PER_MSEC)
        stat_data["block-max"].append(tdata.blocks.max / NSEC_PER_MSEC)

        preemptions = tdata.preemptions.get_preemptions()
        filtered_preemptions = tdata.preemptions.get_filtered_preemptions()
        migrations = tdata.preemptions.get_migrations()
        jobs = tdata.preemptions.get_jobs()
        stat_data["jobs"].append(jobs)
        stat_data["preemptions"].append(preemptions)
        stat_data["filtered-preemptions"].append(filtered_preemptions)
        stat_data["migrations"].append(migrations)
        # NOTE(review): assumes jobs > 0 — a task that completed no jobs
        # would raise ZeroDivisionError here; confirm upstream guarantees.
        stat_data["preemptions-per-job"].append(float(preemptions)/jobs)
        stat_data["migrations-per-job"].append(float(migrations)/jobs)
        stat_data["filtered-preemptions-per-job"].append(float(filtered_preemptions)/jobs)

    # Summarize value groups
    for name, data in stat_data.iteritems():
        if not data:
            log_once(SKIP_MSG, SKIP_MSG % name)
            continue
        result[name] = Measurement(str(name)).from_array(data)