Example #1
    def __init__(self, fname, ivar = "TASK-COUNT",
                              non_decreasing = True,
                              custom_mapping = None,
                              per_cpu_task_counts = False,
                              num_cpus = None):
        self.ivar = ivar
        self.fields = []
        self.custom_mapping = custom_mapping

        data = load_column_csv(fname, convert=float)

        if ivar not in data.by_name:
            raise IOError("Independent variable column {} is missing".format(ivar))

        for field in data.by_name:
            if field != ivar:
                self.fields.append(field)

                points = zip(data.by_name[ivar], data.by_name[field])
                if per_cpu_task_counts:
                    points = [(num_cpus * x, y) for (x, y) in points]
                if non_decreasing:
                    self.__dict__[self.field_name(field)] = monotonic_pwlin(points)
                else:
                    self.__dict__[self.field_name(field)] = piece_wise_linear(points)
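
All of these examples follow the same pattern: load a column-oriented CSV, pair the independent-variable column with each remaining column, and fit either a monotone or an unconstrained piece-wise linear approximation. The snippet below is a minimal, self-contained sketch of what piece_wise_linear and monotonic_pwlin could look like; it only illustrates the technique and is not the library's actual implementation.

# Minimal sketch (assumed behavior, not the library implementation):
# a callable piece-wise linear interpolator over (x, y) sample points,
# plus a monotone variant that clamps y with a running maximum.

def piece_wise_linear(points):
    pts = sorted(points)

    def f(x):
        # clamp to the sampled range at both ends
        if x <= pts[0][0]:
            return pts[0][1]
        if x >= pts[-1][0]:
            return pts[-1][1]
        # interpolate linearly between the two bracketing samples
        for (x0, y0), (x1, y1) in zip(pts, pts[1:]):
            if x0 <= x <= x1:
                return y1 if x1 == x0 else y0 + (y1 - y0) * (x - x0) / (x1 - x0)

    return f

def monotonic_pwlin(points):
    # force y to be non-decreasing before interpolating
    clamped, best = [], float('-inf')
    for x, y in sorted(points):
        best = max(best, y)
        clamped.append((x, best))
    return piece_wise_linear(clamped)

For example, monotonic_pwlin([(0, 1), (1, 0), (2, 5)]) first clamps the samples to [(0, 1), (1, 1), (2, 5)] and then evaluates to 3.0 at x = 1.5.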
Example #2
    def load_approximations(self,
                            fname,
                            non_decreasing=True,
                            custom_fields=None,
                            per_cpu_task_counts=False,
                            num_cpus=None):
        if custom_fields is None:
            custom_fields = []

        data = load_column_csv(fname, convert=float)
        if 'TASK-COUNT' not in data.by_name:
            raise IOError("TASK-COUNT column is missing")

        # initialize custom fields, if any
        for (name, field) in custom_fields:
            self.__dict__[field] = const(0)

        for (name, field) in Overheads.FIELD_MAPPING + custom_fields:
            if name in data.by_name:
                points = zip(data.by_name['TASK-COUNT'], data.by_name[name])
                if per_cpu_task_counts:
                    points = [(num_cpus * x, y) for (x, y) in points]
                if non_decreasing:
                    self.__dict__[field] = monotonic_pwlin(points)
                else:
                    self.__dict__[field] = piece_wise_linear(points)
Example #3
    def load_approximations(self, fname, non_decreasing=True):
        data = load_column_csv(fname, convert=float)
        if 'TASK-COUNT' not in data.by_name:
            raise IOError("TASK-COUNT column is missing")

        for (name, field) in Overheads.FIELD_MAPPING:
            if name in data.by_name:
                points = zip(data.by_name['TASK-COUNT'], data.by_name[name])
                if non_decreasing:
                    self.__dict__[field] = monotonic_pwlin(points)
                else:
                    self.__dict__[field] = piece_wise_linear(points)
Example #4
    def from_file(fname, non_decreasing=True):
        data = load_column_csv(fname, convert=float)
        if 'WSS' not in data.by_name:
            raise IOError('WSS column is missing')

        o = CacheDelay()

        for idx, name in CacheDelay.MAPPING:
            if name in data.by_name:
                points = zip(data.by_name['WSS'], data.by_name[name])
                if non_decreasing:
                    o.mem_hierarchy[idx] = monotonic_pwlin(points)
                else:
                    o.mem_hierarchy[idx] = piece_wise_linear(points)
                o.__dict__[name] = o.mem_hierarchy[idx]
        return o
Example #5
    def load_approximations(self, fname, non_decreasing=True, custom_fields=None,
                            per_cpu_task_counts=False, num_cpus=None):
        if custom_fields is None:
            custom_fields = []

        data = load_column_csv(fname, convert=float)
        if 'TASK-COUNT' not in data.by_name:
            raise IOError("TASK-COUNT column is missing")

        # initialize custom fields, if any
        for (name, field) in custom_fields:
            self.__dict__[field] = const(0)

        for (name, field) in Overheads.FIELD_MAPPING + custom_fields:
            if name in data.by_name:
                points = zip(data.by_name['TASK-COUNT'], data.by_name[name])
                if per_cpu_task_counts:
                    points = [(num_cpus * x, y) for (x, y) in points]
                if non_decreasing:
                    self.__dict__[field] = monotonic_pwlin(points)
                else:
                    self.__dict__[field] = piece_wise_linear(points)
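
When per_cpu_task_counts is set, the sampled task counts are treated as per-CPU counts and rescaled to system-wide counts before the approximation is fitted, which is exactly what the list comprehension above does. A tiny standalone illustration with made-up numbers:

# Made-up sample: per-CPU task counts paired with measured overheads.
num_cpus = 4
points = [(1, 2.5), (2, 3.0), (3, 4.5)]

# Same transform as in load_approximations(): rescale per-CPU task
# counts to system-wide task counts.
points = [(num_cpus * x, y) for (x, y) in points]

print(points)   # [(4, 2.5), (8, 3.0), (12, 4.5)]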
Example #6
    def setUp(self):
        self.f     = m.lin(1, 3)
        self.c     = m.const(123)
        self.pwlin = m.monotonic_pwlin([(0, 1), (1, 0), (1, 4), (2, 5)])
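
The fixture passes a point list to m.monotonic_pwlin that is deliberately not monotone: y drops from 1 to 0, and the x value 1 appears twice. Assuming, as in the sketch above, that monotonicity is enforced by a running maximum over y (an assumption, not something the test itself shows), the clamped samples would come out as follows:

# Hypothetical illustration of a running-maximum clamp applied to the
# fixture's points; this assumes, rather than documents, how
# monotonic_pwlin treats non-monotone input.
points = [(0, 1), (1, 0), (1, 4), (2, 5)]

clamped, best = [], float('-inf')
for x, y in points:
    best = max(best, y)        # never let y decrease
    clamped.append((x, best))

print(clamped)   # [(0, 1), (1, 1), (1, 4), (2, 5)]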