Example #1
 def get_data(self, metric, job_id, user_id=0, params=None):
     metric = metric[0]  # only the first requested metric is used
     if job_id == 0:
         return [{
             'target': 'Error: Please specify valid job_id',
             'datapoints': []
         }]
     # Get components with data during given time range
     self.src.select(['component_id'],
                     from_=[self.schema],
                     where=[['job_id', Sos.COND_EQ, job_id],
                            ['timestamp', Sos.COND_GE, self.start - 300],
                            ['timestamp', Sos.COND_LE, self.end + 300]],
                     order_by='job_time_comp')
     comps = self.src.get_results(limit=self.maxDataPoints)
     if not comps:
         return [{
             'target':
             'Error: component_id not found for Job ' + str(job_id),
             'datapoints': []
         }]
     else:
         compIds = np.unique(comps['component_id'].tolist())
     print(compIds)
     result = []
     datapoints = []
     time_range = self.end - self.start
     if time_range > 4096:
         bin_width = int(time_range // 200)
     else:
         bin_width = 1
     dfs = []
     for comp_id in compIds:
         where_ = [['component_id', Sos.COND_EQ, comp_id],
                   ['job_id', Sos.COND_EQ, job_id],
                   ['timestamp', Sos.COND_GE, self.start],
                   ['timestamp', Sos.COND_LE, self.end]]
         self.src.select([metric, 'timestamp'],
                         from_=[self.schema],
                         where=where_,
                         order_by='job_comp_time')
         # default for now is dataframe - will update with dataset vs dataframe option
         res = self.src.get_df(limit=self.maxDataPoints, index='timestamp')
         if res is None:
             continue
         rs = res.resample(str(bin_width) + 'S').fillna("backfill")
         dfs.append(rs)
     # guard: every component may have been skipped above
     if not dfs:
         return None
     df = pd.concat(dfs, axis=1, ignore_index=True)
     min_datapoints = df.min(axis=1, skipna=True)
     mean_datapoints = df.mean(axis=1, skipna=True)
     max_datapoints = df.max(axis=1, skipna=True)
     res_ = pd.DataFrame({
         "min_" + metric: min_datapoints.values,
         "mean_" + metric: mean_datapoints.values,
         "max_" + metric: max_datapoints.values,
         "timestamp": min_datapoints.index
     })
     return res_
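The pattern worth noting here: each component's series is resampled onto a common bin width, the components are aligned column-wise with pd.concat, and row-wise min/mean/max collapse them into an envelope. A minimal, self-contained sketch of that pattern with synthetic data, using plain pandas in place of the Sos/DataSet machinery:

    import numpy as np
    import pandas as pd

    # Two synthetic per-component series sharing a time axis.
    idx = pd.date_range("2024-01-01", periods=60, freq="1s")
    comps = [pd.Series(np.random.rand(60), index=idx) for _ in range(2)]

    bin_width = 10  # seconds, analogous to time_range // 200 above
    dfs = [s.resample(f"{bin_width}s").mean() for s in comps]

    # Align components column-wise, then reduce across components per bin.
    df = pd.concat(dfs, axis=1, ignore_index=True)
    envelope = pd.DataFrame({
        "min_metric": df.min(axis=1, skipna=True),
        "mean_metric": df.mean(axis=1, skipna=True),
        "max_metric": df.max(axis=1, skipna=True),
    })
    print(envelope)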
Example #2
    def maxrow(self, series):
        """Return the row with the maximum value in the series

        The result returned will have a single row containing the max
        value of the series specified.

        Positional Parameters:
        -- The name of the series
        """
        inp = self.stack.pop()
        res = DataSet().new(1, inp.series)

        src = inp.array(series)[0:inp.get_series_size()]
        row = np.argmax(src)
        for col in range(0, inp.get_series_count()):
            res.array(col)[0] = inp.array(col)[row]
        return self.stack.push(res)
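maxrow() reduces to a single numpy idiom: argmax on one series gives the row index, which is then applied to every column. A standalone illustration with hypothetical column arrays standing in for the DataSet:

    import numpy as np

    # Columnar data: each key is a series, as in the DataSet stack above.
    data = {
        "timestamp": np.array([10, 20, 30, 40]),
        "mem_used": np.array([5.0, 9.5, 7.2, 3.1]),
    }

    row = np.argmax(data["mem_used"])   # index of the series maximum
    max_row = {name: arr[row] for name, arr in data.items()}
    print(max_row)                      # {'timestamp': 20, 'mem_used': 9.5}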
Example #3
    def unique(self, series_name, result=None):
        """Return the unique values of a series

        See numpy.unique for more information.

        Positional Parameters:
        -- A series name

        Keyword Parameters:
        result -- The name for the result series
                  (defaults to series_name + "_unique")
        """
        inp = self.stack.pop()
        nda = inp.array(series_name)[0:inp.get_series_size()]
        if nda.ndim > 1:
            u = np.unique(nda, axis=0)
        else:
            u = np.unique(nda)

        if result is None:
            result = series_name + "_unique"
        res = DataSet()
        res.append_array(len(u), result, u)
        return self.stack.push(res)
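The two numpy calls here behave quite differently: without axis, np.unique flattens its input; with axis=0 it deduplicates whole rows. A quick check:

    import numpy as np

    flat = np.array([3, 1, 3, 2, 1])
    print(np.unique(flat))           # [1 2 3]

    # ndim > 1: axis=0 removes duplicate rows rather than flattening
    table = np.array([[1, 2], [1, 2], [3, 4]])
    print(np.unique(table, axis=0))  # [[1 2]
                                     #  [3 4]]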
Example #4
 def get_data(self, metrics, job_id, user_id=0, params=None):
     if not job_id:
         return [{
             "columns": [{
                 "text": "No Job Id specified"
             }],
             "rows": [],
             "type": "table"
         }]
     xfrm, job = self.derived_metrics(job_id)
     if not job:
         return [{"columns": [], "rows": [], "type": "table"}]
     xfrm.push(job)
     idx = job.series.index('tot_ins')
     series = job.series[idx:]
     xfrm.mean(series,
               group_name='rank',
               keep=job.series[0:idx - 1],
               xfrm_suffix='')
     job = xfrm.pop()
     series_names = [
         'timestamp', 'job_id', 'component_id', 'rank', 'cpi', 'uopi',
         'l1_miss_rate', 'l1_miss_ratio', 'l2_miss_rate', 'l2_miss_ratio',
         'l3_miss_rate', 'l3_miss_ratio', 'fp_rate', 'branch_rate',
         'load_rate', 'store_rate'
     ]
     for name in ('timestamp', 'job_id', 'component_id'):
         series_names.remove(name)
     res_ = DataSet()
     for series in series_names:
         array = job.array(series)
         res_.append_array(len(array), series, np.nan_to_num(array))
     return res_
Example #5
    def extract(self, series_list, rename=None, source=None, rows=None):
        """Extract series from a DataSet

        The result contains the series from the first argument
        optionally renamed as defined by the rename keyword parameter.

        The rename keyword must be None or the same length as the
        first parameter. A None in the list is ignored.

        Positional Parameters:
        -- A list of series names

        Keyword Parameters:
        rename -- A list of names to rename each series to.
        source -- The source DataSet to use instead of TOP
        rows   -- Specifies a subset of rows from the source
        """
        if rename is None:
            series_names = series_list
        else:
            if len(series_list) != len(rename):
                raise ValueError("The rename list must be the same length "
                                 "as the series list")
            series_names = []
            for i in range(0, len(rename)):
                if rename[i] is None:
                    series_names.append(series_list[i])
                else:
                    series_names.append(rename[i])

        if source is None:
            source = self.stack.pop()

        if rows is None:
            series_size = source.get_series_size()
        else:
            series_size = rows[1] - rows[0]

        res = DataSet().new(series_size, series_names)

        for i in range(0, len(series_list)):
            if rows is None:
                res[i] = source[series_list[i]]
            else:
                res[i] = source[series_list[i]][rows[0]:rows[1]]

        return self.stack.push(res)
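Stripped of the DataSet plumbing, extract() is a column subset with optional rename and row slice. A dict-of-arrays sketch of the same semantics (the names here are illustrative):

    import numpy as np

    source = {
        "timestamp": np.arange(5),
        "mem_used": np.array([1.0, 2.0, 3.0, 4.0, 5.0]),
    }
    series_list, rename, rows = ["mem_used"], ["mem"], (1, 4)

    # A None entry in rename keeps the original series name.
    res = {
        (rename[i] or series_list[i]): source[series_list[i]][rows[0]:rows[1]]
        for i in range(len(series_list))
    }
    print(res)  # {'mem': array([2., 3., 4.])}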
Example #6
    def get_data(self, metrics, job_id=0, user_id=0, params=None):
        where_ = [['timestamp', Sos.COND_GE, self.start],
                  ['timestamp', Sos.COND_LE, self.end]]
        self.src.select(metrics + ['timestamp'],
                        from_=[self.schema],
                        where=where_,
                        order_by='time_comp_job')
        # default for now is dataframe - will update with dataset vs dataframe option
        res = self.src.get_df()
        if res is None:
            return None
        mets = res.drop(res.tail(1).index)
        mets = mets.mean()
        time_range = self.end - self.start
        if time_range > 4096:
            bin_width = int(time_range / 200)
        else:
            bin_width = 1
        start_d = dt.datetime.utcfromtimestamp(
            self.start).strftime('%m/%d/%Y %H:%M:%S')
        end_d = dt.datetime.utcfromtimestamp(
            self.end).strftime('%m/%d/%Y %H:%M:%S')
        ts = pd.date_range(start=start_d, end=end_d, periods=len(mets.values))
        series = pd.DataFrame(mets.values, index=ts, dtype=float)
        rs = series.resample(str(bin_width) + 'S').mean()
        dps = rs.values.flatten()
        if len(dps) > 1:
            dps = np.diff(dps)
        # np.diff shortened dps by one entry, so pair each rate with the
        # first len(dps) bin timestamps, converted to milliseconds
        tstamps = [np.int_(pd.Timestamp(t).timestamp() * 1000)
                   for t in rs.index[:len(dps)]]

        res_ = DataSet()
        res_.append_array(len(dps), str(metrics) + " Rate", dps)
        res_.append_array(len(tstamps), 'timestamp', tstamps)

        return res_
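This example turns cumulative counter samples into per-bin rates: resample to a fixed bin width, then first-difference. The arithmetic in isolation, with a synthetic counter and plain pandas in place of the Sos source:

    import numpy as np
    import pandas as pd

    ts = pd.date_range("2024-01-01", periods=30, freq="1s")
    counter = pd.Series(np.cumsum(np.random.randint(0, 100, 30)),
                        index=ts, dtype=float)

    bin_width = 5  # seconds
    binned = counter.resample(f"{bin_width}s").mean()
    rates = np.diff(binned.values)   # change per bin

    # np.diff shortens the array by one, so pair each rate with the
    # first len(rates) bin timestamps, as the method above does.
    stamps = [int(t.timestamp() * 1000) for t in binned.index[:len(rates)]]
    print(list(zip(stamps, rates)))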
Example #7
    def _clone(self, inp, src_names, dst_names, res_size, xfrm_len_fn, axis):
        """Create an empty DataSet whose series mirror the dtypes and
        shapes of src_names in inp, renamed to dst_names."""
        types = {}
        shapes = {}
        for col in range(0, len(dst_names)):
            ser = src_names[col]
            src = inp.array(ser)
            types[dst_names[col]] = src.dtype
            if src.ndim > 1:
                if axis == 1:
                    cols = xfrm_len_fn(src[0])
                    if cols > 1:
                        shapes[dst_names[col]] = (src.shape[0], cols)
                    else:
                        shapes[dst_names[col]] = (src.shape[0],)
                else:
                    shapes[dst_names[col]] = src.shape
            else:
                shapes[dst_names[col]] = src.shape

        return DataSet().new(res_size, dst_names, shapes=shapes, types=types)
Example #8
 def group(self, series_name, value):
     dataSet = DataSet()
     inp = self.pop()
     grp_ser = inp.array(series_name)
     if value.ndim > 0:
         key = bytearray(value)
         grp_mask = [key == bytearray(ent) for ent in grp_ser]
     else:
         grp_mask = grp_ser == value
     grp_len = len(grp_ser[grp_mask])
     for name in inp.series:
         ser = inp.array(name)
         grp = ser[grp_mask]
         dataSet.append_array(grp_len, name, grp)
     dataSet.set_series_size(grp_len)
     self.push(dataSet)
     return dataSet
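The kernel of group() is boolean-mask indexing: build one mask from the key series and apply it to every column. In plain numpy:

    import numpy as np

    ranks = np.array([0, 1, 0, 2, 0])   # key series ('rank', say)
    vals = np.array([10., 20., 30., 40., 50.])

    mask = ranks == 0                   # grp_mask for value == 0
    print(vals[mask])                   # [10. 30. 50.] -- the rank-0 group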
Example #9
    def _job_summary(self, job_id):
        ''' Get summarized information about jobs across components '''
        where_ = [ [ 'job_id', Sos.COND_EQ, job_id ] ]
        self.src.select(self.metrics,
                        from_ = [ self.schema ],
                        where = where_,
                        order_by = 'job_time_comp'
            )

        memUsedRatio = self._mem_used_ratio()
        if memUsedRatio is None:
            return None
        self.xfrm.push(memUsedRatio)
        res = self.xfrm.min([ 'Mem_Used_Ratio' ], group_name='job_id',
                            keep=['component_id'], xfrm_suffix='')
        self.xfrm.push(memUsedRatio)
        counts = [ len(res) ]
        _max = self.xfrm.max([ 'Mem_Used_Ratio' ], group_name='job_id',
                             keep=['component_id'], xfrm_suffix='')
        res = res.concat(_max)
        counts.append(len(_max))
        # thresholds at mean + i*stddev for i = -2..2; count samples
        # below (i < 0) or above (i > 0) each threshold
        i = -2
        mem_used = []
        jid = []
        while i < 3:
            lim = self.mean[[0,0]] + float(i) * self.stdd[[0,0]]
            mem_used.append(lim)
            if i == 0:
                _count = []
            elif i < 0:
                _count = memUsedRatio < ('Mem_Used_Ratio', lim)
            else:
                _count = memUsedRatio > ('Mem_Used_Ratio', lim)

            counts.append(len(_count))
            del _count
            jid.append(job_id)
            i += 1
        _res = DataSet()
        _res.append_array(len(mem_used), 'Mem_Used_Ratio', mem_used)
        _res.append_array(5, 'job_id', jid)
        res = res.concat(_res)
        res.append_array(7, "Analysis", ["Min", "Max", "Stdd-2", "Stdd-1", "Mean", "Stdd+1", "Stdd+2" ])
        res.append_array(7, "Count", counts)
        return res
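The while loop above computes thresholds at mean + i·stddev for i from -2 to 2 and counts the samples below (i < 0) or above (i > 0) each one. The same arithmetic isolated in numpy, with synthetic ratios standing in for Mem_Used_Ratio:

    import numpy as np

    ratios = np.random.rand(1000)       # stand-in for Mem_Used_Ratio
    mean, std = ratios.mean(), ratios.std()

    for i in range(-2, 3):
        lim = mean + i * std
        if i < 0:
            count = np.count_nonzero(ratios < lim)
        elif i > 0:
            count = np.count_nonzero(ratios > lim)
        else:
            count = 0                   # the mean row carries no count
        print(f"mean{i:+d}*std = {lim:.3f}  count = {count}")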
Example #10
    def get_data(self, metrics, job_id=0, user_id=0, params='bins=10'):
        self.bins = 10
        time_range = self.end - self.start
        offset = time_range * .01
        if offset < 1:
            offset = 1
        where_ = [['timestamp', Sos.COND_GE, self.start - offset],
                  ['timestamp', Sos.COND_LE, self.end + offset]]
        if job_id > 0:
            where_.append(['job_id', Sos.COND_EQ, job_id])
        try:
            self.src.select(metrics + ['timestamp', 'component_id'],
                            from_=[self.schema],
                            where=where_,
                            order_by='time_comp_job')
            # default for now is dataframe - will update with dataset vs dataframe option
            self.xfrm = Transform(self.src, None, limit=self.mdp)
            resp = self.xfrm.begin()
            if resp is None:
                print('resp == None')
                return None

            while resp is not None:
                resp = next(self.xfrm)
                if resp is not None:
                    self.xfrm.concat()

            self.xfrm.diff(metrics,
                           group_name="component_id",
                           keep=['timestamp'],
                           xfrm_suffix='')

            data = self.xfrm.pop()
            hsum = None
            data_time = (data.array('timestamp')[-1].astype('float') -
                         data.array('timestamp')[0].astype('float'))
            data_time = data_time / 1000000
            if data_time < time_range:
                bins = int(data_time / time_range * 20)
                if bins < 2:
                    bins = 2
            else:
                bins = 20
            for met_diff in metrics:
                os = data.array(met_diff)

                h = np.histogram(data.array('timestamp').astype('float'),
                                 bins=bins,
                                 weights=os,
                                 density=False)
                if hsum is None:
                    ts = h[1][:-1] / 1000
                    hsum = np.zeros(h[0].shape)
                hsum += h[0]
            res = DataSet()
            res.append_array(len(hsum), str(metrics), hsum)
            res.append_array(len(ts), 'timestamp', ts)
            return res
        except Exception as e:
            a, b, c = sys.exc_info()
            print(str(e) + ' ' + str(c.tb_lineno))
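The trick in this example is np.histogram's weights parameter: binning timestamps with per-sample metric deltas as weights yields the summed delta per time bin, i.e. a time-binned rate series. A minimal sketch with synthetic data:

    import numpy as np

    t = np.linspace(0, 100, 500)      # sample timestamps
    deltas = np.random.rand(500)      # per-sample metric differences

    sums, edges = np.histogram(t, bins=20, weights=deltas)
    # sums[i] is the total delta falling in [edges[i], edges[i+1]);
    # the left edges serve as bin timestamps, as in the code above.
    print(list(zip(edges[:-1], sums)))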
Example #11
    def histogram(self,
                  series_list,
                  xfrm_suffix="_hist",
                  bins=10,
                  range=None,
                  weights=None,
                  density=None):
        """Compute the histogram for each series

        Keyword Parameters:
        bins : int or sequence of scalars or str, optional

            If bins is an int, it defines the number of equal-width
            bins in the given range (10, by default). If bins is a
            sequence, it defines the bin edges, including the
            rightmost edge, allowing for non-uniform bin widths.

            If bins is a string, it defines the method used to
            calculate the optimal bin width, as defined by
            numpy.histogram_bin_edges.

        range : (float, float), optional

            The lower and upper range of the bins. If not provided,
            range is simply (series.min(), series.max()). Values
            outside the range are ignored. The first element of the
            range must be less than or equal to the second. range
            affects the automatic bin computation as well. While bin
            width is computed to be optimal based on the actual data
            within range, the bin count will fill the entire range
            including portions containing no data.

        weights : array_like, optional

            An array of weights, of the same shape as the series. Each
            value in the series only contributes its associated weight
            towards the bin count (instead of 1). If density is True,
            the weights are normalized, so that the integral of the
            density over the range remains 1.

        density : bool, optional

            If False, the result will contain the number of samples in
            each bin. If True, the result is the value of the
            probability density function at the bin, normalized such
            that the integral over the range is 1. Note that the sum
            of the histogram values will not be equal to 1 unless bins
            of unity width are chosen; it is not a probability mass
            function.

        Result:

        For each series in the input list, histogram() pushes two
        DataSets; one containing a series of bin values, and the other
        containing an array of bin edges. The bin-edges DataSet has
        one more datum than the bin values.
        """
        hist = DataSet()
        edges = DataSet()
        inp = self.stack.pop()
        series_size = inp.get_series_size()
        for ser in series_list:
            src = inp.array(ser)[0:series_size]
            res = np.histogram(src,
                               bins=bins,
                               range=range,
                               weights=weights,
                               density=density)
            hist.append_array(len(res[0]), ser + xfrm_suffix, res[0])
            edges.append_array(len(res[1]), ser + "_edges", res[1])
        self.stack.push(hist)
        return self.stack.push(edges)
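Two properties from the docstring are easy to verify with numpy directly: the edges array has one more entry than the values array, and with density=True the histogram integrates to 1 over the given range:

    import numpy as np

    data = np.random.normal(size=1000)
    hist, edges = np.histogram(data, bins=10, range=(-3, 3), density=True)

    assert len(edges) == len(hist) + 1      # bin edges carry one extra datum
    print(np.sum(hist * np.diff(edges)))    # ~1.0: density integrates to one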
Example #12
    def get_lustre_avg(self, metrics):
        try:
            sumbytes = self._sum_metrics(metrics)
            if sumbytes is None:
                return None
            ret_bps = []
            ret_jobs = []
            ret_name = []
            ret_start = []
            ret_end = []
            ret_user = []
            ret_state = []
            ret_size = []
            i = 0
            jids = self.xfrm.job_ids
            while i < self.threshold:
                if len(sumbytes) < 1:
                    break
                index, val = max(enumerate(sumbytes),
                                 key=operator.itemgetter(1))
                where_ = [['job_id', Sos.COND_EQ, jids[index]]]
                if self.user_id != 0:
                    where_.append(['uid', Sos.COND_EQ, self.user_id])
                self.src.select(self.job_metrics,
                                from_=['mt-slurm'],
                                where=where_,
                                order_by='job_rank_time')
                job = self.src.get_results()
                if job is None:
                    sumbytes = np.delete(sumbytes, index)
                    jids = np.delete(jids, index)
                    continue
                job_start = np.min(job.array('job_start'))
                if job.array('job_end')[0] < 1:
                    job_end = time.time()
                    ret_end.append(job_end * 1000)
                    ret_state.append("In process")
                else:
                    job_end = np.max(job.array('job_end'))
                    ret_end.append(job_end * 1000)
                    ret_state.append("Completed")
                ret_bps.append(val / (job_end - job_start))
                ret_jobs.append(job.array('job_id')[0])
                ret_size.append(job.array('job_size')[0])
                ret_name.append(job.array('job_name')[0].decode())
                ret_start.append(job_start * 1000)
                ret_user.append(job.array('job_user')[0].decode())

                # remove job with highest bps from list of jobs
                sumbytes = np.delete(sumbytes, index)
                jids = np.delete(jids, index)
                i += 1
            res_ = DataSet()
            if not self._meta:
                res_.append_array(len(ret_bps), 'bps', ret_bps)
            else:
                res_.append_array(len(ret_bps), 'ios', ret_bps)
            res_.append_array(len(ret_jobs), 'job_id', ret_jobs)
            res_.append_array(len(ret_size), 'ranks', ret_size)
            res_.append_array(len(ret_name), 'job_name', ret_name)
            res_.append_array(len(ret_user), 'job_user', ret_user)
            res_.append_array(len(ret_start), 'job_start', ret_start)
            res_.append_array(len(ret_end), 'job_end', ret_end)
            res_.append_array(len(ret_state), 'job_state', ret_state)
            return res_
        except Exception as e:
            a, b, c = sys.exc_info()
            print(str(e) + ' ' + str(c.tb_lineno))
            return None
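The selection loop is a top-`threshold` pick: repeatedly take the max of sumbytes and delete it. If no jobs had to be skipped, np.argsort expresses the same selection directly (hypothetical values):

    import numpy as np

    sumbytes = np.array([5.0, 42.0, 17.0, 8.0])
    jids = np.array([101, 102, 103, 104])
    threshold = 2

    order = np.argsort(sumbytes)[::-1][:threshold]  # indices of the k largest
    for idx in order:
        print(jids[idx], sumbytes[idx])             # 102 42.0, then 103 17.0

The incremental max-and-delete form in the method has one advantage over a one-shot sort: a job_id whose slurm lookup returns nothing is dropped without consuming one of the threshold slots.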
Example #13
    def papi_rank_stats(self, xfrm, job):
        """Return min/max/standard deviation/mean for papi derived metrics"""
        try:

            stats = DataSet()
            xfrm.push(job)
            events = job.series
            idx = events.index('rank')
            events = events[idx + 1:]
            # compute the rank containing the minima for each event
            mins = DataSet()
            for name in events:
                xfrm.dup()
                xfrm.min([name], group_name='rank')
                xfrm.minrow(name + '_min')
                xfrm.top().rename('rank', name + '_min_rank')
                mins.append_series(xfrm.pop())

            # compute the rank containing the maxima for each event
            maxs = DataSet()
            for name in events:
                xfrm.dup()
                xfrm.max([name], group_name='rank')
                xfrm.maxrow(name + '_max')
                xfrm.top().rename('rank', name + '_max_rank')
                maxs.append_series(xfrm.pop())

            # compute the standard deviation
            xfrm.dup()
            xfrm.std(events)
            stats.append_series(xfrm.pop())

            # mean
            xfrm.mean(events)
            stats.append_series(xfrm.pop())

            return (events, mins, maxs, stats)
        except Exception as e:
            a, b, c = sys.exc_info()
            print('papi_rank_stats: Error: ' + str(e) + ' ' + str(c.tb_lineno))
            return (None, None, None, None)
Example #14
 def get_data(self, metrics=None, job_id=None, user_id=0, params=None):
     try:
         where_ = [
             [ 'timestamp', Sos.COND_GE, self.start ],
             [ 'timestamp', Sos.COND_LE, self.end ],
             [ 'job_start', Sos.COND_GE, 1 ]
         ]
         self.metrics = [ 'job_id', 'job_size', 'uid', 'job_start',
                          'job_end', 'job_status', 'task_exit_status' ]
         self.src.select(self.metrics,
                    from_ = [ 'mt-slurm' ],
                    where = where_,
                    order_by = 'job_rank_time'
         )
         self.xfrm = Transform(self.src, None, limit=1000000)
         res = self.xfrm.begin()
         if not res:
             return None
         while res is not None:
             res = next(self.xfrm)
             if res is not None:
                 self.xfrm.concat()
         result = self.xfrm.pop()
         cols = [ { "text" : "job_id" },
                  { "text" : "CPU Dashboards" },
                  { "text" : "Cache Dashboards" },
                  { "text" : "job_size" },
                  { "text" : "user_id" },
                  { "text" : "job_status" },
                  { "text" : "job_start" },
                  { "text" : "job_end" },
                  { "text" : "task_exit_status" }
                ]
         res_ = DataSet()
         jids = []
         cpu = []
         cache = []
         jsize = []
         uid = []
         jstatus = []
         jstart = []
         jend = []
         texit = []
         i = 0
         while i < result.get_series_size() - 1:
             if result.array('job_id')[i] not in jids:
                 jids.append(result.array('job_id')[i])
                 cpu.append('CPU Stats')
                 cache.append('Cache Stats')
                 jsize.append(result.array('job_size')[i])
                 uid.append(result.array('uid')[i])
                 jstatus.append(self.job_status_str[result.array('job_status')[i]])
                 jstart.append(result.array('job_start')[i] * 1000)
                 if result.array('job_end')[i] != 0:
                     jend.append(result.array('job_end')[i] * 1000)
                 else:
                     jend.append(time.time()*1000)
                 texit.append(result.array('task_exit_status')[i])
             i += 1
         res_.append_array(len(jids), 'job_id', jids)
         res_.append_array(len(cpu), 'CPU Stats', cpu)
         res_.append_array(len(cache), 'Cache Stats', cache)
         res_.append_array(len(jsize), 'job_size', jsize)
         res_.append_array(len(uid), 'user_id', uid)
         res_.append_array(len(jstatus), 'job_status', jstatus)
         res_.append_array(len(jstart), 'job_start', jstart)
         res_.append_array(len(jend), 'job_end', jend)
         res_.append_array(len(texit), 'task_exit_status', texit)
         return res_
     except Exception as e:
         a, b, c = sys.exc_info()
         print(str(e)+' '+str(c.tb_lineno))
         return None
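The while loop keeps the first sample per job_id. np.unique with return_index gives the same first-occurrence selection vectorized (though sorted by job_id rather than encounter order):

    import numpy as np

    job_ids = np.array([7, 7, 9, 9, 9, 12])
    sizes = np.array([4, 4, 8, 8, 8, 2])

    _, first = np.unique(job_ids, return_index=True)
    print(job_ids[first], sizes[first])   # [ 7  9 12] [4 8 2]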
Example #15
 def get_data(self, metrics, job_id, user_id=0, params=None):
     try:
         result = {}
         columns = [{
             "text": "Metric"
         }, {
             "text": "Min"
         }, {
             "text": "Rank w/Min"
         }, {
             "text": "Max"
         }, {
             "text": "Rank w/Max"
         }, {
             "text": "Mean"
         }, {
             "text": "Standard Deviation"
         }]
         result['columns'] = columns
         if not job_id:
             print('no job_id')
             return None
         xfrm, job = self.derived_metrics(job_id)
         events, mins, maxs, stats = self.papi_rank_stats(xfrm, job)
         res_ = DataSet()
         mins_ = []
         minranks = []
         maxs_ = []
         mxranks = []
         means = []
         stds_ = []
         for name in events:
             mins_.append(np.nan_to_num(mins.array(name + '_min')[0]))
             minranks.append(
                 np.nan_to_num(mins.array(name + '_min_rank')[0]))
             maxs_.append(np.nan_to_num(maxs.array(name + '_max')[0]))
             mxranks.append(np.nan_to_num(
                 maxs.array(name + '_max_rank')[0]))
             means.append(np.nan_to_num(stats.array(name + '_mean')[0]))
             stds_.append(np.nan_to_num(stats.array(name + '_std')[0]))
         res_.append_array(len(events), 'Metric', events)
         res_.append_array(len(mins_), 'Min Value', mins_)
         res_.append_array(len(minranks), 'Rank w/Min', minranks)
         res_.append_array(len(maxs_), 'Max Value', maxs_)
         res_.append_array(len(mxranks), 'Rank w/Max', mxranks)
         res_.append_array(len(means), 'Mean Value', means)
         res_.append_array(len(stds_), 'Stdd', stds_)
         return res_
     except Exception as e:
         a, b, c = sys.exc_info()
         print(str(e) + ' ' + str(c.tb_lineno))
         return None