Example no. 1
    def _createRRD(self, filename):
        """ create an rrd file which fits our requirements """

        # Let's set up some data sources for our RRD
        dss = []
        for source in dataSources:
            dss.append(DS(dsName=source, dsType='GAUGE', heartbeat=900))

        # And now let's set up how our RRD will archive the data
        rras = []
        # 1 days-worth of one-minute samples --> 60/1 * 24
        rra1 = RRA(cf='AVERAGE', xff=0, steps=1, rows=1440)
        # 7 days-worth of five-minute samples --> 60/5 * 24 * 7
        rra2 = RRA(cf='AVERAGE', xff=0, steps=5, rows=2016)
        # 30 days-worth of one hour samples --> 60/60 * 24 * 30
        rra3 = RRA(cf='AVERAGE', xff=0, steps=60, rows=720)
        # 1 year-worth of half day samples --> 60/60 * 24/12 * 365
        rra4 = RRA(cf='AVERAGE', xff=0, steps=720, rows=730)
        rras.extend([rra1, rra2, rra3, rra4])

        # With those set up, we can now create the RRD
        myRRD = RRD(filename,
                    step=step,
                    ds=dss,
                    rra=rras,
                    start=int(time.time()))
        myRRD.create(debug=False)
        return myRRD
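
A quick sanity check of the retention arithmetic in the comments above; it assumes the module-level step used by this snippet is 60 seconds, which is not shown in the code:

step = 60  # assumed primary data point interval, in seconds
for steps, rows in [(1, 1440), (5, 2016), (60, 720), (720, 730)]:
    days = step * steps * rows / 86400.0
    print("steps=%3d rows=%4d -> %5.1f days" % (steps, rows, days))
# prints 1.0, 7.0, 30.0 and 365.0 days for the four archives above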
Example no. 2
def create(namerrd,fieldname,starttime,typeofinfo):
    try:
        dataSources = []
        roundRobinArchives = [] 
        dataSources = get_ds(fieldname)
    
        if typeofinfo == 'hardware' :
            dict = {}
            dict = config_info['hardware']
            s = dict['func']
            step = dict['step']
            funcProfile = globals()[s]
            roundRobinArchives = funcProfile()
        
        elif typeofinfo == 'netusage':
            dict = {}
            dict = config_info['netusage']
            s = dict['func']
            step = int(dict['step'])
            funcProfile = globals()[s]
            roundRobinArchives = funcProfile()  
        
        myRRD = RRD(filename=namerrd,ds=dataSources, rra=roundRobinArchives, start=starttime,step=step)
        myRRD.create()
    
        return (True, 'Create was successful.')
    
    except Exception,e:
        
        return (False,str(e))
Example no. 3
    def __init__(self):
        self.time = 1164783600  # small numbers of seconds since the epoch confuse rrdtool
        self.prevstamptime = int(self.time)

        ds = DataSource(ds_name='utilizationds', ds_type='GAUGE', heartbeat=1)
        rra = RRA(cf='AVERAGE', xff=0.1, steps=1, rows=1200)
        self.rrd = RRD("/tmp/utilization.rrd",
                       ds=[ds],
                       rra=[rra],
                       start=self.time)
        self.rrd.create()

        self.introducer = q = Introducer(self)
        self.all_nodes = [
            Node(randomid(), q, self) for i in range(self.NUM_NODES)
        ]
        q.all_nodes = self.all_nodes
        self.next = []
        self.schedule_events()
        self.verbose = False

        self.added_files = 0
        self.added_data = 0
        self.deleted_files = 0
        self.published_files = []
        self.failed_files = 0
        self.lost_data_bytes = 0  # bytes deleted to make room for new shares
Example no. 4
    def test_creationDSsAndRRAs(self):
        dss1 = []
        rras1 = []
        filename = '/tmp/test1.rrd'
        dss1.append(DataSource(dsName='speed', dsType='COUNTER',
                               heartbeat=600))
        rras1.append(RRA(cf='AVERAGE', xff=0.5, steps=1, rows=24))
        rras1.append(RRA(cf='AVERAGE', xff=0.5, steps=6, rows=10))
        rrd1 = RRD(filename, ds=dss1, rra=rras1, start=920804400)
        self.assertEqual(repr(rrd1.ds), "[DS:speed:COUNTER:600:U:U]")
        self.assertEqual(repr(rrd1.rra),
                         "[RRA:AVERAGE:0.5:1:24, RRA:AVERAGE:0.5:6:10]")

        filename = '/tmp/test2.rrd'
        rrd2 = RRD(filename, start=920804400)
        self.assertEqual(rrd2.ds, [])
        self.assertEqual(rrd2.rra, [])

        dss3 = []
        rras3 = []
        filename = '/tmp/test3.rrd'
        dss3.append(DataSource(dsName='speed', dsType='COUNTER',
                               heartbeat=300))
        rras3.append(RRA(cf='AVERAGE', xff=0.5, steps=2, rows=24))
        rras3.append(RRA(cf='AVERAGE', xff=0.5, steps=12, rows=10))
        rrd3 = RRD(filename, ds=dss3, rra=rras3, start=920804400)

        self.assertEqual(repr(rrd3.ds), "[DS:speed:COUNTER:300:U:U]")
        self.assertEqual(repr(rrd3.rra),
                         "[RRA:AVERAGE:0.5:2:24, RRA:AVERAGE:0.5:12:10]")
Example no. 5
def RrdCreate(rrdfile):
    '''Creates a RRD database.'''
    dataSources = []
    roundRobinArchives = []
    dataSources.append(DataSource(
        dsName='temperature', dsType='GAUGE', heartbeat=600,
        minval=-50, maxval=100))
    dataSources.append(DataSource(
        dsName='humidity', dsType='GAUGE', heartbeat=600,
        minval=0, maxval=100))
    dataSources.append(DataSource(
        dsName='mq9', dsType='GAUGE', heartbeat=600))
    dataSources.append(DataSource(
        dsName='dust_pc', dsType='GAUGE', heartbeat=600, minval=0))
    dataSources.append(DataSource(
        dsName='dust_raw', dsType='GAUGE', heartbeat=600))
    # Keep all values for 10 days
    roundRobinArchives.append(RRA(cf='AVERAGE', xff=0.5, steps=1,
                                  rows=10*24*60))
    # Keep 15-minute averages for one year
    roundRobinArchives.append(RRA(cf='AVERAGE', xff=0.5, steps=15,
                                  rows=365*24*4))
    # Keep 1-hour averages for 10 years
    roundRobinArchives.append(RRA(cf='AVERAGE', xff=0.5, steps=60,
                                  rows=10*365*24))
    myRRD = RRD(
        rrdfile, step=60, ds=dataSources, rra=roundRobinArchives)
    myRRD.create()
Example no. 6
def graph(req, rrd):
        if os.path.isfile(rrdPath+rrd):
                filename = rrd
                rrd = RRD(rrdPath+rrd, mode='r')
                info = rrd.getData()
                info['filename'] = filename
                return render_to_response('rrd/graph.html', {'info': info})
Example no. 7
    def rrdtool_log(self, count, category, key):
        """ Log a message to a category's corresponding rrdtool database """

        # rrdtool doesn't like spaces
        key = key.replace(' ', '_')

        filename = rrd_dir + '/' + category + '/' + key + '.rrd'

        if not category in rrd_categories:
            raise ValueError, "Invalid category %s" % category

        if not os.path.isfile(filename):
            self.rrdtool_create(filename)
            # rrdtool complains if you stuff data into a freshly created
            # database less than one second after you created it.  We could do a
            # number of things to mitigate this:
            #   - sleep for 1 second here
            #   - return from this function and not log anything only on the
            #     first time we see a new data key (a new country, a new
            #     filename).
            #   - pre-create our databases at startup based on magical knowledge
            #     of what keys we're going to see coming over the AMQP line
            #
            # For now, we're just going to return.
            return

        # TODO -- Is this an expensive operation (opening the RRD)?  Can we make
        # this happen less often?
        rrd = RRD(filename)

        rrd.bufferValue(str(int(time.time())), str(count))

        # This flushes the values to file.
        # TODO -- Can we make this happen less often?
        rrd.update()
Example no. 8
    def create(self):
        """ 
        Creates a new RRD database 
        """
        ds1 = DS(dsName=self.value_name, dsType=self.type, heartbeat=self.heartbeat)
        dss = [ds1]

        rras = []
        # 1 days-worth of n heartbeat samples --> 60/1 * 24
        rra1 = RRA(cf="AVERAGE", xff=0.5, steps=1, rows=int(self.heartbeat / 1.0 * 24))
        # 7 days-worth of n heartbeat samples --> 60/5 * 24 * 7
        rra2 = RRA(cf="AVERAGE", xff=0.5, steps=5, rows=int(self.heartbeat / 5.0 * 24 * 7))
        # 30 days-worth of n heartbeat samples --> 60/60 * 24 * 30
        rra3 = RRA(cf="AVERAGE", xff=0.5, steps=60, rows=int(self.heartbeat / 60.0 * 24 * 30))
        # 365 days worth of n heartbeat samples --> 60/120 * 24 * 365
        rra4 = RRA(cf="AVERAGE", xff=0.5, steps=120, rows=int(self.heartbeat / 120.0 * 24 * 365))
        # 10 years worth of n heartbeat samples --> 60/180 * 24 * 365 * 10
        rra5 = RRA(cf="AVERAGE", xff=0.5, steps=180, rows=int(self.heartbeat / 180.0 * 24 * 365 * 10))

        rras.extend([rra1, rra2, rra3, rra4, rra5])

        rrd = RRD(
            os.path.join("history/", "%s.rrd" % self.value_id), step=self.heartbeat, ds=dss, rra=rras, start=self.time
        )
        rrd.create(debug=False)
Example no. 9
def RrdCreate(rrdfile):
    '''Creates a RRD database.'''
    dataSources = []
    roundRobinArchives = []
    dataSources.append(
        DataSource(dsName='temperature',
                   dsType='GAUGE',
                   heartbeat=600,
                   minval=-50,
                   maxval=100))
    dataSources.append(
        DataSource(dsName='humidity',
                   dsType='GAUGE',
                   heartbeat=600,
                   minval=0,
                   maxval=100))
    dataSources.append(DataSource(dsName='mq9', dsType='GAUGE', heartbeat=600))
    dataSources.append(
        DataSource(dsName='dust_pc', dsType='GAUGE', heartbeat=600, minval=0))
    dataSources.append(
        DataSource(dsName='dust_raw', dsType='GAUGE', heartbeat=600))
    # Keep all values for 10 days
    roundRobinArchives.append(
        RRA(cf='AVERAGE', xff=0.5, steps=1, rows=10 * 24 * 60))
    # Keep 15-minute averages for one year
    roundRobinArchives.append(
        RRA(cf='AVERAGE', xff=0.5, steps=15, rows=365 * 24 * 4))
    # Keep 1-hour averages for 10 years
    roundRobinArchives.append(
        RRA(cf='AVERAGE', xff=0.5, steps=60, rows=10 * 365 * 24))
    myRRD = RRD(rrdfile, step=60, ds=dataSources, rra=roundRobinArchives)
    myRRD.create()
Example no. 10
 def load_rrd(cls, filepath, options, default_options):
     take_param = lambda k: (k, options[k]
                             if k in options else default_options.get(k))
     kargs = dict(map(take_param, ['start', 'end', 'resolution', 'cf']))
     rrd = RRD(filepath, mode='r', backend=bindings)
     rrd_data = rrd.fetch(**kargs)
     return rrd_data.get('42')
Example no. 11
def graph(req, rrd):
    if os.path.isfile(rrdPath + rrd):
        filename = rrd
        rrd = RRD(rrdPath + rrd, mode='r')
        info = rrd.getData()
        info['filename'] = filename
        return render_to_response('rrd/graph.html', {'info': info})
Example no. 12
class RRDB(object):

  def __init__(self, filename):
    self.db = RRD(filename)

  def store(self, values):
    self.db.bufferValue(int(time.time()), *values)
    self.db.update()

  @classmethod
  def generate_archives(cls, step, rows=1440,
                        day_periods=[2, 14, 60, 180, 720]):
    rras = []
    for days in day_periods:
      # how many primary data points (we get one each step)
      # go into a consolidated data point
      PDPs = 86400 * days / step / rows
      rras.extend([
        RRA(cf='AVERAGE', xff=0.1, rows=rows, steps=PDPs),
        RRA(cf='MIN', xff=0.1, rows=rows, steps=PDPs),
        RRA(cf='MAX', xff=0.1, rows=rows, steps=PDPs),
      ])
    return rras

  @classmethod
  def create_db(cls):
    raise NotImplementedError("Create DB is not implemented")

  def graph(self, outfile):
    raise NotImplementedError("graph method should be overridden")
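
To make the PDPs expression in generate_archives concrete, here is a worked example assuming a 60-second step and the method's default rows=1440; the original relies on Python 2 integer division, so // is used here to keep the result identical on Python 3:

step, rows = 60, 1440  # step is an assumption; rows is the method's default
for days in [2, 14, 60, 180, 720]:
    pdps = 86400 * days // (step * rows)  # primary data points per consolidated point
    print("%3d days -> %3d PDPs per CDP" % (days, pdps))
# with step=60 and rows=1440 this works out to exactly `days` PDPs per CDP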
Example no. 13
def drawsimplegraph(req, rrdpathname, rrd, ds, rra, height=600, width=1200, start='default', end='default'):
    filename = Rrdpath.objects.get(name=str(rrdpathname)).path
    fullfilename = filename + '/' + rrd
    if os.path.isfile(fullfilename):
        rrdobj = RRD(fullfilename, mode='r')
        info = rrdobj.getData()
        ds = str(ds)
        rra = str(rra)
        #step = info['step']
        if (start == 'default'):
            start = info['lastupdate'] - (7 * 86400)
        if (end == 'default'):
            end = info['lastupdate']
        gitems = []
        gitems.append(DEF(rrdfile=fullfilename, vname="fudge", dsName=ds))
        gitems.append(CDEF(vname='kmh', rpn='%s' % gitems[0].vname))
        gitems.append(AREA(defObj=gitems[1], color='#006600', legend=rrd + " " + str(start) + " " + str(end)))
        g = Graph('-', imgformat='png', start=str(start), end=str(end), vertical_label=ds)
        g.title = rrd + '/' + ds + '/' + rra
        g.full_size_mode = True
        g.only_graph = req.GET.get('onlygraph', False)
        g.no_legend = req.GET.get('nolegend', True)
        g.height = req.GET.get('height', 600)
        g.width = req.GET.get('width', 1200)
        g.data.extend(gitems)
        a = g.write()
        return HttpResponse(a, content_type="image/png")
Example no. 14
    def _rrdtool_log(self, count, filename):
        """ Workhorse for rrdtool logging.  Shouldn't be called directly. """

        if not os.path.isfile(filename):
            self.rrdtool_create(filename)
            # rrdtool complains if you stuff data into a freshly created
            # database less than one second after you created it.  We could do a
            # number of things to mitigate this:
            #   - sleep for 1 second here
            #   - return from this function and not log anything only on the
            #     first time we see a new data key (a new country, a new
            #     filename).
            #   - pre-create our databases at startup based on magical knowledge
            #     of what keys we're going to see coming over the AMQP line
            #
            # For now, we're just going to return.
            return

        # TODO -- Is this an expensive operation (opening the RRD)?  Can we make
        # this happen less often?
        rrd = RRD(filename)

        rrd.bufferValue(str(int(time.time())), str(count))

        # This flushes the values to file.
        # TODO -- Can we make this happen less often?
        rrd.update()
Example no. 15
    def main(self, argv):
        """
        Create an RRD file with values 0-9 entered at 1 second intervals from
        1980-01-01 00:00:00 (the first date that rrdtool allows)
        """
        from pyrrd.rrd import DataSource, RRA, RRD
        start = int(datetime(1980, 1, 1, 0, 0).strftime('%s'))
        dss = []
        rras = []
        filename = os.path.join(self.build_dir, 'test.rrd')

        rows = 12
        step = 10

        dss.append(
            DataSource(dsName='speed', dsType='GAUGE', heartbeat=2 * step))
        rras.append(RRA(cf='AVERAGE', xff=0.5, steps=1, rows=rows))
        rras.append(RRA(cf='AVERAGE', xff=0.5, steps=12, rows=rows))
        my_rrd = RRD(filename, ds=dss, rra=rras, start=start, step=step)
        my_rrd.create()

        for i, t in enumerate(
                range(start + step, start + step + (rows * step), step)):
            self.log.debug('DATA: %s %s (%s)' %
                           (t, i, datetime.fromtimestamp(t)))
            my_rrd.bufferValue(t, i)

        # Add further data 1 second later to demonstrate that the rrd
        # lastupdatetime does not necessarily fall on a step boundary
        t += 1
        i += 1
        self.log.debug('DATA: %s %s (%s)' % (t, i, datetime.fromtimestamp(t)))
        my_rrd.bufferValue(t, i)

        my_rrd.update()
Example no. 16
 def init_rdd(self):
     #   Initializes the RRD archive.
     #   Creates a new one if it is absent or a reset was requested.
     filename = options.rrd_file
     if not options.rrd_reset and access(filename, F_OK):
         myRRD = RRD(filename)
     else:
         heartbeat=options.stats_period*2
         dataSources = [
             DataSource(dsName='agents_u', dsType='ABSOLUTE', heartbeat=heartbeat),
             DataSource(dsName='t_unique', dsType='ABSOLUTE', heartbeat=heartbeat),
             DataSource(dsName='t_started', dsType='ABSOLUTE', heartbeat=heartbeat),
             DataSource(dsName='t_completed', dsType='ABSOLUTE', heartbeat=heartbeat),
             DataSource(dsName='t_failed', dsType='ABSOLUTE', heartbeat=heartbeat),
             DataSource(dsName='bytes', dsType='ABSOLUTE', heartbeat=heartbeat),
             DataSource(dsName='cpu', dsType='DERIVE', heartbeat=heartbeat,minval=0),
             DataSource(dsName='duration', dsType='ABSOLUTE', heartbeat=heartbeat),
             DataSource(dsName='duration_avg', dsType='GAUGE', heartbeat=heartbeat),
         ]
         roundRobinArchives = []
         for (_steps, _rows) in options.rrd_rra:
             roundRobinArchives.append(RRA(cf='AVERAGE', xff=0.5, steps=_steps, rows=_rows))
             roundRobinArchives.append(RRA(cf='MAX', xff=0.5, steps=_steps, rows=_rows))
         myRRD = RRD(filename, ds=dataSources, rra=roundRobinArchives, step=options.stats_period)
         myRRD.create(debug=True)
     return myRRD
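
The loop above expects options.rrd_rra to be an iterable of (steps, rows) pairs, each producing one AVERAGE and one MAX archive; a purely hypothetical value (not taken from the original project) might look like:

rrd_rra = [(1, 1440), (5, 2016), (60, 720)]  # hypothetical (steps, rows) pairs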
Example no. 17
    def create(self):

        if os.path.exists(self.rrdfile):
            self.rrd = RRD(self.rrdfile)
            return
        
        dss = []
        
        ds1 = DS(dsName="requests", dsType="COUNTER",  heartbeat=120, minval=0, maxval=100000000)
        ds2 = DS(dsName="connections", dsType="ABSOLUTE",  heartbeat=120, minval=0, maxval=60000)
        ds3 = DS(dsName="reading", dsType="ABSOLUTE",  heartbeat=120, minval=0, maxval=60000)
        ds4 = DS(dsName="writing", dsType="ABSOLUTE",  heartbeat=120, minval=0, maxval=60000)
        ds5 = DS(dsName="waiting", dsType="ABSOLUTE",  heartbeat=120, minval=0, maxval=60000)
        dss.extend([ds1,ds2,ds3,ds4,ds5])
        
        rras = []
        rra1 = RRA(cf="AVERAGE", xff=0.5, steps=1, rows=2880)    	
        rra2 = RRA(cf="AVERAGE", xff=0.5, steps=30, rows=672)
        rra3 = RRA(cf="AVERAGE", xff=0.5, steps=120, rows=732)
        rra4 = RRA(cf="AVERAGE", xff=0.5, steps=720, rows=1460)
        rras.extend([rra1, rra2, rra3, rra4])
        
        self.rrd = RRD(self.rrdfile, step=60, ds=dss, rra=rras)
        self.rrd.create(debug=False)
        time.sleep(2)
Example no. 18
def data(req, rrd, ds, rra):
    if os.path.isfile(rrdPath + rrd):
        rrd = RRD(rrdPath + rrd, mode='r')
        info = rrd.getData()
        step = info[rra]['step']
        start = info['lastupdate'] - info[rra]['rows'] * step
        data = rrd.fetch(resolution=step, start=start, end=info['lastupdate'])
        return HttpResponse(simplejson.dumps(data))
Example no. 19
 def insert(self):
     """ 
     Inserts new data in the RRD database 
     """
     rrd = RRD(os.path.join("history/", "%s.rrd" % self.value_id))
     rrd.bufferValue(self.time, self.value_value)
     rrd.update()
     print self.time, self.value_value
Example no. 20
def data(req, rrd, ds, rra):
    if os.path.isfile(rrdPath + rrd):
        rrd = RRD(rrdPath + rrd, mode='r')
        info = rrd.getData()
        step = info[rra]['step']
        start = info['lastupdate'] - info[rra]['rows'] * step
        data = rrd.fetch(resolution=step, start=start, end=info['lastupdate'])
        return HttpResponse(simplejson.dumps(data))
Example no. 21
 def setUp(self):
     ds = [DataSource(dsName="speed", dsType="COUNTER", heartbeat=600)]
     rra = [
         RRA(cf="AVERAGE", xff=0.5, steps=1, rows=24),
         RRA(cf="AVERAGE", xff=0.5, steps=6, rows=10)
     ]
     self.rrdfile = tempfile.NamedTemporaryFile()
     self.rrd = RRD(self.rrdfile.name, ds=ds, rra=rra, start=920804400)
     self.rrd.create()
Example no. 22
def oldgraph(req, rrdpathname, rrd):
    filename = Rrdpath.objects.get(name=str(rrdpathname)).path
    fullfilename = filename + '/' + rrd
    if os.path.isfile(fullfilename):
        rrdobj = RRD(fullfilename, mode='r')
        info = rrdobj.getData()
        info['filename'] = filename
        info['rrdpathname'] = str(rrdpathname)
        info['rrd'] = rrd
        return render_to_response('pydrraw/graph.html', {'info': info})
Example no. 23
def raw(req, rrdpathname, rrd):
    filename = Rrdpath.objects.get(name=str(rrdpathname)).path
    fullfilename = filename + '/' + rrd
    if os.path.isfile(fullfilename):
        myrrd = RRD(fullfilename, mode='r')
        info = myrrd.getData()
        info['rrdpathname'] = rrdpathname
        info['rrd'] = rrd
        info['filename'] = filename
        return render_to_response('pydrraw/raw.html', {'info': info})
Example no. 24
def infoview(req, rrdpathname, rrd):
    filename = Rrdpath.objects.get(name=str(rrdpathname)).path
    fullfilename = filename + '/' + rrd
    if os.path.isfile(fullfilename):
        rrd = RRD(fullfilename, mode='r')
        info = rrd.getData()
        info['filename'] = filename
        #info['dslength'] = 5
        info['rrdpathname'] = str(rrdpathname)
        return render_to_response('pydrraw/info.html', {'info': info})
Example no. 25
    def main(self, argv):
        """
        Create an RRD file with values 0-9 entered at 1 second intervals from
        1980-01-01 00:00:00 (the first date that rrdtool allows)
        """
        from pyrrd.rrd import DataSource, RRA, RRD
        start = int(datetime(1980, 1, 1, 0, 0).strftime('%s'))
        dss = []
        rras = []
        filename = os.path.join(self.build_dir, 'test.rrd')

        rows = 12
        step = 10

        dss.append(
            DataSource(dsName='speed', dsType='GAUGE', heartbeat=2 * step))
        rras.append(RRA(cf='AVERAGE', xff=0.5, steps=1, rows=rows))
        rras.append(RRA(cf='AVERAGE', xff=0.5, steps=12, rows=rows))
        my_rrd = RRD(filename, ds=dss, rra=rras, start=start, step=step)
        my_rrd.create()

        for i, t in enumerate(
            range(start + step, start + step + (rows * step), step)):
            self.log.debug(
                'DATA: %s %s (%s)' % (t, i, datetime.fromtimestamp(t)))
            my_rrd.bufferValue(t, i)

        # Add further data 1 second later to demonstrate that the rrd
        # lastupdatetime does not necessarily fall on a step boundary
        t += 1
        i += 1
        self.log.debug('DATA: %s %s (%s)' % (t, i, datetime.fromtimestamp(t)))
        my_rrd.bufferValue(t, i)

        my_rrd.update()
Example no. 26
def RrdProcess(rrdfile, samples):
    '''Reads given samples and stores them in the RRD database.'''
    # TODO: Optionally update the database only periodically.
    rrd = RRD(rrdfile)
    for sample in samples:
        logging.debug("Saving sample %s", sample)
        rrd.bufferValue(sample.time, sample.temperature, sample.humidity,
                        sample.mq9, sample.dust_pc, sample.dust_raw)
        rrd.update(debug=True)
        # Flush the print statements executed so far.
        sys.stdout.flush()
Example no. 27
def data(req, rrdpathname, rrd, ds, rra):
    filename = Rrdpath.objects.get(name=str(rrdpathname)).path
    fullfilename = filename + '/' + rrd
    if os.path.isfile(fullfilename):
        rrd = RRD(fullfilename, mode='r')
        info = rrd.getData()
        step = info['step']
        #this should be the pdp maybe
        start = info['lastupdate'] - 100 * step
        #data = rrd.fetch(resolution=step, start=start, end=info['lastupdate'])
        data = rrd.fetch(resolution=step, start=start, end=info['lastupdate'])
        return HttpResponse(simplejson.dumps(data))
Example no. 28
def main(args):

    filename = 'test.rrd'

    if not os.path.exists(filename):
        dataSources = []
        roundRobinArchives = []

        dataSource = DataSource(dsName='speed',
                                dsType='COUNTER',
                                heartbeat=600)
        print "dataSource.name:", dataSource.name
        dataSources.append(dataSource)

        roundRobinArchives.append(RRA(cf='AVERAGE', xff=0.5, steps=1, rows=24))
        roundRobinArchives.append(RRA(cf='AVERAGE', xff=0.5, steps=6, rows=10))

        myRRD = RRD(filename,
                    ds=dataSources,
                    rra=roundRobinArchives,
                    start=time.time())
        myRRD.create()
        #myRRD.update()
        #write_2_file(myRRD)

    else:

        import random

        myRRD = RRD(filename)
        myRRD.bufferValue(time.time(), random.randrange(12393, 12423))

        #=======================================================================
        # myRRD.bufferValue('920805900', '12363')
        # myRRD.bufferValue('920806200', '12373')
        # myRRD.bufferValue('920806500', '12383')
        # myRRD.update()
        #
        # myRRD.bufferValue('920806800', '12393')
        # myRRD.bufferValue('920807100', '12399')
        # myRRD.bufferValue('920807400', '12405')
        # myRRD.bufferValue('920807700', '12411')
        # myRRD.bufferValue('920808000', '12415')
        # myRRD.bufferValue('920808300', '12420')
        # myRRD.bufferValue('920808600', '12422')
        # myRRD.bufferValue('920808900', '12423')
        #=======================================================================
        myRRD.update()

        #write_2_file(myRRD)

        print os.path.isfile(filename)
        print len(open(filename).read())
Example no. 29
 def create_rrd(self, gpuid):
     ret = RRD('/'.join([self.rrd_dir, '%s.rrd' % gpuid]),
               ds=[
                   DataSource(dsName='utilization',
                              dsType='GAUGE',
                              heartbeat=10)
               ],
               rra=[RRA(cf='MIN', xff=0.5, steps=1, rows=360)],
               step=10,
               start=int(time()))
     ret.create()
     return ret
Example no. 30
 def dumpfileinfo(self):
     #rrds = os.walk(self.path)
     mylist = []
     for root, dir, files in os.walk(self.path):
         for sfile in files:
             filename = os.path.join(root, sfile)
             subpath = filename[len(self.path):]
             rrd = RRD(filename, mode='r')
             info = rrd.getData()
             for i in rrd.ds:
                 mylist.append((self, subpath, sfile, i.name), )
     return mylist
Example no. 31
    def dumpfileinfo(self):
        #rrds = os.walk(self.path)
        mylist = []
        for root, dir, files in os.walk(self.path):
            for sfile in files:
                filename = os.path.join(root, sfile)
                subpath = filename[len(self.path):]
                rrd = RRD(filename, mode='r')
                info = rrd.getData()
                for i in rrd.ds:
                    mylist.append((self, subpath, sfile, i.name),)
        return mylist
Example no. 32
def create(stringName, key):
    if debug: print "Enter Function create(stringName, key)"
    # Let's create an RRD file and dump some data in it
    dss = []
    ds1 = DS(dsName='kW', dsType='GAUGE', heartbeat=600)  # one value every 10 minutes
    dss.append(ds1)

    rras = []  # round robin archives; with xff=0.5, a slot shows up as unknown if no value arrives for 20 minutes
    rra1 = RRA(cf='AVERAGE', xff=0.5, steps=1, rows=144)   # one value every 10 minutes
    rra2 = RRA(cf='AVERAGE', xff=0.5, steps=6, rows=24)    # 24 hours of 1-hour values
    rra3 = RRA(cf='AVERAGE', xff=0.5, steps=24, rows=30)   # 30 days of 24-hour values
    rra4 = RRA(cf='AVERAGE', xff=0.5, steps=30, rows=12)   # 12 months of 30-day values
    rra5 = RRA(cf='AVERAGE', xff=0.5, steps=12, rows=10)   # 10 years of 12-month values
    rras.append(rra1)
    rras.append(rra2)
    rras.append(rra3)
    rras.append(rra4)
    rras.append(rra5)

    # create the round robin database file with the start time startTime (now)
    #myRRD = RRD(baseDir + stringName + "_" + key + ".rrd", ds=dss, rra=rras, start=startTime)
    myRRD = RRD(baseDir + stringName + "_" + key + ".rrd", ds=dss, rra=rras, start=1483228800)
    myRRD.create()

    myRRD.update()
    if debug: myRRD.info()
Example no. 33
def create_rrd(filename, args):
    dses = []
    rras = []

    for ds in args['ds']:
        dses.append(DataSource(dsName=ds, dsType=args['ds'][ds],
                               heartbeat=600))

    rras.append(RRA(cf='AVERAGE', xff=0.5, steps=1, rows=288))
    rras.append(RRA(cf='AVERAGE', xff=0.5, steps=12, rows=744))
    rras.append(RRA(cf='AVERAGE', xff=0.5, steps=24, rows=1116))
    rras.append(RRA(cf='AVERAGE', xff=0.5, steps=48, rows=2191))
    myRRD = RRD(filename, ds=dses, rra=rras, start=int(now) - 60)
    myRRD.create()
Example no. 34
class ExternalBackendTestCase(TestCase):
    def setUp(self):
        ds = [DataSource(dsName="speed", dsType="COUNTER", heartbeat=600)]
        rra = [
            RRA(cf="AVERAGE", xff=0.5, steps=1, rows=24),
            RRA(cf="AVERAGE", xff=0.5, steps=6, rows=10)
        ]
        self.rrdfile = tempfile.NamedTemporaryFile()
        self.rrd = RRD(self.rrdfile.name, ds=ds, rra=rra, start=920804400)
        self.rrd.create()

    def test_updateError(self):
        self.rrd.bufferValue(1261214678, 612)
        self.rrd.bufferValue(1261214678, 612)
        self.assertRaises(ExternalCommandError, self.rrd.update)
        expected = ("illegal attempt to update using time 1261214678 "
                    "when last update time is 1261214678 (minimum one second "
                    "step)")
        try:
            self.rrd.update()
        except ExternalCommandError as error:
            self.assertTrue(str(error).startswith("ERROR:"))
            self.assertTrue(str(error).endswith(expected))

    def test_infoWriteMode(self):
        expectedOutput = """
            rra = [{'rows': 24, 'database': None, 'cf': 'AVERAGE', 'cdp_prep': None, 'beta': None, 'seasonal_period': None, 'steps': 1, 'window_length': None, 'threshold': None, 'alpha': None, 'pdp_per_row': None, 'xff': 0.5, 'ds': [], 'gamma': None, 'rra_num': None}, {'rows': 10, 'database': None, 'cf': 'AVERAGE', 'cdp_prep': None, 'beta': None, 'seasonal_period': None, 'steps': 6, 'window_length': None, 'threshold': None, 'alpha': None, 'pdp_per_row': None, 'xff': 0.5, 'ds': [], 'gamma': None, 'rra_num': None}]
            filename = /tmp/tmpQCLRj0
            start = 920804400
            step = 300
            values = []
            ds = [{'name': 'speed', 'min': 'U', 'max': 'U', 'unknown_sec': None, 'minimal_heartbeat': 600, 'value': None, 'rpn': None, 'type': 'COUNTER', 'last_ds': None}]
            ds[speed].name = speed
            ds[speed].min = U
            ds[speed].max = U
            ds[speed].minimal_heartbeat = 600
            ds[speed].type = COUNTER
            rra[0].rows = 24
            rra[0].cf = AVERAGE
            rra[0].steps = 1
            rra[0].xff = 0.5
            rra[0].ds = []
            rra[1].rows = 10
            rra[1].cf = AVERAGE
            rra[1].steps = 6
            rra[1].xff = 0.5
            rra[1].ds = []
            """.strip().split("\n")
        originalStdout = sys.stdout
        sys.stdout = StringIO()
        self.assertTrue(os.path.exists(self.rrdfile.name))
        self.rrd.info()
        for obtained, expected in zip(sys.stdout.getvalue().split("\n"),
                                      expectedOutput):
            if obtained.startswith("filename"):
                self.assertTrue(expected.strip().startswith("filename"))
            else:
                self.assertEquals(obtained.strip(), expected.strip())
        sys.stdout = originalStdout
Example no. 35
 def create_db(cls, filename, step, start,
               interface_speeds={"eth0": 6 * 1024**2 / 8, # 6Mbit/s
                                 "wlan0": 300 * 1024**2 / 8 }):
   dss = []
   for iface, speed in interface_speeds.items():
     dss.extend([
       DataSource(dsName="%s_out" % iface, dsType='COUNTER',
                  heartbeat=3*step, minval=0, maxval=speed),
       DataSource(dsName="%s_in" % iface, dsType='COUNTER',
                  heartbeat=3*step, minval=0, maxval=speed)
     ])
   db = RRD(filename, ds=dss, rra=cls.generate_archives(step),
            start=start, step=step)
   db.create()
   return db
Example no. 36
def create_rrd(filename, args):
    dses = []
    rras = []

    for ds in args['ds']:
        dses.append(
            DataSource(dsName=ds, dsType=args['ds'][ds], heartbeat=600))
    
    rras.append(RRA(cf='AVERAGE', xff=0.5, steps=1, rows=288))
    rras.append(RRA(cf='AVERAGE', xff=0.5, steps=12, rows=744))
    rras.append(RRA(cf='AVERAGE', xff=0.5, steps=24, rows=1116))
    rras.append(RRA(cf='AVERAGE', xff=0.5, steps=48, rows=2191))
    myRRD = RRD(
        filename, ds=dses, rra=rras, start=int(now)-60)
    myRRD.create()
Example no. 37
 def render_GET(self, request):
     
     self.request = request
     type = request.args["type"][0]
     period = request.args["period"][0]
     history_id = request.args["history_id"][0]
     
     if type == "gauge":
         rrd = RRD("history/%s.rrd" % history_id)
         result = rrd.fetch(resolution=60, start=period, end='now')
         
         clockfix = (datetime.datetime.now().hour - datetime.datetime.utcnow().hour) * 3600
         
         series = [((ts + clockfix) * 1000, val) for ts, val in result["data"]]
         
     return json.dumps(series)
Example no. 38
class Storage:
    # our storage object
    _rrd = None

    def __init__(self, filename="heatpumpMonitor.rrd"):
        if not os.path.isfile(filename):
            self._rrd = self._createRRD(filename)
        else:
            self._rrd = RRD(filename)

    def _createRRD(self, filename):
        """ create an rrd file which fits our requirements """

        # Let's set up some data sources for our RRD
        dss = []
        for source in dataSources:
            dss.append(DS(dsName=source, dsType='GAUGE', heartbeat=900))

        # And now let's set up how our RRD will archive the data
        rras = []
        # 1 days-worth of one-minute samples --> 60/1 * 24
        rra1 = RRA(cf='AVERAGE', xff=0, steps=1, rows=1440)
        # 7 days-worth of five-minute samples --> 60/5 * 24 * 7
        rra2 = RRA(cf='AVERAGE', xff=0, steps=5, rows=2016)
        # 30 days-worth of one hour samples --> 60/60 * 24 * 30
        rra3 = RRA(cf='AVERAGE', xff=0, steps=60, rows=720)
        # 1 year-worth of half day samples --> 60/60 * 24/12 * 365
        rra4 = RRA(cf='AVERAGE', xff=0, steps=720, rows=730)
        rras.extend([rra1, rra2, rra3, rra4])

        # With those set up, we can now create the RRD
        myRRD = RRD(filename,
                    step=step,
                    ds=dss,
                    rra=rras,
                    start=int(time.time()))
        myRRD.create(debug=False)
        return myRRD

    def add(self, aDict):
        """ adds the provided values to the rrd database with the current datetime """
        # we need to put the dict values in the same order as dataSources
        tmp = []
        for source in dataSources:
            tmp.append(aDict.get(source) or "U")
        self._rrd.bufferValue(int(time.time()), *tmp)
        self._rrd.update(debug=False)
Example no. 39
    def rrdtool_create(self, filename):
        """ Create an rrdtool database if it doesn't exist """

        # Create the directory if it doesn't exist.
        directory = "/".join(filename.split("/")[:-1])
        if not os.path.isdir(directory):
            os.makedirs(directory)

        # Step interval for Primary Data Points (pdp)
        pdp_step = self.frequency.seconds + (self.frequency.days * 86400)

        # Heartbeat can be whatever we like, but twice the pdp_step is a good choice
        heartbeat = 2 * pdp_step

        # We only keep a single simple datasource.
        sources = [DataSource(dsName="sum", dsType="GAUGE", heartbeat=heartbeat)]

        # TODO -- this should be a user-definable number.  It is equivalent to
        # "how many data points do I want to see on any one graph at any given
        # time."  The higher it is, the cooler your graphs look.  The higher it
        # is, the more disk space is consumed.  The higher it is, the more
        # memory is consumed client-side.
        target_resolution = 60

        # This function calculates how many PDP steps should be involved in the
        # calculation of a single Consolidated Data Point (CDP).
        cdp_steps = lambda tspan: (tspan / pdp_step) / target_resolution

        # Just a lookup of how many seconds per 'timespan'
        timespans = {"hour": 3600, "day": 86400, "week": 604800, "month": 2629744, "quarter": 7889231, "year": 31556926}

        self.log.info("Building rrd %s.  %i cdp steps per hour." % (filename, cdp_steps(timespans["hour"])))

        # Here we build a series of round robin Archives of various resolutions
        # and consolidation functions
        archives = []
        for consolidation_function in ["AVERAGE", "MAX"]:
            archives += [
                RRA(cf=consolidation_function, xff=0.5, rows=target_resolution, steps=cdp_steps(seconds_per_timespan))
                for name, seconds_per_timespan in timespans.iteritems()
            ]

        # Actually build the round robin database from the parameters we built
        rrd = RRD(filename, start=int(time.time()), step=pdp_step, ds=sources, rra=archives)
        rrd.create()
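
As a sanity check of the cdp_steps lambda above, assume a collection frequency of one minute (so pdp_step is 60) together with the target_resolution of 60 used in the code; // is used below so the result matches the original's Python 2 integer division:

pdp_step, target_resolution = 60, 60  # pdp_step of 60 is an assumption
cdp_steps = lambda tspan: (tspan // pdp_step) // target_resolution
print(cdp_steps(3600))      # "hour" span -> 1 PDP per CDP
print(cdp_steps(86400))     # "day" span  -> 24 PDPs per CDP
print(cdp_steps(31556926))  # "year" span -> 8765 PDPs per CDP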
Example no. 40
 def ensure_rrd(self):
     """
     Ensures that an RRD file is created.
     """
     if os.path.isfile(self.filename):
          # the rrd file already exists
         self.rrd = RRD(self.filename)
     else:
         self.create_rrd()
Example no. 41
 def test_creationDefaults(self):
     filename = '/tmp/test.rrd'
     rrd = RRD(filename, start=920804400)
     self.assertEqual(rrd.filename, filename)
     self.assertEqual(rrd.ds, [])
     self.assertEqual(rrd.rra, [])
     self.assertEqual(rrd.values, [])
     self.assertEqual(rrd.step, 300)
     self.assertEqual(rrd.lastupdate, None)
Example no. 42
 def setUp(self):
     ds = [
         DataSource(dsName="speed", dsType="COUNTER", heartbeat=600)]
     rra = [
         RRA(cf="AVERAGE", xff=0.5, steps=1, rows=24),
         RRA(cf="AVERAGE", xff=0.5, steps=6, rows=10)]
     self.rrdfile = tempfile.NamedTemporaryFile()
     self.rrd = RRD(self.rrdfile.name, ds=ds, rra=rra, start=920804400)
     self.rrd.create()
Example no. 43
    def create(self):
        """ 
        Creates a new RRD database 
        """
        ds1 = DS(dsName=self.value_name,
                 dsType=self.type,
                 heartbeat=self.heartbeat)
        dss = [ds1]

        rras = []
        # 1 days-worth of n heartbeat samples --> 60/1 * 24
        rra1 = RRA(cf='AVERAGE',
                   xff=0.5,
                   steps=1,
                   rows=int(self.heartbeat / 1.0 * 24))
        # 7 days-worth of n heartbeat samples --> 60/5 * 24 * 7
        rra2 = RRA(cf='AVERAGE',
                   xff=0.5,
                   steps=5,
                   rows=int(self.heartbeat / 5.0 * 24 * 7))
        # 30 days-worth of n heartbeat samples --> 60/60 * 24 * 30
        rra3 = RRA(cf='AVERAGE',
                   xff=0.5,
                   steps=60,
                   rows=int(self.heartbeat / 60.0 * 24 * 30))
        # 365 days worth of n heartbeat samples --> 60/120 * 24 * 365
        rra4 = RRA(cf='AVERAGE',
                   xff=0.5,
                   steps=120,
                   rows=int(self.heartbeat / 120.0 * 24 * 365))
        # 10 years worth of n heartbeat samples --> 60/180 * 24 * 365 * 10
        rra5 = RRA(cf='AVERAGE',
                   xff=0.5,
                   steps=180,
                   rows=int(self.heartbeat / 180.0 * 24 * 365 * 10))

        rras.extend([rra1, rra2, rra3, rra4, rra5])

        rrd = RRD(os.path.join("history/", "%s.rrd" % self.value_id),
                  step=self.heartbeat,
                  ds=dss,
                  rra=rras,
                  start=self.time)
        rrd.create(debug=False)
Example no. 44
 def test_infoReadMode(self):
     expectedOutput = """
         filename = "/tmp/tmpP4bTTy"
         rrd_version = "0003"
         step = 300
         last_update = 920804400
         header_size = 800
         ds[speed].index = 0
         ds[speed].type = "COUNTER"
         ds[speed].minimal_heartbeat = 600
         ds[speed].min = NaN
         ds[speed].max = NaN
         ds[speed].last_ds = "U"
         ds[speed].value = 0.0000000000e+00
         ds[speed].unknown_sec = 0
         rra[0].cf = "AVERAGE"
         rra[0].rows = 24
         rra[0].cur_row = 3
         rra[0].pdp_per_row = 1
         rra[0].xff = 5.0000000000e-01
         rra[0].cdp_prep[0].value = NaN
         rra[0].cdp_prep[0].unknown_datapoints = 0
         rra[1].cf = "AVERAGE"
         rra[1].rows = 10
         rra[1].cur_row = 2
         rra[1].pdp_per_row = 6
         rra[1].xff = 5.0000000000e-01
         rra[1].cdp_prep[0].value = NaN
         rra[1].cdp_prep[0].unknown_datapoints = 0
         """
     rrd = RRD(filename=self.rrdfile.name, mode="r", backend=bindings)
     output = StringIO()
     self.assertTrue(os.path.exists(self.rrdfile.name))
     rrd.info(useBindings=True, stream=output)
     for obtained, expected in zip(
         output.getvalue().split("\n"), expectedOutput):
         print "obtained:", obtained
         print "expected:", expected
         if obtained.startswith("filename"):
             self.assertTrue(expected.strip().startswith("filename"))
         else:
             self.assertEquals(obtained.strip(), expected.strip())
     sys.stdout = originalStdout
Example no. 45
    def _boostrap(self):
        """Put together our bits"""
        self.dss = []
        self.ds1 = DS(dsName='depth', dsType='GAUGE', heartbeat=60 * 5)
        self.dss.extend([self.ds1])

        self.rras = []
        rra1 = RRA(cf='AVERAGE', xff=0.5, steps=1, rows=2016)
        self.rras.append(rra1)

        self.myRRD = RRD(self.datafile,
                         ds=self.dss,
                         rra=self.rras,
                         start=int(time.mktime(start_date.timetuple())))
        if not os.path.exists(self.datafile):
            # make sure we create the directory
            if not os.path.exists(os.path.dirname(self.datafile)):
                os.makedirs(os.path.dirname(self.datafile))
            self.myRRD.create()
Example no. 46
class Storage:
    # our storage object
    _rrd = None

    def __init__(self, filename="heatpumpMonitor.rrd"):
        if not os.path.isfile(filename):
            self._rrd = self._createRRD(filename)
        else:
            self._rrd = RRD(filename)

    def _createRRD(self, filename):
        """ create an rrd file which fits our requirements """

        # Let's set up some data sources for our RRD
        dss = []
        for source in dataSources:
          dss.append(DS(dsName=source, dsType='GAUGE', heartbeat=900))

        # And now let's set up how our RRD will archive the data
        rras = []
        # 1 days-worth of one-minute samples --> 60/1 * 24
        rra1 = RRA(cf='AVERAGE', xff=0, steps=1, rows=1440)
        # 7 days-worth of five-minute samples --> 60/5 * 24 * 7
        rra2 = RRA(cf='AVERAGE', xff=0, steps=5, rows=2016)
        # 30 days-worth of one hour samples --> 60/60 * 24 * 30
        rra3 = RRA(cf='AVERAGE', xff=0, steps=60, rows=720)
        # 1 year-worth of half day samples --> 60/60 * 24/12 * 365
        rra4 = RRA(cf='AVERAGE', xff=0, steps=720, rows=730)
        rras.extend([rra1, rra2, rra3, rra4])

        # With those set up, we can now create the RRD
        myRRD = RRD(filename, step=step, ds=dss, rra=rras, start=int(time.time()))
        myRRD.create(debug=False)
        return myRRD

    def add(self, aDict):
        """ adds the provided values to the rrd database with the current datetime """
        # we need to put the dict values in the same order as dataSources
        tmp = []
        for source in dataSources:
            tmp.append(aDict.get(source) or "U")
        self._rrd.bufferValue(int(time.time()), *tmp)
        self._rrd.update(debug=False)
Example no. 47
 def test_infoReadMode(self):
     expectedOutput = """
         filename = "/tmp/tmpP4bTTy"
         rrd_version = "0003"
         step = 300
         last_update = 920804400
         header_size = 800
         ds[speed].index = 0
         ds[speed].type = "COUNTER"
         ds[speed].minimal_heartbeat = 600
         ds[speed].min = NaN
         ds[speed].max = NaN
         ds[speed].last_ds = "U"
         ds[speed].value = 0.0000000000e+00
         ds[speed].unknown_sec = 0
         rra[0].cf = "AVERAGE"
         rra[0].rows = 24
         rra[0].cur_row = 3
         rra[0].pdp_per_row = 1
         rra[0].xff = 5.0000000000e-01
         rra[0].cdp_prep[0].value = NaN
         rra[0].cdp_prep[0].unknown_datapoints = 0
         rra[1].cf = "AVERAGE"
         rra[1].rows = 10
         rra[1].cur_row = 2
         rra[1].pdp_per_row = 6
         rra[1].xff = 5.0000000000e-01
         rra[1].cdp_prep[0].value = NaN
         rra[1].cdp_prep[0].unknown_datapoints = 0
         """
     rrd = RRD(filename=self.rrdfile.name, mode="r", backend=bindings)
     output = StringIO()
     self.assertTrue(os.path.exists(self.rrdfile.name))
     rrd.info(useBindings=True, stream=output)
     for obtained, expected in zip(output.getvalue().split("\n"),
                                   expectedOutput):
          print("obtained:", obtained)
          print("expected:", expected)
         if obtained.startswith("filename"):
             self.assertTrue(expected.strip().startswith("filename"))
         else:
             self.assertEqual(obtained.strip(), expected.strip())
     sys.stdout = originalStdout
Example no. 48
 def create_rrd(self):
     """
     Creates an RRD file.
     """
     dataSources = [DataSource(**ds) for ds in self.dataSources]
     roundRobinArchives = [RRA(**rra) for rra in self.roundRobinArchives]
      # Set start to one day before the current time; 86400 is the number of seconds in one day
     past_one_day = int(time.time()) - 86400
     self.rrd = RRD(self.filename, start=past_one_day, step=self.step,
                    ds=dataSources, rra=roundRobinArchives)
     self.rrd.create()
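
create_rrd above unpacks plain dicts into DataSource and RRA objects, so self.dataSources and self.roundRobinArchives are expected to be lists of keyword-argument dicts; a hypothetical configuration with that shape (names and numbers invented for illustration) could be:

dataSources = [{"dsName": "temperature", "dsType": "GAUGE", "heartbeat": 600}]   # hypothetical
roundRobinArchives = [{"cf": "AVERAGE", "xff": 0.5, "steps": 1, "rows": 1440}]   # hypothetical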
Example no. 49
    def _boostrap(self):
        """Put together our bits"""
        self.dss = []
        self.ds1 = DS(dsName='bookmark_count', dsType='GAUGE', heartbeat=hour)
        self.ds2 = DS(dsName='unique_count', dsType='GAUGE', heartbeat=hour)
        self.ds3 = DS(dsName='tag_count', dsType='GAUGE', heartbeat=hour)
        self.dss.extend([self.ds1, self.ds2, self.ds3])

        self.rras = []
        rra1 = RRA(cf='AVERAGE', xff=0.5, steps=24, rows=8760)
        self.rras.append(rra1)

        self.myRRD = RRD(self.datafile,
                         ds=self.dss,
                         rra=self.rras,
                         start=int(time.mktime(start_date.timetuple())))
        if not os.path.exists(self.datafile):
            # make sure we create the directory
            if not os.path.exists(os.path.dirname(self.datafile)):
                os.makedirs(os.path.dirname(self.datafile))
            self.myRRD.create()
Example no. 50
    def _rrdtool_log(self, count, filename):
        """ Workhorse for rrdtool logging.  Shouldn't be called directly. """

        if not os.path.isfile(filename):
            self.rrdtool_create(filename)
            # rrdtool complains if you stuff data into a freshly created
            # database less than one second after you created it.  We could do a
            # number of things to mitigate this:
            #   - sleep for 1 second here
            #   - return from this function and not log anything only on the
            #     first time we see a new data key (a new country, a new
            #     filename).
            #   - pre-create our databases at startup based on magical knowledge
            #     of what keys we're going to see coming over the AMQP line
            #
            # For now, we're just going to return.
            return

        # TODO -- Is this an expensive operation (opening the RRD)?  Can we make
        # this happen less often?
        rrd = RRD(filename)

        rrd.bufferValue(str(int(time.time())), str(count))

        # This flushes the values to file.
        # TODO -- Can we make this happen less often?
        rrd.update()
Example no. 51
def update(stringName, timestamp, key, value):
    if debug: print "Enter Function update(stringName, timestamp, key, value)"

    # open the round robin database file
    myRRD = RRD(baseDir + stringName + "_" + key + ".rrd")

    # write the value into the round robin database
    try:
        myRRD.bufferValue(timestamp, value)
        myRRD.update()
    except:
        print "value in the past"

    if debug: myRRD.info()
Example no. 52
  def __init__(self, filename):
    if os.path.isfile(filename):
      self.rrd = RRD(filename)
    else:
      dataSources = []
      dataSources.append( DataSource(dsName='q1', dsType='GAUGE', heartbeat=600, minval=0) )
      dataSources.append( DataSource(dsName='q2', dsType='GAUGE', heartbeat=600, minval=0) )
      dataSources.append( DataSource(dsName='q3', dsType='GAUGE', heartbeat=600, minval=0) )
      dataSources.append( DataSource(dsName='lo', dsType='GAUGE', heartbeat=600, minval=0) )
      dataSources.append( DataSource(dsName='hi', dsType='GAUGE', heartbeat=600, minval=0) )
      dataSources.append( DataSource(dsName='total', dsType='GAUGE', heartbeat=600, minval=0) )

      roundRobinArchives = []
      roundRobinArchives.append(RRA(cf='AVERAGE', xff=0.5, steps=1, rows=8640)) # 24h at 1 sample per 10 secs
      roundRobinArchives.append(RRA(cf='AVERAGE', xff=0.5, steps=90, rows=2880)) # 1 month at 1 sample per 15 mins
      roundRobinArchives.append(RRA(cf='AVERAGE', xff=0.5, steps=2880, rows=5475)) # 5 years at 1 sample per 8 hours

      self.rrd = RRD(filename, step=10, ds=dataSources, rra=roundRobinArchives, start=int(time.time()))
      self.rrd.create()

    self.bucket = { 'a': [], 'b': [] }
    self.current_bucket = 'a'
Example no. 53
    def rrdtool_create(self, filename):
        """ Create an rrdtool database if it doesn't exist """

        # Our heartbeat is a multiple of the step interval (eight steps here).
        step = 15
        heartbeat = 8*step

        sources = [
            DataSource(
                dsName='sum', dsType='GAUGE', heartbeat=heartbeat)
        ]

        archives = [
            RRA(cf='AVERAGE', xff=0.5, steps=1, rows=244),
            RRA(cf='AVERAGE', xff=0.5, steps=24, rows=244),
            RRA(cf='AVERAGE', xff=0.5, steps=168, rows=244),
            RRA(cf='AVERAGE', xff=0.5, steps=672, rows=244),
            RRA(cf='AVERAGE', xff=0.5, steps=5760, rows=374),
        ]

        rrd = RRD(filename, ds=sources, rra=archives, start=int(time.time()))
        rrd.create()
Example no. 54
 def insert(self):
     """ 
     Inserts new data in the RRD database 
     """
     rrd = RRD(os.path.join("history/", "%s.rrd" % self.value_id))
     rrd.bufferValue(self.time, self.value_value)
     rrd.update()
     print self.time, self.value_value
Example no. 55
File: data.py Project: jul/dsat
def create_rrd_if(cfg, _type, **kw):
    cfg.update(kw)
    _path = cfg["_path"]
    if not path.exists(_path):
        raise IOError("Non-existent path %(_path)s" % cfg)
    fn = path.join(_path, _type.endswith(".rrd") and _type or "%s.rrd" % _type)
    if path.isfile(fn):
        # should check the RRD magic number if I were paranoid
        raise IOError("already exists %s" % fn)
    ds = [ DataSource(**{ 
            k:s_dct.get(k,cfg["_default"].get(k, "arg"))
                for k in set(s_dct.keys()) | set(cfg["_default"].keys())
        }) for s_dct in cfg[_type]["source"]]
    rra = [
    "RRA:MIN:0.5:10:57600",
    "RRA:AVERAGE:0.5:1:57600",
    "RRA:AVERAGE:0.5:10:57600",
    "RRA:AVERAGE:0.5:100:57600",
    "RRA:LAST:0:10:57600",
    "RRA:MAX:0:10:57600",
    ]
    archive = RRD(fn,rra=rra, ds=ds, **cfg.get("_rrd_option",{}))
    return archive.create()
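
create_rrd_if reads its data-source definitions from a nested cfg dict; a minimal hypothetical cfg, with keys inferred from the lookups in the code and values invented for illustration, might be:

cfg = {
    "_path": "/var/lib/rrd",                                        # hypothetical path
    "_default": {"dsType": "GAUGE", "heartbeat": 600},              # per-source defaults
    "cpu": {"source": [{"dsName": "user"}, {"dsName": "system"}]},  # _type = "cpu"
    "_rrd_option": {"step": 60},                                    # extra RRD() kwargs
}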
Example no. 56
    def create(self):
        ''' Create the RRD '''
        # 5 minute average for daily view
        self._rra.append(RRA(cf='AVERAGE', xff=0.5, steps=1, rows=288))

        # 30 minute average for weekly view
        self._rra.append(RRA(cf='AVERAGE', xff=0.5, steps=6, rows=336))
        self._rra.append(RRA(cf='MAX',     xff=0.5, steps=6, rows=336))
        self._rra.append(RRA(cf='MIN',     xff=0.5, steps=6, rows=336))

        # 2 hour average for monthly view
        self._rra.append(RRA(cf='AVERAGE', xff=0.5, steps=24, rows=360))
        self._rra.append(RRA(cf='MAX',     xff=0.5, steps=24, rows=360))
        self._rra.append(RRA(cf='MIN',     xff=0.5, steps=24, rows=360))

        # 24 hour average for yearly view
        self._rra.append(RRA(cf='AVERAGE', xff=0.5, steps=288, rows=365))
        self._rra.append(RRA(cf='MAX',     xff=0.5, steps=288, rows=365))
        self._rra.append(RRA(cf='MIN',     xff=0.5, steps=288, rows=365))

        print "Create %s" % (self._rrd_file)
        my_rrd = RRD(self._rrd_file, ds=self._ds, rra=self._rra)
        my_rrd.create()
Example no. 57
    def _createRRD(self, filename):
        """ create an rrd file which fits our requirements """

        # Let's set up some data sources for our RRD
        dss = []
        for source in dataSources:
          dss.append(DS(dsName=source, dsType='GAUGE', heartbeat=900))

        # And now let's set up how our RRD will archive the data
        rras = []
        # 1 days-worth of one-minute samples --> 60/1 * 24
        rra1 = RRA(cf='AVERAGE', xff=0, steps=1, rows=1440)
        # 7 days-worth of five-minute samples --> 60/5 * 24 * 7
        rra2 = RRA(cf='AVERAGE', xff=0, steps=5, rows=2016)
        # 30 days-worth of one hour samples --> 60/60 * 24 * 30
        rra3 = RRA(cf='AVERAGE', xff=0, steps=60, rows=720)
        # 1 year-worth of half day samples --> 60/60 * 24/12 * 365
        rra4 = RRA(cf='AVERAGE', xff=0, steps=720, rows=730)
        rras.extend([rra1, rra2, rra3, rra4])

        # With those set up, we can now create the RRD
        myRRD = RRD(filename, step=step, ds=dss, rra=rras, start=int(time.time()))
        myRRD.create(debug=False)
        return myRRD
Example no. 58
 def create_db(cls, filename, step, start):
   dss = []
   for i in range(SysStat.num_cpu() + 1):
     # maxval is the total number of jiffies in the interval
     if i == 0:
       n, cpu_mul = "", SysStat.num_cpu()
     else:
       n, cpu_mul = "_%d" % (i-1), 1
     dss.extend([
       DataSource(dsName='cpu_user%s' % n, dsType='COUNTER', heartbeat=3*step,
                  minval=0, maxval=cpu_mul * SysStat.jiffies() * step),
       DataSource(dsName='cpu_nice%s' % n, dsType='COUNTER', heartbeat=3*step,
                  minval=0, maxval=cpu_mul * SysStat.jiffies() * step),
       DataSource(dsName='cpu_sys%s' % n, dsType='COUNTER', heartbeat=3*step,
                  minval=0, maxval=cpu_mul * SysStat.jiffies() * step),
       DataSource(dsName='cpu_idle%s' % n, dsType='COUNTER', heartbeat=3*step,
                  minval=0, maxval=cpu_mul * SysStat.jiffies() * step),
       DataSource(dsName='cpu_iowt%s' % n, dsType='COUNTER', heartbeat=3*step,
                  minval=0, maxval=cpu_mul * SysStat.jiffies() * step)
     ])
   db = RRD(filename, ds=dss, rra=cls.generate_archives(step),
            start=start, step=step)
   db.create()
   return db
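
For a sense of scale, assuming the kernel's usual 100 jiffies per second and a 60-second step (both are assumptions, neither is fixed by the snippet), the maxval assigned to each data source works out as:

jiffies_per_sec, step, num_cpu = 100, 60, 4  # assumed values
print(jiffies_per_sec * step)                # 6000: the maxval used for each per-CPU source
print(num_cpu * jiffies_per_sec * step)      # 24000: the maxval used for the aggregate source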