Example 1
def makeAndPublishIDB(measname, fields, db, tags, table, debug=False):
    """
    Same pattern as above; it was repeated often enough that it's now a function.
    """
    pkt = packetizer.makeInfluxPacket(measname,
                                      ts=None,
                                      tags=tags,
                                      fields=fields,
                                      debug=debug)

    if db is not None and pkt is not None:
        db.singleCommit(pkt, table=table, close=True)
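
A minimal usage sketch of the helper above; the measurement name, fields, tags, and table are invented, and db is assumed to be an already-opened influxdb connection object that provides the singleCommit method used above:

# Hypothetical values purely for illustration; 'db' is assumed to be an
#   open influxdb connection object exposing singleCommit(), as above
fields = {"cpu_temp": 48.3, "disk_free_gb": 12.7}
tags = {"host": "pi-kitchen"}

makeAndPublishIDB("systemhealth", fields, db,
                  tags=tags, table="telemetry", debug=True)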
Example 2
def readAllSensors():
    fields = {}
    for sensor in W1ThermSensor.get_available_sensors():
        sty = sensor.type_name
        sid = sensor.id
        stp = sensor.get_temperature()
        print("Sensor %s (%s) has temperature %.2f" % (sid, sty, stp))
        fields.update({sid: stp})

    pkt = packetizer.makeInfluxPacket(meas=['temperatures'], fields=fields)
    print(pkt)

    return pkt
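
A sketch of how the returned packet might then be committed, reusing the singleCommit pattern from the other examples; the connection object and table name are assumptions:

# Hypothetical commit of the packet built above; 'db' is again assumed to
#   be an open influxdb connection object providing singleCommit()
pkt = readAllSensors()
if pkt is not None and pkt != []:
    db.singleCommit(pkt, table="onewire", close=True)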
Example 3
def systemStats(printerip):
    """
    Query the printer for memory
    """
    endpoint = "/system/memory"
    mem = api.queryChecker(printerip, endpoint)
    if mem != {}:
        mem.update({"free": mem['total'] - mem['used']})

        pkt = packetizer.makeInfluxPacket(meas=['system'],
                                          fields=mem,
                                          tags=None)
    else:
        # Keep the return type consistent (an empty list) when the query fails
        pkt = []

    return pkt
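
For reference, a sketch of the intermediate data, under the assumption that the endpoint reports total and used byte counts; the numbers below are invented:

# Hypothetical /system/memory response; the real keys/values depend on
#   the printer firmware
mem = {"total": 1073741824, "used": 536870912}
mem.update({"free": mem['total'] - mem['used']})
# mem now also contains "free": 536870912, and all three values become
#   the fields of a single point in the 'system' measurement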
Example 4
def main():
    """
    """
    # Define the default files we'll use/look for. These are passed to
    #   the worker constructor (toServeMan).
    conf = './config/radia.conf'
    passes = './config/passwords.conf'
    logfile = '/tmp/radia.log'
    desc = "Radia: A SNMP Grabber"
    eargs = radia.parseargs.extraArguments
    conftype = classes.snmpTarget

    # Interval between successive runs of the polling loop (seconds)
    bigsleep = 60

    # config: dictionary of parsed config file
    # comm: common block from config file
    # args: parsed options
    # runner: class that contains logic to quit nicely
    config, comm, args, runner = workerSetup.toServeMan(conf,
                                                        passes,
                                                        logfile,
                                                        desc=desc,
                                                        extraargs=eargs,
                                                        conftype=conftype,
                                                        logfile=True)

    # Get this PID for diagnostics
    pid = os.getpid()

    # Print the preamble of this particular instance
    #   (helpful to find starts/restarts when scanning thru logs)
    common.printPreamble(pid, config)

    # Check to see if there are any connections/objects to establish
    idbs = connSetup.connIDB(comm)

    # Set up the different SNMP (snimpy) managers for the devices...
    #   Do this here outside the main loop so we're not always
    #   loading MIBs from disk and parsing them every time
    smans = {}
    for snmptarg in config:
        # snmptarg will be the *key* from a config file section!
        snmpManager = radia.snmptools.setupSNMPTarget(config[snmptarg],
                                                      loadMIBs=True)
        smans.update({snmptarg: snmpManager})

    # Semi-infinite loop
    while runner.halt is False:
        # Loop over the defined SNMP targets in the config file
        for snmptarg in config:
            print(snmptarg)
            valDict = radia.snmptools.grabEndpoints(smans[snmptarg],
                                                    config[snmptarg])

            # Before we can make the packet, we need to convert from
            #   the default snimpy datatypes
            valDict = radia.snmptools.convertDatatypes(valDict)
            print(valDict)

            if valDict != {}:
                # This means that we stored at least something valid,
                #   so construct an influxdb packet and store it!
                packet = packetizer.makeInfluxPacket(meas=[snmptarg],
                                                     ts=None,
                                                     tags=None,
                                                     fields=valDict)
                print(packet)

                # Grab the relevant/specified database
                db = idbs[config[snmptarg].database]

                # Technically this is hardcoded for an influxdb db
                db.singleCommit(packet,
                                table=config[snmptarg].databasetable,
                                close=True)

            # Mini sleep between targets
            time.sleep(1)

        # Consider taking a big nap
        if runner.halt is False:
            print("Starting a big sleep")
            # Sleep for bigsleep, but in small chunks to check abort
            for _ in range(bigsleep):
                time.sleep(1)
                if runner.halt is True:
                    break

    # The above loop is exited when someone sends SIGTERM
    print("PID %d is now out of here!" % (pid))

    # The PID file will have already been either deleted/overwritten by
    #   another function/process by this point, so just give back the
    #   console and return STDOUT and STDERR to their system defaults
    sys.stdout = sys.__stdout__
    sys.stderr = sys.__stderr__
    print("STDOUT and STDERR reset.")
Example 5
def tempFlow(printerip, nsamps=800, debug=False):
    """
    Query the printer for temperatures, and format/prepare them for storage.

    With what looks like a sample rate of ~10 Hz, 800 samples will give
    ~80 seconds worth of temperature data.  For a query interval of
    60 seconds, which could vary depending on some picture stuff, this
    will be a pretty good representation of the performance/stability.

    Entirely designed for putting into an influxdb database. If you want
    another database type, well, point it at a different formatting
    function in the conditional check on tres.
    """
    boottime = None
    # Temperature timestamps are in seconds since boot ... kinda.
    #   It *looks* like Ultimaker uses time.monotonic() in a lot of places,
    #   and the reference point for that is *technically* undefined according
    #   to the python docs; in practice it's probably close enough, though
    #   it's worth noting that it's *immune* to NTP updates and system
    #   clock changes in general. Ugh.
    uptimeSec = api.queryChecker(printerip, "/system/uptime")
    if uptimeSec != {}:
        # You may be tempted to choose .utcnow() instead of now(); but,
        #   that'd be a mistake.  Influx tries to be fancy for you,
        #   so it's easier to just get the regular time and hope for the best
        #   Otherwise you'll be tracing UTC offsets in the dashboard(s)
        #   for literally ever, which is the worst.
        boottime = dt.datetime.now() - dt.timedelta(seconds=uptimeSec)
        if debug is True:
            print("Calculated datetime data offset: ", boottime)

    # This query is a house of cards; if it fails because the printer
    #   is unreachable, literally everything will implode. So check that
    #   the return value isn't empty!!!
    endpoint = "/printer/diagnostics/temperature_flow/%d" % (nsamps)
    tres = api.queryChecker(printerip, endpoint)

    if tres != {} and boottime is not None:
        # For the Ultimaker 3e, the flow sensor hardware was removed before
        #   the printer shipped so the following are always 0 or 65535;
        #   We exclude them from the results because that's annoying.
        # NOTE: case isn't checked, so they must be *exact* matches!
        #       Also - "Time" is skipped because we store that differently
        bklst = ['Time',
                 'flow_sensor0', 'flow_steps0',
                 'flow_sensor1', 'flow_steps1']

        allpkts = []

        for i, points in enumerate(tres):
            # At this point, if the query is successful, tres is a list of
            #   lists, the first of which is the labels and the rest are
            #   lists of values matching those labels.
            if i == 0:
                flabs = points
                # Translate the label blacklist into the set of column
                #   indices that we actually keep
                if bklst != []:
                    gi = [k for k, lab in enumerate(flabs) if lab not in bklst]
                else:
                    # No blacklist given, so keep every column
                    gi = list(range(len(flabs)))
            else:
                # Make an influxdb packet, but first do some contortions
                #   to make the timestamp a real timestamp rather than
                #   just an offset from boot
                ts = boottime + dt.timedelta(seconds=points[0])
                # Convert it to milliseconds for influx
                #   NOTE: It CAN NOT be a float! It must be an int, which
                #         round() returns when ndigits is omitted
                ts = round(ts.timestamp()*1e3)

                # Grab the non-blacklisted items and construct our packet
                #   I can't think of a good way to put this into a set of
                #   non-confusing list comprehensions so I'm breaking it out
                fields = {}
                for idx in gi:
                    fields.update({flabs[idx]: points[idx]})

                pkt = packetizer.makeInfluxPacket(meas=['temperatures'],
                                                  ts=ts, fields=fields,
                                                  tags=None)
                # makeInfluxPacket returns a single-element list, so grab
                #   just the dict inside of it
                allpkts.append(pkt[0])
    else:
        print("ERROR: Printer query failed!")
        # We end up here if either the uptime or the temperature query failed
        allpkts = []

    return allpkts
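
A standalone sketch of the timestamp conversion performed above: the boot time is reconstructed from the uptime query, each sample's seconds-since-boot offset is added to it, and the result is rounded to the integer milliseconds influx expects. The uptime and offset values here are invented:

import datetime as dt

# Hypothetical values: one hour of uptime, a sample taken 12.5 s after boot
uptimeSec = 3600
sampleOffset = 12.5

boottime = dt.datetime.now() - dt.timedelta(seconds=uptimeSec)
ts = boottime + dt.timedelta(seconds=sampleOffset)

# Influx wants an integer millisecond timestamp, not a float
tsMilli = round(ts.timestamp()*1e3)
print(tsMilli)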