Code example #1
def loop_body(opts, gstate):
    msgs = []

    def cb_add_item(key, val, category):
        msgs.append(
            ("starlink/dish_{0}/{1}/{2}".format(category, gstate.dish_id,
                                                key), val, 0, False))

    def cb_add_sequence(key, val, category, _):
        msgs.append(
            ("starlink/dish_{0}/{1}/{2}".format(category, gstate.dish_id, key),
             ",".join(str(x) for x in val), 0, False))

    rc = dish_common.get_data(opts, gstate, cb_add_item, cb_add_sequence)

    if msgs:
        try:
            paho.mqtt.publish.multiple(msgs,
                                       client_id=gstate.dish_id,
                                       **opts.mqargs)
            if opts.verbose:
                print("Successfully published to MQTT broker")
        except Exception as e:
            dish_common.conn_error(opts,
                                   "Failed publishing to MQTT broker: %s",
                                   str(e))
            rc = 1

    return rc
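
For reference, each tuple appended to msgs above is (topic, payload, qos, retain), one of the message forms paho.mqtt.publish.multiple() accepts. A minimal standalone sketch of that call, using a placeholder hostname and dish ID rather than anything from the script:

import paho.mqtt.publish

msgs = [
    # (topic, payload, qos, retain) tuples, the same shape the callbacks build
    ("starlink/dish_unknown_id/status/state", "CONNECTED", 0, False),
    ("starlink/dish_unknown_id/status/uptime", 12345, 0, False),
]
paho.mqtt.publish.multiple(msgs, hostname="localhost", client_id="example")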
Code example #2
def ensure_schema(opts, conn, context):
    cur = conn.cursor()
    cur.execute("PRAGMA user_version")
    version = cur.fetchone()
    if version and version[0] == SCHEMA_VERSION and not opts.force:
        cur.close()
        return 0

    try:
        if not version or not version[0]:
            if opts.verbose:
                print("Initializing new database")
            create_tables(conn, context, "")
        elif version[0] > SCHEMA_VERSION and not opts.force:
            logging.error(
                "Cowardly refusing to downgrade from schema version %s",
                version[0])
            return 1
        else:
            print("Converting from schema version:", version[0])
            convert_tables(conn, context)
        cur.execute("PRAGMA user_version={0}".format(SCHEMA_VERSION))
        conn.commit()
        return 0
    except starlink_grpc.GrpcError as e:
        dish_common.conn_error(opts, "Failure reflecting status fields: %s",
                               str(e))
        return 1
    finally:
        cur.close()
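
The schema check above rests on SQLite's user_version pragma, which also explains the str.format call: PRAGMA statements do not accept bound parameters, so the version number has to be interpolated into the SQL text. A minimal sketch of the mechanism on its own, against an in-memory database (the version value 1 is arbitrary):

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("PRAGMA user_version")
print(cur.fetchone()[0])  # a fresh database reports 0
cur.execute("PRAGMA user_version={0}".format(1))  # no ? placeholders for PRAGMA
conn.commit()
cur.execute("PRAGMA user_version")
print(cur.fetchone()[0])  # now 1
cur.close()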
Code example #3
def flush_points(opts, gstate):
    try:
        while len(gstate.points) > MAX_BATCH:
            gstate.influx_client.write_points(
                gstate.points[:MAX_BATCH],
                time_precision="s",
                retention_policy=opts.retention_policy)
            if opts.verbose:
                print("Data points written: " + str(MAX_BATCH))
            del gstate.points[:MAX_BATCH]
        if gstate.points:
            gstate.influx_client.write_points(
                gstate.points,
                time_precision="s",
                retention_policy=opts.retention_policy)
            if opts.verbose:
                print("Data points written: " + str(len(gstate.points)))
            gstate.points.clear()
    except Exception as e:
        dish_common.conn_error(opts, "Failed writing to InfluxDB database: %s",
                               str(e))
        # If failures persist, don't just use infinite memory. Max queue
        # is currently 10 days of bulk data, so something is very wrong
        # if it's ever exceeded.
        if len(gstate.points) > MAX_QUEUE_LENGTH:
            logging.error("Max write queue exceeded, discarding data.")
            del gstate.points[:-MAX_QUEUE_LENGTH]
        return 1

    return 0
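
The point worth copying from this function is that queued points are deleted only after a successful write, and on persistent failure the queue is truncated from the front so that at most the newest MAX_QUEUE_LENGTH entries survive. A stripped-down sketch of that pattern with a stub writer; the limits here are illustrative, not the script's actual values:

MAX_BATCH = 1000
MAX_QUEUE_LENGTH = 10000

def flush(points, write):
    try:
        while len(points) > MAX_BATCH:
            write(points[:MAX_BATCH])  # send one full batch
            del points[:MAX_BATCH]     # drop it from the queue only on success
        if points:
            write(points)
            points.clear()
    except Exception:
        # keep the unsent points for retry, but cap memory use by discarding
        # everything except the newest MAX_QUEUE_LENGTH entries
        if len(points) > MAX_QUEUE_LENGTH:
            del points[:-MAX_QUEUE_LENGTH]
        return 1
    return 0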
Code example #4
def print_header(opts):
    header = ["datetimestamp_utc"]

    def header_add(names):
        for name in names:
            name, start, end = dish_common.BRACKETS_RE.match(name).group(
                1, 4, 5)
            if start:
                header.extend(name + "_" + str(x)
                              for x in range(int(start), int(end)))
            elif end:
                header.extend(name + "_" + str(x) for x in range(int(end)))
            else:
                header.append(name)

    if opts.satus_mode:
        context = starlink_grpc.ChannelContext(target=opts.target)
        try:
            name_groups = starlink_grpc.status_field_names(context=context)
        except starlink_grpc.GrpcError as e:
            dish_common.conn_error(
                opts, "Failure reflecting status field names: %s", str(e))
            return 1
        if "status" in opts.mode:
            header_add(name_groups[0])
        if "obstruction_detail" in opts.mode:
            header_add(name_groups[1])
        if "alert_detail" in opts.mode:
            header_add(name_groups[2])

    if opts.bulk_mode:
        general, bulk = starlink_grpc.history_bulk_field_names()
        header_add(general)
        header_add(bulk)

    if opts.history_stats_mode:
        groups = starlink_grpc.history_stats_field_names()
        general, ping, runlen, latency, loaded, usage = groups[0:6]
        header_add(general)
        if "ping_drop" in opts.mode:
            header_add(ping)
        if "ping_run_length" in opts.mode:
            header_add(runlen)
        if "ping_loaded_latency" in opts.mode:
            header_add(loaded)
        if "ping_latency" in opts.mode:
            header_add(latency)
        if "usage" in opts.mode:
            header_add(usage)

    print(",".join(header))
    return 0
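
header_add relies on dish_common.BRACKETS_RE to split a field name from an optional bracketed length so that each sequence field expands into one numbered column per element. The regex below is written here to approximate that behaviour (BRACKETS_RE's real definition is not part of this excerpt), and the field names are made up:

import re

# assumed shape: "name", "name[end]", or "name[start,end]"
BRACKETS_RE = re.compile(r"([^[]*)(\[((\d+),|)(\d*)\])?$")

for name in ["state", "snr[12]", "downlink_throughput_bps[5,12]"]:
    base, start, end = BRACKETS_RE.match(name).group(1, 4, 5)
    if start:
        cols = [base + "_" + str(x) for x in range(int(start), int(end))]
    elif end:
        cols = [base + "_" + str(x) for x in range(int(end))]
    else:
        cols = [base]
    print(cols)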
Code example #5
def flush_points(opts, gstate):
    try:
        while len(gstate.points) > MAX_BATCH:
            if 'token' not in opts.icargs:
                gstate.influx_client.write_points(
                    gstate.points[:MAX_BATCH],
                    time_precision="s",
                    retention_policy=opts.retention_policy)
            else:
                points = [
                    Point.from_dict(i, write_precision=WritePrecision.S)
                    for i in gstate.points[:MAX_BATCH]
                ]
                gstate.influx_client.write_api(
                    write_options=SYNCHRONOUS).write(opts.icargs['database'],
                                                     opts.icargs['org'],
                                                     points)
            if opts.verbose:
                print("Data points written: " + str(MAX_BATCH))
            del gstate.points[:MAX_BATCH]
        if gstate.points:
            if 'token' not in opts.icargs:
                gstate.influx_client.write_points(
                    gstate.points,
                    time_precision="s",
                    retention_policy=opts.retention_policy)
            else:
                points = [
                    Point.from_dict(i, write_precision=WritePrecision.S)
                    for i in gstate.points
                ]
                gstate.influx_client.write_api(
                    write_options=SYNCHRONOUS).write(opts.icargs['database'],
                                                     opts.icargs['org'],
                                                     points)

            if opts.verbose:
                print("Data points written: " + str(len(gstate.points)))
            gstate.points.clear()
    except Exception as e:
        dish_common.conn_error(opts, "Failed writing to InfluxDB database: %s",
                               str(e))
        # If failures persist, don't just use infinite memory. Max queue
        # is currently 10 days of bulk data, so something is very wrong
        # if it's ever exceeded.
        if len(gstate.points) > MAX_QUEUE_LENGTH:
            logging.error("Max write queue exceeded, discarding data.")
            del gstate.points[:-MAX_QUEUE_LENGTH]
        return 1

    return 0
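
In the token branch the queued dicts are turned into influxdb_client Point objects before writing; the presence of a token in opts.icargs is evidently what selects the InfluxDB 2.x client path. A minimal sketch of the dict shape Point.from_dict() consumes, with placeholder measurement, tag, and field names:

from influxdb_client import Point, WritePrecision

sample = {
    "measurement": "example_status",
    "tags": {"id": "dish_unknown_id"},
    "fields": {"state": "CONNECTED", "pop_ping_latency_ms": 35.0},
    "time": 1700000000,  # seconds since epoch, hence WritePrecision.S
}
point = Point.from_dict(sample, write_precision=WritePrecision.S)
print(point.to_line_protocol())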
Code example #6
def sync_timebase(opts, gstate):
    try:
        db_counter, db_timestamp = query_counter(gstate,
                                                 gstate.start_timestamp,
                                                 gstate.timestamp)
    except Exception as e:
        # could be temporary outage, so try again next time
        dish_common.conn_error(opts,
                               "Failed querying InfluxDB for prior count: %s",
                               str(e))
        return
    gstate.timebase_synced = True

    if db_counter and gstate.start_counter <= db_counter:
        del gstate.deferred_points[:db_counter - gstate.start_counter]
        if gstate.deferred_points:
            delta_timestamp = db_timestamp - (
                gstate.deferred_points[0]["time"] - 1)
            # to prevent +/- 1 second timestamp drift when the script restarts,
            # if time base is within 2 seconds of that of the last sample in
            # the database, correct back to that time base
            if delta_timestamp == 0:
                if opts.verbose:
                    print("Exactly synced with database time base")
            elif -2 <= delta_timestamp <= 2:
                if opts.verbose:
                    print(
                        "Replacing with existing time base: {0} -> {1}".format(
                            db_counter,
                            datetime.fromtimestamp(db_timestamp,
                                                   tz=timezone.utc)))
                for point in gstate.deferred_points:
                    db_timestamp += 1
                    if point["time"] + delta_timestamp == db_timestamp:
                        point["time"] = db_timestamp
                    else:
                        # lost time sync when recording data, leave the rest
                        break
                else:
                    gstate.timestamp = db_timestamp
            else:
                if opts.verbose:
                    print(
                        "Database time base out of sync by {0} seconds".format(
                            delta_timestamp))

    gstate.points.extend(gstate.deferred_points)
    gstate.deferred_points.clear()
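
A subtle piece of the loop above is the for/else: the else branch, which adopts the database time base as gstate.timestamp, runs only if the loop never hit break, i.e. every deferred point lined up with the database timestamps. A toy illustration of that control flow with made-up values:

deferred = [{"time": t} for t in (100, 101, 103)]  # note the gap at 102
delta, db_time = 1, 100
for point in deferred:
    db_time += 1
    if point["time"] + delta == db_time:
        point["time"] = db_time
    else:
        break  # lost time sync; leave the remaining points untouched
else:
    print("all points realigned; db_time would become the new time base")
print(deferred)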