Esempio n. 1
0
    def test_create_bucket_retention_list(self):
        """Bucket creation should accept retention rules given as a list."""
        org = self.find_my_org()
        name = generate_bucket_name()

        # Build the rule via attribute assignment rather than the ctor kwarg.
        rule = BucketRetentionRules(every_seconds=3600)
        rule.type = "expire"

        bucket = self.buckets_api.create_bucket(
            bucket_name=name, org=org, retention_rules=[rule])

        self.assertEqual(bucket.name, name)
        self.delete_test_bucket(bucket)
Esempio n. 2
0
    def __init__(
        self,
        bucket_name=BUCKET,
        batch_size=LOG_BATCH_SIZE,
        data_retention=3600,
    ):
        """Connect to InfluxDB and make sure the target bucket exists.

        Args:
            bucket_name: bucket to write to; created when missing.
            batch_size: batch size handed to the write API.
            data_retention: expiry in seconds for a newly created bucket;
                ``None`` creates the bucket with no retention rule.
        """
        self.organization = ORGANIZATION
        self.batch_size = batch_size
        self.bucket_name = bucket_name

        self.client = InfluxDBClient(
            url=INFLUXDB_URL, token=INFLUXDB_TOKEN, org=self.organization)
        self.write_api = self.client.write_api(
            write_options=WriteOptions(batch_size=self.batch_size))
        self.query_api = self.client.query_api()
        self.buckets_api = self.client.buckets_api()

        if self.buckets_api.find_bucket_by_name(self.bucket_name) is None:
            logger.warning(f"Bucket {self.bucket_name!r} not found. "
                           f"Creating a bucket {self.bucket_name!r}.")
            rules = (None if data_retention is None
                     else BucketRetentionRules(type="expire",
                                               every_seconds=data_retention))
            self.buckets_api.create_bucket(
                bucket_name=self.bucket_name,
                retention_rules=rules,
                org=self.organization,
            )
Esempio n. 3
0
    def test_create_bucket_retention(self):
        """A single BucketRetentionRules object is accepted directly."""
        org = self.find_my_org()
        name = generate_bucket_name()
        description = "bucket with retention"

        bucket = self.buckets_api.create_bucket(
            bucket_name=name,
            org=org,
            retention_rules=BucketRetentionRules(type="expire",
                                                 every_seconds=3600),
            description=description)

        self.assertEqual(bucket.description, description)

        print(bucket)
        self.buckets_api.delete_bucket(bucket)
Esempio n. 4
0
    def resetInfluxDatabases(self):
        """Drop and recreate the Influx database (v1) or bucket (v2).

        Exits the process with code -99 on any failure.
        """
        try:
            self.logger.warning("Resetting Influx-Database")

            if self.influx_version == 1:
                # v1.x: plain database drop/create is enough.
                self.influxClient.drop_database(self.influx_db)
                self.influxClient.create_database(self.influx_db)
                return

            # v2.x: delete the bucket if present, then recreate it.
            url = f"http://{self.influx_host}:{self.influx_port}"
            with InfluxDBClient(url=url,
                                token=self.influx_token,
                                org=self.influx_org,
                                timeout=180_000) as client:
                buckets_api = client.buckets_api()

                existing = buckets_api.find_bucket_by_name(self.influx_db)
                if existing is not None:
                    buckets_api.delete_bucket(existing)

                org_name = self.influx_org
                orgs = self.influxClient.organizations_api().find_organizations()
                org = [o for o in orgs if o.name == org_name][0]

                # Keep data forever, with 90-day shard groups
                # (original note: #3600*24*365*200).
                rules = BucketRetentionRules(
                    type="forever",
                    every_seconds=0,
                    shard_group_duration_seconds=60 * 60 * 24 * 90)
                buckets_api.create_bucket(bucket_name=self.influx_db,
                                          retention_rules=rules,
                                          org_id=org.id)

        except Exception as e:
            self.logger.exception('Crash!', exc_info=e)
            sys.exit(-99)
Esempio n. 5
0
 def retention_rule(cls) -> BucketRetentionRules:
     """Return the default retention rule: expire data after one hour."""
     one_hour_seconds = 3600
     return BucketRetentionRules(type='expire', every_seconds=one_hour_seconds)
Esempio n. 6
0
# Create, update and list InfluxDB buckets via the v2 client API.
from influxdb_client import InfluxDBClient, BucketRetentionRules

# Connection credentials.
url = "http://localhost:8086"
token = "my-token"
org = "my-org"

with InfluxDBClient(url=url, token=token) as client:
    buckets_api = client.buckets_api()

    # Create a bucket named "bucket-by-python" whose data expires after
    # 3600 seconds. (The original used f-strings with no placeholders and
    # bare string expressions as comments; both cleaned up here.)
    print("------- Create -------\n")
    retention_rules = BucketRetentionRules(type="expire", every_seconds=3600)
    created_bucket = buckets_api.create_bucket(bucket_name="bucket-by-python",
                                               retention_rules=retention_rules,
                                               org=org)
    print(created_bucket)

    # Update the bucket's description.
    print("------- Update -------\n")
    created_bucket.description = "Update description"
    created_bucket = buckets_api.update_bucket(bucket=created_bucket)
    print(created_bucket)

    # List all buckets (listing code continues past this excerpt).
    print("\n------- List -------\n")
Esempio n. 7
0
def __influx_main__():
    """Recreate the MY_BUCKET bucket and write one sample temperature
    point per EU capital to InfluxDB v2.

    Connection parameters (MY_TOKEN, ORG, MY_BUCKET) come from module
    globals. All failures are caught, printed, and swallowed.
    """
    print("Ready to Connect to InfluxDB")

    # Sample data: EU capital -> temperature in degrees Celsius.
    # Replaces 15 near-identical Point construction statements.
    capital_temperatures = [
        ("Amsterdam", 18), ("Athens", 19), ("Belgrade", 16), ("Berlin", 16),
        ("Bern", 20), ("Bratislava", 20), ("Brussels", 18), ("Bucharest", 20),
        ("Copenhagen", 12), ("Dublin", 14), ("Helsinki", 3), ("Kyiv", 8),
        ("Lisbon", 19), ("London", 19), ("Madrid", 17),
    ]

    try:
        print("Inside Try-Catch for InfluxDB")

        # InfluxDB client v2.1.1 API.
        with InfluxDBClient(url="http://vibm-influxdb-influxdb2:80",
                            token=MY_TOKEN,
                            org=ORG,
                            debug=True) as client:
            version = client.ping()
            print(f"Database Ping = {version}")

            buckets_api = client.buckets_api()

            buckets = buckets_api.find_buckets(org=ORG).buckets
            print("------------- List All Buckets -------------")
            for bucket in buckets:
                print(f"Existing Bucket --> {bucket}")

            # Drop the target bucket if it already exists so we start clean.
            bucket_id = buckets_api.find_bucket_by_name(MY_BUCKET)
            if bucket_id is not None:
                print("------------- Delete Tandem Bucket -------------")
                buckets_api.delete_bucket(bucket_id)
                print(f"Bucket Deleted ... {MY_BUCKET}")

            print("---------- Create Bucket for Tandem Data ----------")
            retention_rules = BucketRetentionRules(type="expire",
                                                   every_seconds=3600)
            created_bucket = buckets_api.create_bucket(
                bucket_name=MY_BUCKET,
                retention_rules=retention_rules,
                org=ORG)
            print(f"Bucket Created ... {created_bucket}")

            print("---------- Write Data to Bucket ----------")
            write_api = client.write_api(write_options=SYNCHRONOUS)

            # Each point gets its own timestamp, as in the original.
            # TODO(review): datetime.utcnow() is deprecated since 3.12;
            # moving to timezone-aware datetimes needs a write test.
            for city, temperature in capital_temperatures:
                point = (Point("eu_capitals_oC")
                         .tag("location", city)
                         .field("temperature", temperature)
                         .time(datetime.utcnow(), WritePrecision.MS))
                write_api.write(bucket=MY_BUCKET, record=point)

        # NOTE: the `with` block above already closed the client; the
        # original's extra client.close() here was redundant and removed.
        # Dead commented-out v1.x example code was also removed.
        results = []
        print(f"My Results = {results}")

    except Exception as e:
        print(f"Exception is Raised ... {e}")

    return
Esempio n. 8
0
def main():
    """Entry point: dispatch on the first CLI argument.

    Commands: init, help, status, log. ``log`` computes per-user
    upload/download deltas against the previously cached snapshot and
    writes events and usage to InfluxDB.
    """
    # BUG FIX: the original indexed sys.argv[1] before checking
    # len(sys.argv), so running with no arguments crashed with
    # IndexError instead of printing this message. Guard first.
    if len(sys.argv) < 2:
        print("Not Enough Arguments Provided")
        print("For list of arguments use python OpenVPNLogging.py help")
        return

    command = sys.argv[1].casefold()

    if command == "init":
        init_environment()
        return

    # Best-effort bucket creation: creation fails when it already exists.
    try:
        bucket_api.create_bucket(bucket=Bucket(
            name=BUCKET,
            org_id=ORG_ID,
            retention_rules=[BucketRetentionRules(every_seconds=604800)]))
    except Exception:
        print("Bucket already exists")  # typo fixed ("exits")

    try:
        if not os.path.exists(IP_LOOKUP_TABLE_PATH):
            build_IP_lookup_table()
    except Exception:
        print("issue with initial Ip table build")
        print("Try (re)running the init command")
        return

    user_data = get_and_match_user_data()

    if command == "help":
        help()
        return

    if command == "status":
        print_formated_data(user_data)
        purge_lookup_table()
        return

    if command == "log":
        _handle_log(user_data)
        return

    print(
        "Invalid Argument, Please use the help argument in order to see available commands"
    )


def _handle_log(user_data):
    """Handle the ``log`` command: record login/logout events and
    per-user upload/download deltas since the cached snapshot."""
    prev_data = load_prev_pulled_data()
    if prev_data == False:  # noqa: E712 — loader signals "no cache" with False
        # First run: initialise every current user with zeroed usage.
        for current in user_data.values():
            log_login_event(client, current)
            log_data_usage(client, current[0], current[1], current[2], 0, 0)
            cache_prev(user_data)
        return

    log_active_users(client, user_data)
    log_failed_auth(client)

    # Users present in the cache but absent now have logged out.
    for key, prev in prev_data.items():
        if key not in user_data:
            log_logout_event(client, prev)
            # A zero-usage entry after logout keeps influx last() useful.
            log_data_usage(client, prev[0], prev[1], prev[2], 0, 0)

    for key, current in user_data.items():
        try:
            prev = prev_data[key]
        except KeyError:
            # Not in the cache: a fresh login. Initialise with zeroes.
            log_login_event(client, current)
            log_data_usage(client, current[0], current[1], current[2], 0, 0)
            cache_prev(user_data)
            continue

        do_login = False

        # A negative delta means the user disconnected and reconnected
        # inside the polling window; treat it as a fresh session.
        data_up_delta = int(current[3]) - int(prev[3])
        if data_up_delta < 0:
            data_up_delta = 0
            do_login = True

        data_down_delta = int(current[4]) - int(prev[4])
        if data_down_delta < 0:
            data_down_delta = 0
            do_login = True

        if do_login:
            log_logout_event(client, current)
            log_login_event(client, current)

        # The OpenVPN log occasionally resets and later restores its
        # counters; a backwards-moving timestamp invalidates the deltas.
        if datetime_to_mili_two(current[5]) < datetime_to_mili_two(prev[5]):
            cache_prev(user_data)
            data_down_delta = 0
            data_up_delta = 0

        log_data_usage(client, current[0], current[1], current[2],
                       data_up_delta, data_down_delta)

    cache_prev(user_data)
    purge_lookup_table()
    end_time = time.perf_counter()
    print("Total Runtime: {time}s".format(time=end_time - start_time))