Example #1
0
    # checking for established connections between E-L-K
    tools.check_elk_connection()

    # function to index the default template mapping of the data
    tools.index_template()

    # getting the required bucket names to index from get_s3_bucket_dir_to_index();
    # the helper returns the sentinel 1 when no billing report exists
    s3_dir_to_index = tools.get_s3_bucket_dir_to_index()
    if s3_dir_to_index == 1:
        # Use the print() function form: the Python 2 `print ...` statement is a
        # SyntaxError on Python 3, and the loop below already uses print().
        print('I could not find any billing report under Bucket',
              os.environ['S3_BUCKET_NAME'],
              'under Path', os.environ['S3_REPORT_PATH'])
        sys.exit(1)

    # downloading the csv file with get_req_csv_from_s3() and then calling
    # index_csv() to index it in our elasticsearch
    for dir_name in s3_dir_to_index:
        gzip_filename = tools.get_latest_zip_filename(dir_name)
        csv_filename = tools.get_req_csv_from_s3(dir_name, gzip_filename)
        print(gzip_filename, csv_filename)
        tools.index_csv(csv_filename, dir_name)

    # index default dashboards, visualizations and search mappings into the
    # .kibana index of elasticsearch; done last because the data is ready now
    tools.index_kibana()

    # delete the intermediate files
    tools.delete_csv_json_files()

    # /sbin/init is not working so this loop keeps the docker container up.
    # TODO: replace with a proper init/supervisor process.
    while True:
        time.sleep(0.2)
Example #2
0
    # initialize the tools class
    tools = Tools(s3)

    # checking for established connections between E-L-K
    tools.check_elk_connection()

    # function to index the default template mapping of the data
    tools.index_template()

    # getting the required bucket names to index from get_s3_bucket_dir_to_index()
    s3_dir_to_index = tools.get_s3_bucket_dir_to_index()

    # downloading the csv file with get_req_csv_from_s3() and then calling
    # index_csv() to index it in our elasticsearch
    for dir_name in s3_dir_to_index:
        gzip_filename = tools.get_latest_zip_filename(dir_name)
        csv_filename = tools.get_req_csv_from_s3(dir_name, gzip_filename)
        print(gzip_filename, csv_filename)
        tools.index_csv(csv_filename, dir_name)

    # index default dashboards, visualizations and search mappings into the
    # .kibana index of elasticsearch; done last because the data is ready now
    tools.index_kibana()

    # delete the intermediate files
    tools.delete_csv_json_files()

    # /sbin/init is not working so this loop keeps the docker container up.
    # Sleep instead of `pass`: a bare busy-wait pins an entire CPU core.
    # TODO: replace with a proper init/supervisor process.
    import time
    while True:
        time.sleep(0.2)

Example #3
0
import boto3
import subprocess
import time

if __name__ == '__main__':

    print('Orchestrate-test Running')
    # initialize the tools class
    tools = Tools()

    # checking for established connections between E-L-K
    tools.check_elk_connection()

    # function to index the default template mapping of the data
    tools.index_template()

    # index a sample test file with sum of unblended cost 1.24185686
    tools.index_csv('test/sample/test_ub_cost_2016-06.csv', '20160601-20160701')
    # rows of data in the csv, must be given as string
    data_count = '315'

    def _list_indices():
        """Return the elasticsearch index listing as text.

        check_output() returns bytes on Python 3, so decode before doing
        `in` substring checks against str literals (a bytes/str mismatch
        raises TypeError there).
        """
        raw = subprocess.check_output(
            ['curl -XGET "elasticsearch:9200/_cat/indices/"'],
            shell=True, stderr=subprocess.PIPE)
        return raw.decode('utf-8', 'replace')

    # poll until the sample index exists and reports all rows indexed;
    # sleep between polls so we do not hammer elasticsearch with curl calls
    while True:
        index_names = _list_indices()
        if 'aws-billing-2016.06' in index_names and data_count in index_names:
            break
        time.sleep(1)

    index_names = _list_indices()
    print(index_names)

    tools.index_kibana()
    tools.delete_csv_json_files()
Example #4
0
import subprocess
import time

if __name__ == '__main__':

    print('Orchestrate-test Running')
    # initialize the tools class
    tools = Tools()

    # checking for established connections between E-L-K
    tools.check_elk_connection()

    # function to index the default template mapping of the data
    tools.index_template()

    # index a sample test file with sum of unblended cost 1.24185686
    tools.index_csv('test/sample/test_ub_cost_2016-06.csv',
                    '20160601-20160701')
    # rows of data in the csv, must be given as string
    data_count = '315'

    def _list_indices():
        """Return the elasticsearch index listing as text.

        check_output() returns bytes on Python 3; decode before the `in`
        substring checks against str literals below (bytes/str mismatch
        raises TypeError otherwise).
        """
        raw = subprocess.check_output(
            ['curl -XGET "elasticsearch:9200/_cat/indices/"'],
            shell=True,
            stderr=subprocess.PIPE)
        return raw.decode('utf-8', 'replace')

    # poll until the sample index exists and reports all rows indexed;
    # sleep between polls so we do not hammer elasticsearch with curl calls
    while True:
        index_names = _list_indices()
        if 'aws-billing-2016.06' in index_names and data_count in index_names:
            break
        time.sleep(1)

    index_names = _list_indices()
    print(index_names)