Example 1
def report_metrics(server, token, pure1_api_id, pure1_pk_file, pure1_pk_pwd, resource_type, interval_seconds, start_time, resolution_ms):
    
    pure1Client = pure1.Client(private_key_file=pure1_pk_file, private_key_password=pure1_pk_pwd, app_id=pure1_api_id)

    metrics_list = get_metrics_list(pure1Client, resource_type, resolution_ms)

    #hardcoding metrics array list for testing purposes
    #testMetric = pure1.Metric(name = 'array_read_iops')
    #metrics_list = [testMetric]

    response = None
    if resource_type == "arrays":
        response = pure1Client.get_arrays()
        #currently only supports array metrics
    resources = []
    if response is not None:
        resources = list(response.items)

    wavefront_sender = WavefrontDirectClient(
        server=server,
        token=token,
        max_queue_size=10000,
        batch_size=4000,
        flush_interval_seconds=5)

    #Retrieves data from Pure1 for the last 7 days (or based on specified start time) in increments of 30 minutes
    days_count = 7
    if interval_seconds == -1:
        interval_seconds = 1800
        if start_time != 0:
            initial_start = start_time
            end = int((datetime.datetime.now() - datetime.timedelta(hours = 2)).timestamp())
            timespan_seconds = end - start_time
        else:
            timespan_seconds = 3600 * (24 * days_count - 2) #querying for `days_count` days of data, ending 2 hours before now
            initial_start = int((datetime.datetime.now() - datetime.timedelta(days = days_count)).timestamp())
        
        loops = -(-timespan_seconds // interval_seconds) # ceiling division: number of interval_seconds-long windows in the timespan

        for i in range(loops):
            start = initial_start + i*interval_seconds
            end = start + interval_seconds
            print("Start Time:", start, "End Time:", end)
            get_send_data(pure1Client, wavefront_sender, metrics_list, resources, server, token, resolution_ms, start, end)
    else:
        end = int((datetime.datetime.now() - datetime.timedelta(hours = 2)).timestamp())
        start = end - interval_seconds
        print("Start Time:", start, "End Time:", end)
        get_send_data(pure1Client, wavefront_sender, metrics_list, resources, server, token, resolution_ms, start, end)

    wavefront_sender.close()
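
The helpers get_metrics_list and get_send_data are called above but not included in the snippet. Below is a minimal sketch of what get_send_data might do, assuming the same pure1 get_metrics_history call used in the later examples and the Wavefront SDK's send_metric; the aggregation type and the metric name prefix are assumptions, not taken from the original script.

def get_send_data(pure1Client, wavefront_sender, metrics_list, resources, server,
                  token, resolution_ms, start, end):
    # Hypothetical sketch: the original helper is not part of this example.
    # For each array, query the requested metrics over [start, end] and forward
    # every data point to Wavefront, using the array name as the source.
    metric_names = [metric.name for metric in metrics_list]
    for resource in resources:
        response = pure1Client.get_metrics_history(
            aggregation='avg',                  # assumption
            names=metric_names,
            resource_ids=resource.id,
            resolution=resolution_ms,
            start_time=start,
            end_time=end)
        if not hasattr(response, 'items'):
            continue
        for metric_item in response.items:
            if not metric_item.data:
                continue
            for timestamp_ms, value in metric_item.data:
                wavefront_sender.send_metric(
                    name='pure1.' + metric_item.name,    # assumed prefix
                    value=value,
                    timestamp=int(timestamp_ms / 1000),  # Pure1 timestamps are in ms
                    source=resource.name,
                    tags={'resource_type': 'arrays'})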
Example 2
    def parse(self, inventory, loader, path, cache=True):
        # call base method to ensure properties are available for use with other helper methods
        super(InventoryModule, self).parse(inventory, loader, path, cache)

        # this method will parse 'common format' inventory sources and
        # update any options declared in DOCUMENTATION as needed
        self._read_config_data(path)

        app_id = self.get_option('app_id')
        private_key_file = os.path.expanduser(
            self.get_option('private_key_file'))
        private_key_password = self.get_option('private_key_password')

        pure1Client = pure1.Client(app_id=app_id,
                                   private_key_file=private_key_file,
                                   private_key_password=private_key_password)

        self.generate_fleet_inventory(pure1Client)
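
The plugin hands the real work to generate_fleet_inventory, which is not shown here. A minimal sketch of what it might look like, assuming the standard Ansible inventory API (add_group, add_host, set_variable) and the same array fields used in the other examples; the group names and host variables are illustrative only.

    def generate_fleet_inventory(self, pure1Client):
        # Hypothetical sketch: the original helper is not part of this example.
        response = pure1Client.get_arrays()
        if response is None or not hasattr(response, 'items'):
            return
        for array in response.items:
            # Group FlashArray and FlashBlade systems separately (assumed grouping).
            group = 'flasharray' if 'FA' in array.os else 'flashblade'
            self.inventory.add_group(group)
            self.inventory.add_host(array.name, group=group)
            self.inventory.set_variable(array.name, 'pure1_array_id', array.id)
            self.inventory.set_variable(array.name, 'pure1_model', array.model)
            self.inventory.set_variable(array.name, 'pure1_os_version', array.version)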
Example 3
def generate_fleet_report(pure1_api_id, pure1_pk_file, pure1_pk_pwd):
    
    pure1Client = pure1.Client(private_key_file=pure1_pk_file, private_key_password=pure1_pk_pwd, app_id=pure1_api_id)

    response = pure1Client.get_arrays(filter="contains(model,'FlashBlade')")

    arrays = []
    if response is not None:
        arrays = list(response.items)
    report_filename = str.format('pure1_fb_inventory_last{}_days_{}.csv', REPORTING_INTERVAL_DAYS, AGGREGATION_TYPE)
    with open(report_filename, 'w') as csvfile:
        filewriter = csv.writer(csvfile, delimiter=',',
                                quotechar='|', quoting=csv.QUOTE_MINIMAL)
        filewriter.writerow(['Array Name', 'Model', 'OS Version', 'Total Blades', 'Used Blades', 'Total Capacity (TiB)', 'File Systems (TiB)','Object Stores (GiB)','Snapshots (GiB)', str.format('% Used (Last {} days)', REPORTING_INTERVAL_DAYS), str.format('% Used (Prev. {} days)', REPORTING_INTERVAL_DAYS), 'Data Reduction Ratio'])

        for array in arrays:
            os_version = str.format("{} {}",array.os, array.version)
            metrics_names = ['array_total_capacity', 'array_file_system_space', 'array_object_store_space','array_snapshot_space', 'array_data_reduction']
            start = int((datetime.datetime.now() - datetime.timedelta(days = REPORTING_INTERVAL_DAYS)).timestamp())
            #print(start)
            start_comparison = int(start - REPORTING_INTERVAL_DAYS * METRIC_RESOLUTION_DAY / 1000)  # resolution is in ms, divide by 1000 for seconds
            #print(start_comparison)
            end = int((datetime.datetime.now()).timestamp())
            # Look up this array's blades; total/used blade counts are read off the first blade's parent array reference
            response = pure1Client.get_blades(filter='arrays.id=\''+array.id+'\'')
            if hasattr(response, 'items'):
                arrays2 = next(iter(response.items))
                array_blades = next(iter(arrays2.arrays))
            
            response = pure1Client.get_metrics_history(aggregation=AGGREGATION_TYPE,names=metrics_names,resource_ids=array.id, resolution=METRIC_RESOLUTION_DAY*REPORTING_INTERVAL_DAYS, start_time=start, end_time=end)
            response_comparison = pure1Client.get_metrics_history(aggregation=AGGREGATION_TYPE,names=metrics_names,resource_ids=array.id, resolution=METRIC_RESOLUTION_DAY*REPORTING_INTERVAL_DAYS, start_time=start_comparison, end_time=start)

            total_capacity = 0
            total_capacity_previous = 0
            file_system_space = 0
            object_store_space = 0
            data_reduction = 0
            snapshot_space = 0
            total_used = 0
            total_used_previous = 0

            if hasattr(response, 'items'):
                metrics_items = list(response.items)
                compared_metrics = iter(list(response_comparison.items))
                for metric_item in metrics_items:
                    compared_metric = next(compared_metrics)
                    if metric_item.data:
                        compared_metric_iter = iter(compared_metric.data)
                        #print('compared metric')
                        #print(compared_metric)
                        for metric_data in metric_item.data:
                            try:
                                compared_metric_data = next(compared_metric_iter)
                            except StopIteration:
                                compared_metric_data = [0,0]
                            #print(metric_item.name)
                            #print(compared_metric_data)
                            #print(metric_data)
                            metric_name = metric_item.name
                            if metric_name == 'array_total_capacity':
                                total_capacity = round(metric_data[1]/BYTES_IN_A_TEBIBYTE,2)
                                total_capacity_previous = round(compared_metric_data[1]/BYTES_IN_A_TEBIBYTE,2)
                            elif metric_name == 'array_object_store_space':
                                object_store_space = round(metric_data[1]/BYTES_IN_A_GIBIBYTE, 2)
                                total_used = total_used + metric_data[1]
                                total_used_previous = total_used_previous + compared_metric_data[1]
                            elif metric_name == 'array_file_system_space':
                                file_system_space = round(metric_data[1]/BYTES_IN_A_TEBIBYTE, 2)
                                total_used = total_used + metric_data[1]
                                total_used_previous = total_used_previous + compared_metric_data[1]
                            elif metric_name == 'array_snapshot_space':
                                total_used = total_used + metric_data[1]
                                total_used_previous = total_used_previous + compared_metric_data[1]
                                snapshot_space = round(metric_data[1]/BYTES_IN_A_GIBIBYTE, 2)
                            elif metric_name == 'array_data_reduction':
                                data_reduction = round(metric_data[1], 2)

                percent_used_previous = 0
                if total_capacity_previous != 0:
                    percent_used_previous = round(total_used_previous*100/total_capacity_previous/BYTES_IN_A_TEBIBYTE, 1)
                if total_capacity != 0:
                    percent_used = round(total_used*100/total_capacity/BYTES_IN_A_TEBIBYTE, 1)

                    filewriter.writerow([array.name, array.model, os_version, array_blades.total_blades, array_blades.used_blades, total_capacity, file_system_space, object_store_space, snapshot_space, percent_used, percent_used_previous, data_reduction])
            else:
                if response.status_code == 429 or response.status_code == 404:
                    print(response.errors[0].message)
                    if response.errors[0].context is not None:
                        print(response.errors[0].context)
                    if response.status_code == 429:
                        print("Remaining requests: " + response.headers.x_ratelimit_limit_minute) 
                else:     
                    print(str.format("error code: {}\n error: {}", response.status_code, response.errors[0].message))
                    print(str.format(" metrics: {}", str(metrics_names)))
Example 4
def generate_fleet_report(pure1_api_id, pure1_pk_file, pure1_pk_pwd):

    pure1Client = pure1.Client(private_key_file=pure1_pk_file,
                               private_key_password=pure1_pk_pwd, app_id=pure1_api_id)

    # Get all arrays, both FlashArray and FlashBlade.
    response = pure1Client.get_arrays()

    # Check to make sure we successfully connected, 200=OK
    if response.status_code != 200:
        display_response_error(response)
        return
    
    # response.items is a generator with no length; materializing it into a
    # list lets us count how many arrays were returned.
    arrays = list(response.items)
    if len(arrays) == 0:
        print("Error: No arrays returned by Pure1 API ")
        return

    with open('pure1_report_fa.csv', 'w') as csvfile_fa:
        with open('pure1_report_fb.csv', 'w') as csvfile_fb:
            # Create two CSV files for writing to
            filewriter_fa = csv.writer(csvfile_fa, delimiter=',',
                                    quotechar='|', quoting=csv.QUOTE_MINIMAL)
            filewriter_fa.writerow(['Array Name', 'Array ID', 'Model', 'OS Version',
                                'Total Capacity (TB)', 'Data Reduction', '% Used',
                                'Shared Space (TB)', 'Volume Space (TB)',
                                'Snapshot Space (TB)', 'System Space (GB)', 'Max Load 24Hours'])
            
            filewriter_fb = csv.writer(csvfile_fb, delimiter=',',
                                    quotechar='|', quoting=csv.QUOTE_MINIMAL)
            filewriter_fb.writerow(['Array Name', 'Array ID', 'Model', 'OS Version',
                                'Total Capacity (TB)', 'Data Reduction', '% Used',
                                'File System Space (TB)', 'Object Store Space (TB)'])

            # Go through all arrays
            count = 0
            for array in arrays:
                progress(count, len(arrays), "Getting array metrics...                      ")
                response = get_metrics(pure1Client, array, arrays, count)
                if not response:
                    # get_metrics already printed an error message if the response is None.
                    return
                    
                # Increase the count for the progress bar
                count += 1
                
                # Reset all values to None so we don't accidentally carry a value
                # from the previous array into the current record.
                total_capacity = data_reduction = volume_space = pcnt_used = fs_space = object_space = None
                shared_space = snapshot_space = system_space = array_load = effective_space = None

                # response.items is a lazy generator; list() materializes all of
                # the items into an ordinary Python list up front.
                metrics_items = list(response.items)

                # we requested several metrics
                for metric_item in metrics_items:
                    if metric_item.data:
                        # Each metric has multiple data points, one per day for 7 days,
                        # ordered oldest to newest, so we keep the last value we find.
                        for metric_data in metric_item.data:
                            metric_name = metric_item.name
                            if metric_name == 'array_total_capacity':
                                total_capacity = round(metric_data[1] / BYTES_IN_A_TERABYTE, 2)
                            elif metric_name == 'array_data_reduction':
                                data_reduction = round(metric_data[1], 2)
                            elif metric_name == 'array_volume_space':
                                volume_space = round(metric_data[1] / BYTES_IN_A_TERABYTE, 2)
                            elif metric_name == 'array_shared_space':
                                shared_space = round(metric_data[1] / BYTES_IN_A_TERABYTE, 2)
                            elif metric_name == 'array_snapshot_space':
                                snapshot_space = round(metric_data[1] / BYTES_IN_A_TERABYTE, 2)
                            elif metric_name == 'array_file_system_space':
                                fs_space = round(metric_data[1] / BYTES_IN_A_TERABYTE, 2)
                            elif metric_name == 'array_object_store_space':
                                object_space = round(metric_data[1] / BYTES_IN_A_TERABYTE, 2)
                            elif metric_name == 'array_system_space':
                                system_space = round(metric_data[1] / BYTES_IN_A_GIGABYTE, 2)
                            elif metric_name == 'array_total_load': 
                                array_load = round(metric_data[1], 2)
                
                if 'FA' in array.os:
                    try:
                        # If any metric returned no data its value is still None and the
                        # arithmetic raises a TypeError, but we still want to write the
                        # record with whatever values we do have.
                        # Note: system_space is reported in GB while the other values are
                        # in TB, so convert it before summing.
                        pcnt_used = round(100 * (volume_space + shared_space + snapshot_space
                                + system_space / 1000) / total_capacity, 2)
                    except TypeError:
                        pass
                    # Write entry into CSV file.
                    filewriter_fa.writerow([array.name, array.id, array.model, array.version, 
                                            total_capacity, data_reduction,
                                            pcnt_used, shared_space, volume_space, 
                                            snapshot_space, system_space, array_load])
                
                elif 'FB' in array.os:
                    try:
                        # Even if a value is missing and this raises, we keep going.
                        pcnt_used = round(100 * (fs_space + object_space) / total_capacity, 2)
                    except TypeError:
                        pass
                    #write entry into CSV file
                    filewriter_fb.writerow([array.name, array.id, array.model, array.version, 
                                            total_capacity, data_reduction,
                                            pcnt_used, fs_space, object_space])
            
            # Push the progress bar to 100% so the run ends cleanly.
            progress(1,1, 'Finished, {} result(s) saved into 2 csv files.'.format(len(arrays)))
            print("")