Code example #1
def get_port_monitoring_results():
    port_monitoring_results = {}
    port_monitoring_results['checks'] = []
    port_monitoring_results['total_checks'] = 0
    port_monitoring_results['up'] = 0
    port_monitoring_results['down'] = 0
    port_monitoring_results['paused'] = 0
    port_monitoring_results['total_accounts'] = 0
    port_monitoring_results['failed_accounts'] = 0
    port_monitoring_results['working_accounts'] = 0
    port_monitoring_results['working_percentage'] = 100
    port_monitoring_results['up_percent'] = 0
    port_monitoring_results['down_percent'] = 0
    port_monitoring_results['paused_percent'] = 100
    for module in get_all_data('port_monitoring:*'):
        module_json = get_data(module)
        module_checks = json.loads(module_json)[0]
        port_monitoring_results['checks'] += module_checks

    for module in get_all_data('port_monitoring_success:*'):
        module_json = get_data(module)
        module_success = json.loads(module_json)[0]
        port_monitoring_results['up'] += module_success['up']
        port_monitoring_results['down'] += module_success['down']
        port_monitoring_results['paused'] += module_success['paused']
        port_monitoring_results['total_accounts'] += module_success[
            'total_accounts']
        # All data for each module is stored in one redis key.  If that
        # key is stale then we consider all accounts that module reports
        # as failed
        if module_success['valid_until'] < time.time() * 1000:
            port_monitoring_results['failed_accounts'] += module_success[
                'total_accounts']
        else:
            port_monitoring_results['failed_accounts'] += module_success[
                'failed_accounts']

    port_monitoring_results['total_checks'] = len(
        port_monitoring_results['checks'])
    # If there are no checks we can leave the up, down and paused percentages
    # and the working percentage at their defaults.
    # This saves a little time and avoids dividing by zero.
    if port_monitoring_results['total_checks']:
        port_monitoring_results['down_percent'] = (
            port_monitoring_results['down'] /
            port_monitoring_results['total_checks']) * 100
        port_monitoring_results['paused_percent'] = (
            port_monitoring_results['paused'] /
            port_monitoring_results['total_checks']) * 100
        # Artificially forcing percentages to add up to 100%
        port_monitoring_results['up_percent'] = 100 - (
            port_monitoring_results['down_percent'] +
            port_monitoring_results['paused_percent'])
        port_monitoring_results['working_accounts'] = port_monitoring_results[
            'total_accounts'] - port_monitoring_results['failed_accounts']
        # Guard against dividing by zero if no accounts have been reported
        if port_monitoring_results['total_accounts']:
            port_monitoring_results['working_percentage'] = (
                port_monitoring_results['working_accounts'] /
                port_monitoring_results['total_accounts']) * 100

    return port_monitoring_results
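The function above relies on get_all_data and get_data, helper functions defined elsewhere in the warboard project. Below is a minimal sketch of what such Redis-backed helpers could look like, assuming redis-py and the single-element JSON list storage convention described in code example #5; the connection settings and bodies here are illustrative assumptions, not the project's actual implementation. On a large keyspace, redis-py's scan_iter would be preferable to keys to avoid blocking the server.

import redis

# Hypothetical Redis helpers matching the calls used in these examples;
# connection details are placeholders.
redis_connection = redis.StrictRedis(
    host='localhost', port=6379, db=0, decode_responses=True)

def get_all_data(pattern):
    # Return all keys matching a glob pattern such as 'port_monitoring:*'
    return redis_connection.keys(pattern)

def get_data(key):
    # Return the raw string stored under a single key, or None if it is missing
    return redis_connection.get(key)

def set_data(key, value):
    # Store a plain string value under the given key
    redis_connection.set(key, value)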
Code example #2
def list_unreporting_servers():
    found_servers = set()
    for key in get_all_data('resources:*'):
        host_data = json.loads(get_data(key))[0]
        found_servers.add(host_data['name'])

    reporting_servers = set()
    tick_data, tick_data_validity = get_tick_data()
    for host in tick_data:
        host_data = tick_data[host]
        reporting_servers.add(host_data['name'])

    newrelic_infra_data, newrelic_infra_data_validity = get_newrelic_infra_data()
    for host in newrelic_infra_data:
        host_data = newrelic_infra_data[host]
        reporting_servers.add(host_data['name'])

    prometheus_data, prometheus_data_validity = get_prometheus_data()
    for user in prometheus_data:
        for host in prometheus_data[user]:
            host_data = prometheus_data[user][host]
            reporting_servers.add(host_data['name'])

    datadog_data, datadog_data_validity = get_datadog_data()
    for user in datadog_data:
        for host in datadog_data[user]:
            host_data = datadog_data[user][host]
            reporting_servers.add(host_data['name'])

    return found_servers - reporting_servers
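A possible usage sketch, assuming the module-level logger used in the other examples, would be to log each server that has resource data stored but does not appear in any provider's feed:

for server in sorted(list_unreporting_servers()):
    logger.warning(
        '{} has resource data stored but is not reporting to any provider'.format(
            server))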
Code example #3
def get_calendar_items():
    calendar_items = []
    calendar_keys = sorted(
        get_all_data('calendar_*'))  # Get all the calendar keys from Redis
    for key in calendar_keys:
        old_date = key.replace('calendar_', '')
        convert = datetime.datetime.strptime(
            old_date,
            '%Y-%m-%d')  # Convert the date to a nice format for the Warboard
        calendar_items.append({convert.strftime('%a %d %B'): get_data(key)})
    return calendar_items
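Each element of the returned list is a single-entry dictionary keyed by the formatted date. As an illustrative usage sketch (the values shown in the comment are made up):

for item in get_calendar_items():
    for date_label, summary in item.items():
        # e.g. date_label = 'Mon 01 January', summary = '09:00 - 10:00: Team meeting'
        print('{}: {}'.format(date_label, summary))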
Code example #4
def store_calendar_items():
    with open(calendar_export) as c_file:  # the with statement closes the file for us
        try:
            c_data = json.load(c_file)
        except ValueError:
            c_data = False
    if c_data is not False:
        prune_calendar_items()
        for item in c_data['items']:
            if 'dateTime' in item['start']:  # Check if a start time is set
                # Split the dateTime to get the date and store it in the date field
                item['start']['date'] = item['start']['dateTime'].split('T')[0]
                current_summary = item['summary']
                try:
                    start_time = datetime.datetime.strptime(
                        item['start']['dateTime'].split('T')[1],
                        '%H:%M:%SZ').strftime(
                            '%H:%M')  # Convert the start time to a nice time
                    end_time = datetime.datetime.strptime(
                        item['end']['dateTime'].split('T')[1],
                        '%H:%M:%SZ').strftime(
                            '%H:%M: ')  # Convert the end time to a nice time
                except ValueError:
                    start_time = datetime.datetime.strptime(
                        item['start']['dateTime'].split('T')[1],
                        '%H:%M:%S+01:00').strftime(
                            '%H:%M')  # Fall back to the +01:00 offset used during DST
                    end_time = datetime.datetime.strptime(
                        item['end']['dateTime'].split('T')[1],
                        '%H:%M:%S+01:00').strftime('%H:%M: ')
                item['summary'] = '{} - {}{}'.format(
                    start_time, end_time, current_summary
                )  # Add the start and end time to the summary
            # Check whether a key already exists for the date in question
            current = get_data('calendar_{}'.format(item['start']['date']))
            if current is None:
                # No entry exists for this date yet, so create one
                set_data('calendar_{}'.format(item['start']['date']),
                         item['summary'])
            elif item['summary'] not in current:
                # A key exists but doesn't contain this summary, so there are
                # two or more items for one date; append to the existing item
                set_data('calendar_{}'.format(item['start']['date']),
                         '{}{}{}'.format(current, calendar_split,
                                         item['summary']))
    else:
        logger.error('Could not parse calendar')
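The nested try/except above only covers the fixed '+01:00' offset used during DST. A more general alternative, sketched below under the assumption of Python 3.7+ (where strptime's %z accepts both 'Z' and offsets containing a colon), would be to parse the full timestamp once; this is an illustration, not the project's implementation.

import datetime

def format_event_time(date_time_string):
    # Parse an RFC 3339 style timestamp such as '2021-06-01T09:00:00Z' or
    # '2021-06-01T09:00:00+01:00' and return just the 'HH:MM' part
    parsed = datetime.datetime.strptime(date_time_string, '%Y-%m-%dT%H:%M:%S%z')
    return parsed.strftime('%H:%M')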
Code example #5
File: resources.py Project: dogsbodytech/warboard
def get_resource_results():
    """
    Merges lists returned by resource modules into one list in the correct
    format for warboard.html to display monitored resources

    {% for check in resource_results['checks']|sort(attribute='orderby')|reverse %}

    <tr class="danger lead"><td>{{ check['name'] }}</td><td>{{ check['summary']['cpu'] }}%</td><td>{{ check['summary']['memory'] }}%</td><td>{{ check['summary']['disk_io'] }}%</td><td>{{ check['summary']['fullest_disk'] }}%</td></tr>

    """
    resource_results = {}
    resource_results['checks'] = []
    resource_results['green'] = 0
    resource_results['red'] = 0
    resource_results['orange'] = 0
    resource_results['blue'] = 0
    resource_results['failed_accounts'] = 0
    resource_results['total_accounts'] = 0
    resource_results['total_checks'] = 0

    # Defaults for when no data is reported, working towards having modules be
    # modular / optional
    resource_results['blue_percent'] = 100
    resource_results['red_percent'] = 0
    resource_results['orange_percent'] = 0
    resource_results['green_percent'] = 0
    resource_results['working_percentage'] = 100

    # Check how many accounts failed from each module and add them to the
    # total failed accounts.  If the data from a module is considered stale
    # then all of its accounts will be considered failed.
    milliseconds_since_epoch = time.time() * 1000
    for module in get_all_data('resources_success:*'):
        module_success_json = get_data(module)
        module_success = json.loads(module_success_json)[0]
        resource_results['total_accounts'] += module_success['total_accounts']
        resource_results['total_checks'] += module_success['total_checks']
        milliseconds_since_epoch_module_data_is_valid_until = module_success['valid_until']
        if milliseconds_since_epoch > milliseconds_since_epoch_module_data_is_valid_until:
            resource_results['failed_accounts'] += module_success['total_accounts']
            logger.error('Data for {} is stale, please check the daemon is functioning properly'.format(module))
        else:
            resource_results['failed_accounts'] += module_success['failed_accounts']

    # We will count checks in so we can compare it against the number of checks
    # reported by the daemon
    checks_found = 0
    # Get list of keys in the format resources:module#uuid
    for host in get_all_data('resources:*'):
        try:
            # Dictionaries converted to JSON and stored in redis come back as
            # strings, so each value is wrapped in a single-element list as a
            # workaround (ast.literal_eval would also work)
            host_data = json.loads(get_data(host))[0]
            resource_results['checks'].append(host_data)
            # get the health status colour of the current check, and then add
            # one to the number of checks with that health status
            resource_results[host_data['health_status']] += 1
            checks_found += 1
        except Exception as e:
            # I would rather log to uwsgi's log but I'll sort this out later
            logger.error('Data for {} is not in a valid format: {}'.format(host, e))

    # If we are getting back old checks that are no longer reporting, and
    # hence are not counted in the total_checks variable, then they have failed.
    # If we are getting back less checks than we stored then something has
    # gone really wrong or we caught the weekly cron that clears the keys.
    if resource_results['total_checks'] != checks_found:
        logger.info('The number of checks stored in the database doesn\'t '\
            'match the number reported by the daemon, it is likely some '\
            'servers are no longer reporting, run '\
            'resources_list_unreporting_servers.py to look into this.')

    # The number of checks we are outputting is authoritative over the number
    # we expected to be there.  At the moment we just log the fact that they
    # were different; it would be nice to have a visual display or send an
    # email, but there isn't a correct place to do this at the moment.
    resource_results['total_checks'] = checks_found

    total_results = resource_results['green'] + resource_results['red'] + resource_results['orange'] + resource_results['blue']
    if total_results != 0:
        resource_results['red_percent'] = ( resource_results['red'] / total_results ) * 100
        resource_results['orange_percent'] = ( resource_results['orange'] / total_results ) * 100
        resource_results['blue_percent'] = ( resource_results['blue'] / total_results ) * 100
        # I want the percentages to always add up to 100 and green seems the
        # most disposable / least affected by any rounding issues
        resource_results['green_percent'] = 100 - ( resource_results['red_percent'] + resource_results['orange_percent'] + resource_results['blue_percent'] )

    resource_results['working_accounts'] = resource_results['total_accounts'] - resource_results['failed_accounts']
    # Guard against dividing by zero when no accounts have reported;
    # working_percentage keeps its default of 100 in that case
    if resource_results['total_accounts']:
        resource_results['working_percentage'] = 100 - (( resource_results['failed_accounts'] / resource_results['total_accounts'] ) * 100 )
    logger.debug('working_percentage: {}'.format(resource_results['working_percentage']))
    return resource_results
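As a quick sanity check of the returned structure (hypothetical usage, not part of the project): because green_percent is derived as 100 minus the other three percentages, the four colour percentages should always total 100.

results = get_resource_results()
colour_total = (results['green_percent'] + results['red_percent'] +
                results['orange_percent'] + results['blue_percent'])
# Rounding guards against small floating point error in the division above
assert round(colour_total) == 100
logger.debug('{}/{} accounts working ({}%)'.format(
    results['working_accounts'], results['total_accounts'],
    results['working_percentage']))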