Example #1
    def update_statistics(node, stat, elapsed_time=None):

        logger.debug(
            'Cluster updating: node {0} statistics time: {1:03f}'.format(
                node, elapsed_time))

        collect_ts = mh.elliptics_time_to_ts(stat['timestamp'])

        try:
            try:
                node.update_statistics(stat, collect_ts)
            except KeyError as e:
                logger.warn('Bad procfs stat for node {0} ({1}): {2}'.format(
                    node, e, stat))
                pass

            fss = set()
            good_node_backends = []

            backend_stats = NodeInfoUpdater._parsed_stats(stat['stats'])

            for b_stat in stat['backends'].itervalues():
                try:
                    NodeInfoUpdater._process_backend_statistics(
                        node, b_stat, backend_stats, collect_ts, fss,
                        good_node_backends)
                except Exception:
                    backend_id = b_stat['backend_id']
                    logger.exception(
                        'Failed to process backend {} stats on node {}'.format(
                            backend_id, node))
                    continue

            logger.debug(
                'Cluster updating: node {}, updating FS commands stats'.format(
                    node))
            for fs in fss:
                fs.update_commands_stats()

            logger.debug(
                'Cluster updating: node {}, updating node commands stats'.
                format(node))
            node.update_commands_stats(good_node_backends)

        except Exception as e:
            logger.exception(
                'Unable to process statistics for node {}'.format(node))
        finally:
            logger.debug(
                'Cluster updating: node {}, statistics processed'.format(node))
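
All four examples implement the same routine: convert the elliptics timestamp of the collected snapshot, update node-level statistics, walk `stat['backends']`, and finally refresh command stats for the touched filesystems and for the node itself. In this variant the per-backend work is delegated to `NodeInfoUpdater._process_backend_statistics()`. Note that `elapsed_time` defaults to `None` yet is fed straight into a `{1:03f}` format spec (zero-pad to width 3), so the debug line only works when a float is actually passed; `.3f` is likely what was intended. The helper below is a hypothetical sketch of what `mh.elliptics_time_to_ts()` is assumed to do here; the field names are an assumption, not something the example shows.

    # Hypothetical sketch of mh.elliptics_time_to_ts(): collapse an elliptics
    # time structure into float Unix seconds. The 'tv_sec'/'tv_usec' field
    # names are an assumption, not taken from the example above.
    def elliptics_time_to_ts(elliptics_time):
        return elliptics_time['tv_sec'] + elliptics_time['tv_usec'] / 1e6

    print(elliptics_time_to_ts({'tv_sec': 1400000000, 'tv_usec': 250000}))
    # 1400000000.25
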
Example #2
    def update_statistics(node, stat, elapsed_time=None):

        logger.debug(
            'Cluster updating: node {0} statistics time: {1:03f}'.format(
                node, elapsed_time
            )
        )

        collect_ts = mh.elliptics_time_to_ts(stat['timestamp'])

        try:
            try:
                node.update_statistics(stat, collect_ts)
            except KeyError as e:
                logger.warn('Bad procfs stat for node {0} ({1}): {2}'.format(node, e, stat))
                pass

            fss = set()
            good_node_backends = []

            backend_stats = NodeInfoUpdater._parsed_stats(stat['stats'])

            for b_stat in stat['backends'].itervalues():
                try:
                    NodeInfoUpdater._process_backend_statistics(
                        node,
                        b_stat,
                        backend_stats,
                        collect_ts,
                        fss,
                        good_node_backends
                    )
                except Exception:
                    backend_id = b_stat['backend_id']
                    logger.exception(
                        'Failed to process backend {} stats on node {}'.format(backend_id, node)
                    )
                    continue

            logger.debug('Cluster updating: node {}, updating FS commands stats'.format(node))
            for fs in fss:
                fs.update_commands_stats()

            logger.debug('Cluster updating: node {}, updating node commands stats'.format(node))
            node.update_commands_stats(good_node_backends)

        except Exception as e:
            logger.exception('Unable to process statistics for node {}'.format(node))
        finally:
            logger.debug('Cluster updating: node {}, statistics processed'.format(node))
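
Example #2 is the same code with long lines left unwrapped. The detail worth keeping is how the per-backend loop isolates failures: a backend with malformed statistics is logged with `logger.exception()` and skipped, so one bad entry cannot abort the whole node update. A minimal, self-contained sketch of that pattern (the names here are illustrative, not from the example):

    import logging

    logger = logging.getLogger(__name__)

    def process_backends(backends, process_one):
        # Log-and-continue loop mirroring the per-backend handling above:
        # a single bad entry must not break the whole statistics pass.
        good = []
        for backend_id, b_stat in backends.items():
            try:
                process_one(b_stat)
            except Exception:
                logger.exception('Failed to process backend %s stats', backend_id)
                continue
            good.append(backend_id)
        return good
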
Example #3
    def update_statistics(node, stat, elapsed_time=None):

        logger.debug(
            'Cluster updating: node {0} statistics time: {1:03f}'.format(
                node, elapsed_time))

        collect_ts = mh.elliptics_time_to_ts(stat['timestamp'])

        try:
            try:
                node.update_statistics(stat, collect_ts)
            except KeyError as e:
                logger.warn('Bad procfs stat for node {0} ({1}): {2}'.format(
                    node, e, stat))
                pass

            fss = set()
            good_node_backends = []

            backend_stats = NodeInfoUpdater._parsed_stats(stat['stats'])

            for b_stat in stat['backends'].itervalues():
                backend_id = b_stat['backend_id']
                b_stat['stats'] = backend_stats.get(backend_id, {})

                update_group_history = False

                node_backend_addr = '{0}/{1}'.format(node, backend_id)
                if node_backend_addr not in storage.node_backends:
                    node_backend = storage.node_backends.add(node, backend_id)
                    update_group_history = True
                else:
                    node_backend = storage.node_backends[node_backend_addr]

                nb_config = (b_stat['config'] if 'config' in b_stat else
                             b_stat['backend']['config'])

                gid = nb_config['group']

                if gid == 0:
                    # skip zero group ids
                    continue

                if b_stat['status']['state'] != 1:
                    logger.info(
                        'Node backend {0} is not enabled: state {1}'.format(
                            str(node_backend), b_stat['status']['state']))
                    node_backend.disable()
                    continue

                if gid not in storage.groups:
                    logger.debug('Adding group {0}'.format(gid))
                    group = storage.groups.add(gid)
                else:
                    group = storage.groups[gid]

                if 'vfs' not in b_stat['backend']:
                    logger.error(
                        'Failed to parse statistics for node backend {0}, '
                        'vfs key not found: {1}'.format(node_backend, b_stat))
                    continue

                fsid = b_stat['backend']['vfs']['fsid']
                fsid_key = '{host}:{fsid}'.format(host=node.host, fsid=fsid)

                if fsid_key not in storage.fs:
                    logger.debug('Adding fs {0}'.format(fsid_key))
                    fs = storage.fs.add(node.host, fsid)
                else:
                    fs = storage.fs[fsid_key]

                if node_backend not in fs.node_backends:
                    fs.add_node_backend(node_backend)
                fs.update_statistics(b_stat['backend'], collect_ts)

                fss.add(fs)
                good_node_backends.append(node_backend)

                node_backend.enable()

                logger.info('Updating statistics for node backend %s' %
                            (str(node_backend)))
                if 'backend' not in b_stat:
                    logger.warn('No backend in b_stat: {0}'.format(b_stat))
                elif 'dstat' not in b_stat['backend']:
                    logger.warn('No dstat in backend: {0}'.format(
                        b_stat['backend']))

                prev_base_path = node_backend.base_path
                try:
                    node_backend.update_statistics(b_stat, collect_ts)
                except KeyError as e:
                    logger.warn(
                        'Bad stat for node backend {0} ({1}): {2}'.format(
                            node_backend, e, b_stat))
                    pass

                if node_backend.base_path != prev_base_path:
                    update_group_history = True

                if b_stat['status'][
                        'read_only'] or node_backend.stat_commit_errors > 0:
                    node_backend.make_read_only()
                else:
                    node_backend.make_writable()

                if node_backend.group is not group:
                    logger.debug(
                        'Adding node backend {0} to group {1}{2}'.format(
                            node_backend, group.group_id,
                            ' (moved from group {0})'.format(
                                node_backend.group.group_id)
                            if node_backend.group else ''))
                    update_group_history = True
                    group.add_node_backend(node_backend)

                if update_group_history:
                    infrastructure.update_group_history(group)

            for fs in fss:
                fs.update_commands_stats()
            node.update_commands_stats(good_node_backends)

        except Exception as e:
            logger.exception(
                'Unable to process statistics for node {}'.format(node))
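
Example #3 inlines the per-backend processing instead of calling a helper. Each backend is keyed as '<node>/<backend_id>' in `storage.node_backends`, its group by id in `storage.groups`, and its filesystem as '<host>:<fsid>' in `storage.fs`; in each case the code either fetches an existing object or registers a new one with `add()`, and a freshly added node backend (or one whose base path or group changed) triggers `infrastructure.update_group_history(group)`. Below is a minimal sketch of such a "get or add" keyed collection, written under the assumption that membership tests, indexing and `add()` are all this code path needs; the project's real classes certainly carry more state.

    import collections

    Group = collections.namedtuple('Group', 'group_id')

    class Registry(object):
        # Illustrative 'get or add' keyed collection mirroring how
        # storage.groups is used above; not the project's actual class.
        def __init__(self, factory):
            self._items = {}
            self._factory = factory

        def __contains__(self, key):
            return key in self._items

        def __getitem__(self, key):
            return self._items[key]

        def add(self, key):
            item = self._factory(key)
            self._items[key] = item
            return item

    groups = Registry(Group)
    gid = 42
    group = groups.add(gid) if gid not in groups else groups[gid]
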
Example #4

    def update_statistics(node, stat, elapsed_time=None):

        logger.debug(
            'Cluster updating: node {0} statistics time: {1:03f}'.format(
                node, elapsed_time
            )
        )

        collect_ts = mh.elliptics_time_to_ts(stat['timestamp'])

        try:
            try:
                node.update_statistics(stat, collect_ts)
            except KeyError as e:
                logger.warn('Bad procfs stat for node {0} ({1}): {2}'.format(node, e, stat))
                pass

            fss = set()
            good_node_backends = []

            backend_stats = NodeInfoUpdater._parsed_stats(stat['stats'])

            for b_stat in stat['backends'].itervalues():
                backend_id = b_stat['backend_id']
                b_stat['stats'] = backend_stats.get(backend_id, {})

                update_group_history = False

                node_backend_addr = '{0}/{1}'.format(node, backend_id)
                if node_backend_addr not in storage.node_backends:
                    node_backend = storage.node_backends.add(node, backend_id)
                    update_group_history = True
                else:
                    node_backend = storage.node_backends[node_backend_addr]

                nb_config = (b_stat['config']
                             if 'config' in b_stat else
                             b_stat['backend']['config'])

                gid = nb_config['group']

                if gid == 0:
                    # skip zero group ids
                    continue

                if b_stat['status']['state'] != 1:
                    logger.info('Node backend {0} is not enabled: state {1}'.format(
                        str(node_backend), b_stat['status']['state']))
                    node_backend.disable()
                    continue

                if gid not in storage.groups:
                    logger.debug('Adding group {0}'.format(gid))
                    group = storage.groups.add(gid)
                else:
                    group = storage.groups[gid]

                if 'vfs' not in b_stat['backend']:
                    logger.error(
                        'Failed to parse statistics for node backend {0}, '
                        'vfs key not found: {1}'.format(node_backend, b_stat))
                    continue

                fsid = b_stat['backend']['vfs']['fsid']
                fsid_key = '{host}:{fsid}'.format(host=node.host, fsid=fsid)

                if fsid_key not in storage.fs:
                    logger.debug('Adding fs {0}'.format(fsid_key))
                    fs = storage.fs.add(node.host, fsid)
                else:
                    fs = storage.fs[fsid_key]

                if node_backend not in fs.node_backends:
                    fs.add_node_backend(node_backend)
                fs.update_statistics(b_stat['backend'], collect_ts)

                fss.add(fs)
                good_node_backends.append(node_backend)

                node_backend.enable()

                logger.info('Updating statistics for node backend %s' % (str(node_backend)))
                if 'backend' not in b_stat:
                    logger.warn('No backend in b_stat: {0}'.format(b_stat))
                elif 'dstat' not in b_stat['backend']:
                    logger.warn('No dstat in backend: {0}'.format(b_stat['backend']))

                prev_base_path = node_backend.base_path
                try:
                    node_backend.update_statistics(b_stat, collect_ts)
                except KeyError as e:
                    logger.warn('Bad stat for node backend {0} ({1}): {2}'.format(
                        node_backend, e, b_stat))
                    pass

                if node_backend.base_path != prev_base_path:
                    update_group_history = True

                if b_stat['status']['read_only'] or node_backend.stat_commit_errors > 0:
                    node_backend.make_read_only()
                else:
                    node_backend.make_writable()

                if node_backend.group is not group:
                    logger.debug('Adding node backend {0} to group {1}{2}'.format(
                        node_backend, group.group_id,
                        ' (moved from group {0})'.format(node_backend.group.group_id)
                        if node_backend.group else ''))
                    update_group_history = True
                    group.add_node_backend(node_backend)

                if update_group_history:
                    infrastructure.update_group_history(group)

            for fs in fss:
                fs.update_commands_stats()
            node.update_commands_stats(good_node_backends)

        except Exception as e:
            logger.exception('Unable to process statistics for node {}'.format(node))
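
The last example is the same inlined variant with a different wrapping style. Taken together, the keys these examples actually read sketch the per-backend entry they expect. The literal below is only that inferred skeleton: values are placeholders, and real elliptics monitor output carries many more sections.

    # Skeleton of one entry of stat['backends'], inferred from the keys read
    # above; values are placeholders, not real monitor data.
    b_stat = {
        'backend_id': 1,
        'status': {
            'state': 1,          # 1 == enabled; any other state disables the backend
            'read_only': False,  # True (or commit errors) switches it to read-only
        },
        'backend': {
            'config': {'group': 42},  # consulted when a top-level 'config' is absent
            'vfs': {'fsid': 123456},  # filesystem id, keyed as '<host>:<fsid>'
            'dstat': {},              # disk stats; absence only triggers a warning
        },
        'stats': {},                  # filled from NodeInfoUpdater._parsed_stats()
    }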