Example #1
0
        def _process_group_metadata(response, group, elapsed_time=None, end_time=None):
            """Parse a group's meta key and register its couple in storage.

            Side effects: schedules the couple's sibling groups for their own
            meta update, and — for a couple not seen before — creates fake
            groups, the (cache) couple and its namespace in ``storage``.
            """
            logger.debug('Cluster updating: group {0} meta key read time: {1}.{2}'.format(
                group.group_id, elapsed_time.tsec, elapsed_time.tnsec))

            group.parse_meta(response.data)

            couple_ids = group.meta.get('couple')
            if couple_ids is None:
                logger.error('Read symmetric groups from group {} (no couple data): {}'.format(
                    group.group_id, group.meta))
                return

            logger.info('Read symmetric groups from group {}: {}'.format(group.group_id, couple_ids))

            # Queue every other member of the couple for its own meta read.
            for sibling_id in couple_ids:
                if sibling_id == group.group_id:
                    continue
                logger.info('Scheduling update for group {}'.format(sibling_id))
                _queue.add(sibling_id)

            couple_key = ':'.join(str(g) for g in sorted(couple_ids))

            logger.debug('{0} in storage.couples: {1}'.format(
                couple_key, couple_key in storage.couples))

            # Already known — nothing left to do.
            if couple_key in storage.couples or couple_key in storage.cache_couples:
                return

            ns_id = group.meta.get('namespace')
            if ns_id is None:
                logger.error('Inconsistent meta read from group {}, '
                             'missing namespace: {}'.format(group, group.meta))
                return

            if group.type == storage.Group.TYPE_DATA:
                logger.info('Creating couple {0}'.format(couple_key))
                for g in couple_ids:
                    if g in storage.groups:
                        continue
                    logger.info('Group {} is not found adding fake group for '
                                'couple {}'.format(g, couple_ids))
                    storage.groups.add(g)
                new_couple = storage.couples.add(storage.groups[g] for g in couple_ids)
                logger.info('Created couple {0} {1}'.format(new_couple, repr(new_couple)))
            elif group.type == storage.Group.TYPE_CACHE:
                logger.info('Creating cache couple {0}'.format(couple_key))
                new_couple = storage.cache_couples.add(storage.groups[g] for g in couple_ids)
                logger.info('Created cache couple {0} {1}'.format(new_couple, repr(new_couple)))
            else:
                raise ValueError('Unknown group type for group {}: {}'.format(
                    group, group.type))

            # Couple membership changed — refresh each member's history.
            for g in couple_ids:
                infrastructure.update_group_history(storage.groups[g])

            if ns_id in storage.namespaces:
                ns = storage.namespaces[ns_id]
            else:
                logger.info('Creating storage namespace {}'.format(ns_id))
                ns = storage.namespaces.add(ns_id)

            ns.add_couple(new_couple)
Example #2
0
        def _create_groupset_if_needed(groups, group_type, ns_id):
            """Return the groupset composed of ``groups``, creating it on demand.

            Unknown group ids are registered as fake groups; the namespace
            ``ns_id`` is created if missing and the new groupset attached to it.
            """
            for gid in groups:
                if gid in storage.groups:
                    continue
                logger.info(
                    'Group {group} is not found, adding fake group '
                    'for groupset {groups}'.format(
                        group=gid,
                        groups=groups,
                    ))
                storage.groups.add(gid)

            groupset_key = ':'.join(str(gid) for gid in sorted(groups))
            if groupset_key not in storage.groupsets:
                # TODO: somehow check that couple type matches group.type
                # for all groups in couple (not very easy when metakey read
                # fails)
                logger.info(
                    'Creating groupset {groups}, group type "{group_type}"'.format(
                        groups=groupset_key,
                        group_type=group_type,
                    ))
                groupset = storage.groupsets.add(
                    groups=(storage.groups[gid] for gid in groups),
                    group_type=group_type,
                )

                # Membership changed — refresh history for every member group.
                for gid in groups:
                    infrastructure.update_group_history(storage.groups[gid])

                if ns_id in storage.namespaces:
                    ns = storage.namespaces[ns_id]
                else:
                    logger.info('Creating storage namespace {}'.format(ns_id))
                    ns = storage.namespaces.add(ns_id)

                ns.add_couple(groupset)
            return storage.groupsets[groupset_key]
Example #3
0
        def _create_groupset_if_needed(groups, group_type, ns_id):
            """Return the groupset composed of ``groups``, creating it if unknown.

            Missing group ids are registered as fake groups; the namespace
            ``ns_id`` is created on demand and the new groupset is attached
            to it.
            """

            for gid in groups:
                if gid not in storage.groups:
                    logger.info(
                        'Group {group} is not found, adding fake group '
                        'for groupset {groups}'.format(
                            group=gid,
                            groups=groups,
                        )
                    )
                    # Fake group: placeholder object until real backend
                    # statistics arrive for this id.
                    storage.groups.add(gid)

            # Canonical groupset id: sorted group ids joined by ':'.
            groupset_str = ':'.join((str(gid) for gid in sorted(groups)))
            if groupset_str not in storage.groupsets:
                # TODO: somehow check that couple type matches group.type
                # for all groups in couple (not very easy when metakey read
                # fails)
                logger.info('Creating groupset {groups}, group type "{group_type}"'.format(
                    groups=groupset_str,
                    group_type=group_type,
                ))
                c = storage.groupsets.add(
                    groups=(storage.groups[gid] for gid in groups),
                    group_type=group_type,
                )

                # Membership changed — refresh history for every member group.
                for gid in groups:
                    infrastructure.update_group_history(storage.groups[gid])

                if ns_id not in storage.namespaces:
                    logger.info('Creating storage namespace {}'.format(ns_id))
                    ns = storage.namespaces.add(ns_id)
                else:
                    ns = storage.namespaces[ns_id]

                # NOTE(review): groupsets are attached via add_couple —
                # presumably namespaces treat couples and groupsets alike;
                # confirm against the namespace implementation.
                ns.add_couple(c)
            return storage.groupsets[groupset_str]
Example #4
0
    def _process_backend_statistics(node, b_stat, backend_stats, collect_ts,
                                    processed_fss, processed_node_backends):
        """Update storage state from a single backend's statistics record.

        Mutates ``b_stat`` (injects parsed 'stats'), ``processed_fss`` and
        ``processed_node_backends``; may create node backends, groups and
        filesystems in ``storage`` and queue a group history update.
        """

        backend_id = b_stat['backend_id']

        # Backend config location differs between stat payload variants.
        nb_config = (b_stat['config']
                     if 'config' in b_stat else b_stat['backend']['config'])
        gid = nb_config['group']

        if gid == 0:
            # skip zero group ids
            return

        b_stat['stats'] = backend_stats.get(backend_id, {})

        # Set when the backend is new, its base path changed, or it moved
        # between groups — any of these invalidates stored group history.
        update_group_history = False

        node_backend_addr = '{0}/{1}'.format(node, backend_id)
        if node_backend_addr not in storage.node_backends:
            node_backend = storage.node_backends.add(node, backend_id)
            update_group_history = True
        else:
            node_backend = storage.node_backends[node_backend_addr]

        if b_stat['status']['state'] != 1:
            # NOTE(review): state 1 appears to be the only "enabled" state
            # — inferred from usage here; confirm against the stat schema.
            logger.info('Node backend {0} is not enabled: state {1}'.format(
                str(node_backend), b_stat['status']['state']))
            node_backend.disable()
            return

        node_backend.enable()

        if gid not in storage.groups:
            logger.debug('Adding group {0}'.format(gid))
            group = storage.groups.add(gid)
        else:
            group = storage.groups[gid]

        fsid = b_stat['backend']['vfs']['fsid']
        fsid_key = '{host}:{fsid}'.format(host=node.host, fsid=fsid)

        if fsid_key not in storage.fs:
            logger.debug('Adding fs {0}'.format(fsid_key))
            fs = storage.fs.add(node.host, fsid)
        else:
            fs = storage.fs[fsid_key]

        if node_backend not in fs.node_backends:
            fs.add_node_backend(node_backend)

        # One filesystem may host several backends; update its statistics
        # only once per collection round.
        if fs not in processed_fss:
            fs.update_statistics(b_stat['backend'], collect_ts)
            processed_fss.add(fs)

        logger.info(
            'Updating statistics for node backend {}'.format(node_backend))
        prev_base_path = node_backend.base_path
        try:
            node_backend.update_statistics(b_stat, collect_ts)
        except KeyError as e:
            # Incomplete stat payload is tolerated; previous values stay.
            logger.warn('Bad stat for node backend {0} ({1}): {2}'.format(
                node_backend, e, b_stat))
            pass

        if node_backend.base_path != prev_base_path:
            update_group_history = True

        if b_stat['status']['read_only'] or node_backend.stat_commit_errors > 0:
            node_backend.make_read_only()
        else:
            node_backend.make_writable()

        if node_backend.group is not group:
            logger.debug('Adding node backend {0} to group {1}{2}'.format(
                node_backend, group.group_id,
                ' (moved from group {0})'.format(node_backend.group.group_id)
                if node_backend.group else ''))
            group.add_node_backend(node_backend)
            update_group_history = True

        # these backends' commands stat are used later to update accumulated
        # node commands stat
        processed_node_backends.append(node_backend)

        if update_group_history:
            logger.debug(
                'Group {} history may be outdated, adding to update queue'.
                format(group))
            infrastructure.update_group_history(group)
Example #5
0
        def _process_group_metadata(response,
                                    group,
                                    elapsed_time=None,
                                    end_time=None):
            """Parse a group's meta key and register its couple in storage.

            Schedules sibling groups for their own meta update and, for a
            couple not seen before, creates fake groups, the (cache) couple
            and its namespace as needed.
            """
            logger.debug(
                'Cluster updating: group {0} meta key read time: {1}.{2}'.
                format(group.group_id, elapsed_time.tsec, elapsed_time.tnsec))
            meta = response.data

            group.parse_meta(meta)
            couple = group.meta.get('couple')
            if couple is None:
                logger.error(
                    'Read symmetric groups from group {} (no couple data): {}'.
                    format(group.group_id, meta))
                return

            logger.info('Read symmetric groups from group {}: {}'.format(
                group.group_id, couple))
            # Queue every other member of the couple for its own meta read.
            for gid in couple:
                if gid != group.group_id:
                    logger.info('Scheduling update for group {}'.format(gid))
                    _queue.add(gid)

            # Canonical couple id: sorted group ids joined by ':'.
            couple_str = ':'.join((str(gid) for gid in sorted(couple)))

            logger.debug('{0} in storage.couples: {1}'.format(
                couple_str, couple_str in storage.couples))

            if couple_str not in storage.couples and couple_str not in storage.cache_couples:

                ns_id = group.meta.get('namespace')
                if ns_id is None:
                    logger.error('Inconsistent meta read from group {}, '
                                 'missing namespace: {}'.format(
                                     group, group.meta))
                    return

                if group.type == storage.Group.TYPE_DATA:
                    logger.info('Creating couple {0}'.format(couple_str))
                    for gid in couple:
                        if gid not in storage.groups:
                            logger.info(
                                'Group {} is not found adding fake group for '
                                'couple {}'.format(gid, couple))
                            # Fake group: placeholder until real stats arrive.
                            storage.groups.add(gid)
                    c = storage.couples.add(
                        [storage.groups[gid] for gid in couple])
                    logger.info('Created couple {0} {1}'.format(c, repr(c)))
                elif group.type == storage.Group.TYPE_CACHE:
                    logger.info('Creating cache couple {0}'.format(couple_str))
                    c = storage.cache_couples.add(
                        [storage.groups[gid] for gid in couple])
                    logger.info('Created cache couple {0} {1}'.format(
                        c, repr(c)))
                else:
                    raise ValueError(
                        'Unknown group type for group {}: {}'.format(
                            group, group.type))

                # Couple membership changed — refresh each member's history.
                for gid in couple:
                    infrastructure.update_group_history(storage.groups[gid])

                if ns_id not in storage.namespaces:
                    logger.info('Creating storage namespace {}'.format(ns_id))
                    ns = storage.namespaces.add(ns_id)
                else:
                    ns = storage.namespaces[ns_id]

                ns.add_couple(c)
            return
Example #6
0
    def update_statistics(node, stat, elapsed_time=None):
        """Apply a node's monitor statistics to the storage model.

        Updates the node itself, then every backend listed in
        ``stat['backends']``: creates node backends / groups / filesystems
        on first sight, refreshes their statistics and queues group history
        updates when membership or base paths changed.
        """

        logger.debug(
            'Cluster updating: node {0} statistics time: {1:03f}'.format(
                node, elapsed_time))

        collect_ts = mh.elliptics_time_to_ts(stat['timestamp'])

        try:
            try:
                node.update_statistics(stat, collect_ts)
            except KeyError as e:
                # Incomplete procfs data is tolerated; continue with the
                # per-backend processing below.
                logger.warn('Bad procfs stat for node {0} ({1}): {2}'.format(
                    node, e, stat))
                pass

            fss = set()
            good_node_backends = []

            backend_stats = NodeInfoUpdater._parsed_stats(stat['stats'])

            # Python 2 style dict iteration (itervalues).
            for b_stat in stat['backends'].itervalues():
                backend_id = b_stat['backend_id']
                b_stat['stats'] = backend_stats.get(backend_id, {})

                # Set when the backend is new, its base path changed, or it
                # moved between groups.
                update_group_history = False

                node_backend_addr = '{0}/{1}'.format(node, backend_id)
                if node_backend_addr not in storage.node_backends:
                    node_backend = storage.node_backends.add(node, backend_id)
                    update_group_history = True
                else:
                    node_backend = storage.node_backends[node_backend_addr]

                # Backend config location differs between payload variants.
                nb_config = (b_stat['config'] if 'config' in b_stat else
                             b_stat['backend']['config'])

                gid = nb_config['group']

                # NOTE(review): unlike _process_backend_statistics, the node
                # backend is registered above BEFORE this zero-group check,
                # so zero-group backends still enter storage.node_backends —
                # confirm this difference is intended.
                if gid == 0:
                    # skip zero group ids
                    continue

                if b_stat['status']['state'] != 1:
                    logger.info(
                        'Node backend {0} is not enabled: state {1}'.format(
                            str(node_backend), b_stat['status']['state']))
                    node_backend.disable()
                    continue

                if gid not in storage.groups:
                    logger.debug('Adding group {0}'.format(gid))
                    group = storage.groups.add(gid)
                else:
                    group = storage.groups[gid]

                if 'vfs' not in b_stat['backend']:
                    logger.error(
                        'Failed to parse statistics for node backend {0}, '
                        'vfs key not found: {1}'.format(node_backend, b_stat))
                    continue

                fsid = b_stat['backend']['vfs']['fsid']
                fsid_key = '{host}:{fsid}'.format(host=node.host, fsid=fsid)

                if fsid_key not in storage.fs:
                    logger.debug('Adding fs {0}'.format(fsid_key))
                    fs = storage.fs.add(node.host, fsid)
                else:
                    fs = storage.fs[fsid_key]

                if node_backend not in fs.node_backends:
                    fs.add_node_backend(node_backend)
                # NOTE(review): fs statistics are refreshed once per backend
                # here (no processed-fss guard as in
                # _process_backend_statistics) — confirm intended.
                fs.update_statistics(b_stat['backend'], collect_ts)

                fss.add(fs)
                good_node_backends.append(node_backend)

                node_backend.enable()

                logger.info('Updating statistics for node backend %s' %
                            (str(node_backend)))
                if 'backend' not in b_stat:
                    logger.warn('No backend in b_stat: {0}'.format(b_stat))
                elif 'dstat' not in b_stat['backend']:
                    logger.warn('No dstat in backend: {0}'.format(
                        b_stat['backend']))

                prev_base_path = node_backend.base_path
                try:
                    node_backend.update_statistics(b_stat, collect_ts)
                except KeyError as e:
                    # Incomplete stat payload is tolerated; keep old values.
                    logger.warn(
                        'Bad stat for node backend {0} ({1}): {2}'.format(
                            node_backend, e, b_stat))
                    pass

                if node_backend.base_path != prev_base_path:
                    update_group_history = True

                if b_stat['status'][
                        'read_only'] or node_backend.stat_commit_errors > 0:
                    node_backend.make_read_only()
                else:
                    node_backend.make_writable()

                if node_backend.group is not group:
                    logger.debug(
                        'Adding node backend {0} to group {1}{2}'.format(
                            node_backend, group.group_id,
                            ' (moved from group {0})'.format(
                                node_backend.group.group_id)
                            if node_backend.group else ''))
                    update_group_history = True
                    group.add_node_backend(node_backend)

                if update_group_history:
                    infrastructure.update_group_history(group)

            # Aggregate command stats after all backends were processed.
            for fs in fss:
                fs.update_commands_stats()
            node.update_commands_stats(good_node_backends)

        except Exception as e:
            # Catch-all boundary: one node's bad statistics must not break
            # the whole cluster update round.
            logger.exception(
                'Unable to process statistics for node {}'.format(node))
Example #7
0
    def _process_backend_statistics(node,
                                    b_stat,
                                    backend_stats,
                                    collect_ts,
                                    processed_fss,
                                    processed_node_backends):
        """Fold one backend's statistics record into the storage model.

        Mutates ``b_stat`` (injects parsed 'stats'), ``processed_fss`` and
        ``processed_node_backends``; may register node backends, groups and
        filesystems and queue a group history refresh.
        """
        backend_id = b_stat['backend_id']

        # Backend config location differs between stat payload variants.
        if 'config' in b_stat:
            backend_config = b_stat['config']
        else:
            backend_config = b_stat['backend']['config']

        group_id = backend_config['group']
        if group_id == 0:
            # skip zero group ids
            return

        b_stat['stats'] = backend_stats.get(backend_id, {})

        # Flag raised by any change that invalidates stored group history.
        history_is_stale = False

        nb_addr = '{0}/{1}'.format(node, backend_id)
        if nb_addr in storage.node_backends:
            node_backend = storage.node_backends[nb_addr]
        else:
            node_backend = storage.node_backends.add(node, backend_id)
            history_is_stale = True

        if b_stat['status']['state'] != 1:
            logger.info('Node backend {0} is not enabled: state {1}'.format(
                str(node_backend), b_stat['status']['state']))
            node_backend.disable()
            return

        node_backend.enable()

        if group_id in storage.groups:
            group = storage.groups[group_id]
        else:
            logger.debug('Adding group {0}'.format(group_id))
            group = storage.groups.add(group_id)

        fs_id = b_stat['backend']['vfs']['fsid']
        fs_key = '{host}:{fsid}'.format(host=node.host, fsid=fs_id)
        if fs_key in storage.fs:
            fs = storage.fs[fs_key]
        else:
            logger.debug('Adding fs {0}'.format(fs_key))
            fs = storage.fs.add(node.host, fs_id)

        if node_backend not in fs.node_backends:
            fs.add_node_backend(node_backend)

        # One filesystem may carry several backends; update its statistics
        # at most once per collection round.
        if fs not in processed_fss:
            fs.update_statistics(b_stat['backend'], collect_ts)
            processed_fss.add(fs)

        logger.info('Updating statistics for node backend {}'.format(node_backend))
        old_base_path = node_backend.base_path
        try:
            node_backend.update_statistics(b_stat, collect_ts)
        except KeyError as e:
            # Incomplete stat payload is tolerated; previous values remain.
            logger.warn('Bad stat for node backend {0} ({1}): {2}'.format(
                node_backend, e, b_stat))

        if node_backend.base_path != old_base_path:
            history_is_stale = True

        if b_stat['status']['read_only'] or node_backend.stat_commit_errors > 0:
            node_backend.make_read_only()
        else:
            node_backend.make_writable()

        if node_backend.group is not group:
            moved_note = (' (moved from group {0})'.format(node_backend.group.group_id)
                          if node_backend.group else '')
            logger.debug('Adding node backend {0} to group {1}{2}'.format(
                node_backend, group.group_id, moved_note))
            group.add_node_backend(node_backend)
            history_is_stale = True

        # these backends' commands stat are used later to update accumulated
        # node commands stat
        processed_node_backends.append(node_backend)

        if history_is_stale:
            logger.debug('Group {} history may be outdated, adding to update queue'.format(group))
            infrastructure.update_group_history(group)
    def update_statistics(node, stat, elapsed_time=None):
        """Apply a node's monitor statistics to the storage model.

        Updates the node itself, then every backend listed in
        ``stat['backends']``: creates node backends / groups / filesystems
        on first sight, refreshes their statistics and queues group history
        updates when membership or base paths changed.
        """

        logger.debug(
            'Cluster updating: node {0} statistics time: {1:03f}'.format(
                node, elapsed_time
            )
        )

        collect_ts = mh.elliptics_time_to_ts(stat['timestamp'])

        try:
            try:
                node.update_statistics(stat, collect_ts)
            except KeyError as e:
                # Incomplete procfs data is tolerated; continue with the
                # per-backend processing below.
                logger.warn('Bad procfs stat for node {0} ({1}): {2}'.format(node, e, stat))
                pass

            fss = set()
            good_node_backends = []

            backend_stats = NodeInfoUpdater._parsed_stats(stat['stats'])

            # Python 2 style dict iteration (itervalues).
            for b_stat in stat['backends'].itervalues():
                backend_id = b_stat['backend_id']
                b_stat['stats'] = backend_stats.get(backend_id, {})

                # Set when the backend is new, its base path changed, or it
                # moved between groups.
                update_group_history = False

                node_backend_addr = '{0}/{1}'.format(node, backend_id)
                if node_backend_addr not in storage.node_backends:
                    node_backend = storage.node_backends.add(node, backend_id)
                    update_group_history = True
                else:
                    node_backend = storage.node_backends[node_backend_addr]

                # Backend config location differs between payload variants.
                nb_config = (b_stat['config']
                             if 'config' in b_stat else
                             b_stat['backend']['config'])

                gid = nb_config['group']

                # NOTE(review): the node backend is registered above BEFORE
                # this zero-group check, so zero-group backends still enter
                # storage.node_backends — confirm this is intended.
                if gid == 0:
                    # skip zero group ids
                    continue

                if b_stat['status']['state'] != 1:
                    logger.info('Node backend {0} is not enabled: state {1}'.format(
                        str(node_backend), b_stat['status']['state']))
                    node_backend.disable()
                    continue

                if gid not in storage.groups:
                    logger.debug('Adding group {0}'.format(gid))
                    group = storage.groups.add(gid)
                else:
                    group = storage.groups[gid]

                if 'vfs' not in b_stat['backend']:
                    logger.error(
                        'Failed to parse statistics for node backend {0}, '
                        'vfs key not found: {1}'.format(node_backend, b_stat))
                    continue

                fsid = b_stat['backend']['vfs']['fsid']
                fsid_key = '{host}:{fsid}'.format(host=node.host, fsid=fsid)

                if fsid_key not in storage.fs:
                    logger.debug('Adding fs {0}'.format(fsid_key))
                    fs = storage.fs.add(node.host, fsid)
                else:
                    fs = storage.fs[fsid_key]

                if node_backend not in fs.node_backends:
                    fs.add_node_backend(node_backend)
                # NOTE(review): fs stats refreshed once per backend here —
                # no processed-fss guard as in _process_backend_statistics.
                fs.update_statistics(b_stat['backend'], collect_ts)

                fss.add(fs)
                good_node_backends.append(node_backend)

                node_backend.enable()

                logger.info('Updating statistics for node backend %s' % (str(node_backend)))
                if 'backend' not in b_stat:
                    logger.warn('No backend in b_stat: {0}'.format(b_stat))
                elif 'dstat' not in b_stat['backend']:
                    logger.warn('No dstat in backend: {0}'.format(b_stat['backend']))

                prev_base_path = node_backend.base_path
                try:
                    node_backend.update_statistics(b_stat, collect_ts)
                except KeyError as e:
                    # Incomplete stat payload is tolerated; keep old values.
                    logger.warn('Bad stat for node backend {0} ({1}): {2}'.format(
                        node_backend, e, b_stat))
                    pass

                if node_backend.base_path != prev_base_path:
                    update_group_history = True

                if b_stat['status']['read_only'] or node_backend.stat_commit_errors > 0:
                    node_backend.make_read_only()
                else:
                    node_backend.make_writable()

                if node_backend.group is not group:
                    logger.debug('Adding node backend {0} to group {1}{2}'.format(
                        node_backend, group.group_id,
                        ' (moved from group {0})'.format(node_backend.group.group_id)
                        if node_backend.group else ''))
                    update_group_history = True
                    group.add_node_backend(node_backend)

                if update_group_history:
                    infrastructure.update_group_history(group)

            # Aggregate command stats after all backends were processed.
            for fs in fss:
                fs.update_commands_stats()
            node.update_commands_stats(good_node_backends)

        except Exception as e:
            # Catch-all boundary: one node's bad statistics must not break
            # the whole cluster update round.
            logger.exception('Unable to process statistics for node {}'.format(node))