Example #1
    def run(self):
        """Run this section and print out information."""
        grouping = Grouping(
            group_by=lambda x: (x.datetime, x.cursorid, x.reapedtime))
        logfile = self.mloginfo.logfile

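        # pre-compute the log's epoch time range so progress can be reported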
        if logfile.start and logfile.end:
            progress_start = self.mloginfo._datetime_to_epoch(logfile.start)
            progress_total = (self.mloginfo._datetime_to_epoch(logfile.end) -
                              progress_start)
        else:
            self.mloginfo.progress_bar_enabled = False

        for i, le in enumerate(logfile):
            # update progress bar every 1000 lines
            if self.mloginfo.progress_bar_enabled and (i % 1000 == 0):
                if le.datetime:
                    progress_curr = self.mloginfo._datetime_to_epoch(
                        le.datetime)
                    if progress_total:
                        (self.mloginfo.update_progress(
                            float(progress_curr - progress_start) /
                            progress_total))

            if 'Cursor id' in le.line_str:
                lt = LogTuple(le.datetime, le.cursor, le._reapedtime)
                grouping.add(lt)

        grouping.sort_by_size()

        # clear progress bar again
        if self.mloginfo.progress_bar_enabled:
            self.mloginfo.update_progress(1.0)

        # no cursor information in the log file
        if not len(grouping):
            print('no cursor information found.')
            return

        titles = ['datetime', 'cursorid', 'reapedtime']

        table_rows = []
        # using only important key-values
        for g in grouping:
            # calculate statistics for this group
            datetime, cursorid, reapedtime = g
            stats = OrderedDict()
            stats['datetime'] = str(datetime)
            stats['cursorid'] = str(cursorid)
            stats['reapedtime'] = str(reapedtime)
            table_rows.append(stats)

        print_table(table_rows, titles, uppercase_headers=True)

        print('')
Example #2
    def run(self):
        """ run this section and print out information. """
        grouping = Grouping(group_by=lambda x: (x.namespace, x.pattern))
        logfile = self.mloginfo.logfile

        if logfile.start and logfile.end:
            progress_start = self.mloginfo._datetime_to_epoch(logfile.start)
            progress_total = (self.mloginfo._datetime_to_epoch(logfile.end) -
                              progress_start)
        else:
            self.mloginfo.progress_bar_enabled = False

        for i, le in enumerate(logfile):
            # update progress bar every 1000 lines
            if self.mloginfo.progress_bar_enabled and (i % 1000 == 0):
                if le.datetime:
                    progress_curr = self.mloginfo._datetime_to_epoch(
                        le.datetime)
                    if progress_total:
                        self.mloginfo.update_progress(
                            float(progress_curr - progress_start) /
                            progress_total)

            if le.operation in ['query', 'update', 'remove']:
                grouping.add(le)

        grouping.sort_by_size()

        # clear progress bar again
        if self.mloginfo.progress_bar_enabled:
            self.mloginfo.update_progress(1.0)

        titles = ['namespace', 'pattern', 'count', 'min (ms)', 'max (ms)',
                  'mean (ms)', 'sum (ms)']
        table_rows = []
        for g in grouping:
            # calculate statistics for this group
            namespace, pattern = g

            group_events = [le.duration for le in grouping[g]
                            if le.duration is not None]

            stats = OrderedDict()
            stats['namespace'] = namespace
            stats['pattern'] = pattern
            stats['count'] = len(group_events)
            stats['min'] = min(group_events) if group_events else '-'
            stats['max'] = max(group_events) if group_events else '-'
            # placeholder keeps 'mean' in column order; computed after 'sum'
            stats['mean'] = 0
            stats['sum'] = sum(group_events) if group_events else '-'
            stats['mean'] = (stats['sum'] / stats['count']
                             if group_events else '-')

            if self.mloginfo.args['verbose']:
                stats['example'] = grouping[g][0]
                if 'example' not in titles:
                    titles.append('example')

            table_rows.append(stats)

        table_rows = sorted(table_rows, key=itemgetter('sum'), reverse=True)
        print_table(table_rows, titles, uppercase_headers=False)
        print('')
Example #3
    def run(self):
        """Run this section and print out information."""
        titles = ['date', 'host', 'state/message']
        table_rows = []

        for host, state, logevent in self.mloginfo.logfile.rs_state:
            stats = OrderedDict()
            stats['date'] = logevent.datetime.strftime("%b %d %H:%M:%S")
            stats['host'] = host
            stats['state/message'] = state
            table_rows.append(stats)

        print_table(table_rows, titles, uppercase_headers=False)

        if len(self.mloginfo.logfile.rs_state) == 0:
            print("  no rs state changes found")
Example #4
    def run(self):
        """ run this section and print out information. """
        grouping = Grouping(group_by=lambda x: (x.namespace, x.pattern))
        logfile = self.mloginfo.logfile

        if logfile.start and logfile.end:
            progress_start = self.mloginfo._datetime_to_epoch(logfile.start)
            progress_total = self.mloginfo._datetime_to_epoch(
                logfile.end) - progress_start
        else:
            self.mloginfo.progress_bar_enabled = False

        for i, le in enumerate(logfile):
            # update progress bar every 1000 lines
            if self.mloginfo.progress_bar_enabled and (i % 1000 == 0):
                if le.datetime:
                    progress_curr = self.mloginfo._datetime_to_epoch(
                        le.datetime)
                    if progress_total:
                        self.mloginfo.update_progress(
                            float(progress_curr - progress_start) /
                            progress_total)

            if le.operation in ['query', 'update', 'remove']:
                grouping.add(le)

        grouping.sort_by_size()

        # clear progress bar again
        if self.mloginfo.progress_bar_enabled:
            self.mloginfo.update_progress(1.0)

        titles = [
            'namespace', 'pattern', 'count', 'min (ms)', 'max (ms)',
            'mean (ms)', 'sum (ms)'
        ]
        table_rows = []
        for g in grouping:
            # calculate statistics for this group
            namespace, pattern = g

            group_events = [
                le.duration for le in grouping[g] if le.duration is not None
            ]

            stats = OrderedDict()
            stats['namespace'] = namespace
            stats['pattern'] = pattern
            stats['count'] = len(group_events)
            stats['min'] = min(group_events) if group_events else '-'
            stats['max'] = max(group_events) if group_events else '-'
            # placeholder keeps 'mean' in column order; computed after 'sum'
            stats['mean'] = 0
            stats['sum'] = sum(group_events) if group_events else '-'
            stats['mean'] = (stats['sum'] / stats['count']
                             if group_events else '-')

            if self.mloginfo.args['verbose']:
                stats['example'] = grouping[g][0]
                if 'example' not in titles:
                    titles.append('example')

            table_rows.append(stats)

        table_rows = sorted(table_rows, key=itemgetter('sum'), reverse=True)
        print_table(table_rows, titles, uppercase_headers=False)
        print('')
Example #5
    def run(self):
        """Run this section and print out information."""
        grouping = Grouping(
            group_by=lambda x: (x.namespace, x.operation, x.pattern))
        logfile = self.mloginfo.logfile

        if logfile.start and logfile.end:
            progress_start = self.mloginfo._datetime_to_epoch(logfile.start)
            progress_total = (self.mloginfo._datetime_to_epoch(logfile.end) -
                              progress_start)
        else:
            self.mloginfo.progress_bar_enabled = False

        for i, le in enumerate(logfile):
            # update progress bar every 1000 lines
            if self.mloginfo.progress_bar_enabled and (i % 1000 == 0):
                if le.datetime:
                    progress_curr = self.mloginfo._datetime_to_epoch(
                        le.datetime)
                    if progress_total:
                        (self.mloginfo.update_progress(
                            float(progress_curr - progress_start) /
                            progress_total))

            if (le.operation in ['query', 'getmore', 'update', 'remove']
                    or le.command in ['count', 'findandmodify', 'geonear',
                                      'find']):
                lt = LogTuple(namespace=le.namespace,
                              operation=op_or_cmd(le),
                              pattern=le.pattern,
                              duration=le.duration)
                grouping.add(lt)

        grouping.sort_by_size()

        # clear progress bar again
        if self.mloginfo.progress_bar_enabled:
            self.mloginfo.update_progress(1.0)

        # no queries in the log file
        if len(grouping) < 1:
            print('no queries found.')
            return

        titles = [
            'namespace', 'operation', 'pattern', 'count', 'min (ms)',
            'max (ms)', 'mean (ms)', '95%-ile (ms)', 'sum (ms)'
        ]
        table_rows = []

        for g in grouping:
            # calculate statistics for this group
            namespace, op, pattern = g

            group_events = [
                le.duration for le in grouping[g] if le.duration is not None
            ]

            stats = OrderedDict()
            stats['namespace'] = namespace
            stats['operation'] = op
            stats['pattern'] = pattern
            stats['count'] = len(group_events)
            stats['min'] = min(group_events) if group_events else '-'
            stats['max'] = max(group_events) if group_events else '-'
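            # placeholder keeps 'mean' in column order; computed after 'sum'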
            stats['mean'] = 0
            if np:
                stats['95%'] = (np.percentile(group_events, 95)
                                if group_events else '-')
            else:
                stats['95%'] = 'n/a'
            stats['sum'] = sum(group_events) if group_events else '-'
            stats['mean'] = (stats['sum'] /
                             stats['count'] if group_events else '-')

            if self.mloginfo.args['verbose']:
                stats['example'] = grouping[g][0]
                if 'example' not in titles:
                    titles.append('example')

            table_rows.append(stats)

        # sort order depending on field names
        reverse = True
        if self.mloginfo.args['sort'] in ['namespace', 'pattern']:
            reverse = False

        table_rows = sorted(table_rows,
                            key=itemgetter(self.mloginfo.args['sort']),
                            reverse=reverse)
        print_table(table_rows, titles, uppercase_headers=False)
        print('')
Example #6
    def _print_chunk_migrations(self, chunks, moved_from=False):
        """Prints the chunk migration statistics in a table depending on to/from flag"""
        verbose = self.mloginfo.args['verbose']
        chunks.reverse()

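        # verbose mode lists every migration individually; otherwise bucket
        # migrations per hour, per shard and per namespace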
        if verbose:
            chunk_groupings = Grouping(group_by=lambda x: x.time)
        else:
            chunk_groupings = Grouping(group_by=lambda x: (
                x.time.strftime("%Y-%m-%dT%H"), x.movedFromTo, x.namespace))

        for chunk_moved in chunks:
            (time, chunk_range, moved_to_from, namespace, steps, status,
             error_message) = chunk_moved
            moved_tuple = ChunksTuple(time=time,
                                      range=chunk_range,
                                      movedFromTo=moved_to_from,
                                      namespace=namespace,
                                      steps=steps,
                                      migrationStatus=status,
                                      errorMessage=error_message)
            chunk_groupings.add(moved_tuple)

        move_to_from_title = 'to shard' if moved_from else 'from shard'
        if verbose:
            titles = [
                '  time', move_to_from_title, 'namespace',
                'chunk migration status'
            ]
        else:
            titles = [
                '  time (/hour)', move_to_from_title, 'namespace',
                '# chunks migrations attempted', 'successful chunk migrations',
                'failed chunk migrations'
            ]

        if len(chunk_groupings) == 0:
            print("  no chunk migrations found.")
        else:
            table_rows = []
            for group, chunks in chunk_groupings.items():

                if verbose:
                    time = group.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
                    chunk = chunks[0]
                    # the group key is only the timestamp here, so take the
                    # shard and namespace from the event itself
                    moved_to_from = chunk.movedFromTo
                    namespace = chunk.namespace
                else:
                    time, moved_to_from, namespace = group
                    successful_count = 0
                    total_time_spent = 0
                    failed = dict()
                    succeeded_after = dict()
                    for chunk in chunks:
                        if chunk.migrationStatus == "success":
                            successful_count += 1
                            succeeded_after[chunk.range] = (True, chunk.time)
                            total_time_spent += sum(
                                int(ms) for step, ms in chunk.steps)
                        else:
                            count, timestamps = failed.get(
                                chunk.errorMessage, (0, list()))
                            count += 1
                            successful_after, timestamp = succeeded_after.get(
                                chunk.range, (False, None))
                            if successful_after:
                                timestamp = timestamp.strftime(
                                    "%H:%M:%S.%f")[:-3]
                                chunk_time = chunk.time.strftime(
                                    "%H:%M:%S.%f")[:-3]
                                timestamps.append(
                                    chunk_time +
                                    f' BECAME SUCCESSFUL AT: {timestamp}')
                            else:
                                timestamps.append(
                                    chunk.time.strftime("%H:%M:%S.%f")[:-3])
                            failed[chunk.errorMessage] = (count, timestamps)

                moved_chunks = OrderedDict()

                moved_chunks['time'] = f"  {time}"
                moved_chunks['movedFromTo'] = moved_to_from
                moved_chunks['namespace'] = namespace

                if verbose:
                    if chunk.migrationStatus == "success":
                        total_time_spent = sum(
                            int(ms) for step, ms in chunk.steps)
                        msg = f"Successful | Total time spent {total_time_spent}ms"
                        step_breakdown = ', '.join(f"{step}: {ms}ms"
                                                   for step, ms in chunk.steps)
                        moved_chunks['chunkMigrationStatus'] = (
                            msg + f" ({step_breakdown})")
                    else:
                        moved_chunks['chunkMigrationStatus'] = (
                            f"Failed with {chunk.errorMessage}")
                else:
                    moved_chunks['numberOfChunks'] = f'{len(chunks)} chunk(s)'
                    msg = (f"{successful_count} chunk(s) moved " +
                           f"| Total time spent: {total_time_spent}ms")
                    moved_chunks['successChunkMigrations'] = msg

                    failed_migrations = ""
                    for error, info in failed.items():
                        count, timestamps = info
                        failed_migrations += (
                            f'{count} chunk(s): {timestamps} '
                            f'failed with "{error}".')

                    if len(failed_migrations):
                        moved_chunks['failedChunkMigrations'] = (
                            failed_migrations)
                    else:
                        moved_chunks['failedChunkMigrations'] = (
                            "no failed chunks.")

                table_rows.append(moved_chunks)

            print_table(table_rows, titles)
            if not verbose:
                print("\nto show individual chunk migrations, "
                      "run with --verbose.")
Example #7
    def _print_chunk_statistics(self):
        """Prints the chunk split statistics in a table"""
        self.mloginfo.logfile.chunk_splits.reverse()

        chunk_split_groupings = Grouping(
            group_by=lambda x: (x.time.strftime("%Y-%m-%dT%H"), x.namespace))

        for chunk_split in self.mloginfo.logfile.chunk_splits:
            (time, split_range, namespace, numSplits, success, timeTaken,
             error) = chunk_split
            split_tuple = SplitTuple(time=time,
                                     range=split_range,
                                     namespace=namespace,
                                     numSplits=numSplits,
                                     success=success,
                                     timeTaken=timeTaken,
                                     error=error)
            chunk_split_groupings.add(split_tuple)

        titles = [
            '  time (/hour)', 'namespace', '# split-vectors issued',
            'successful chunk splits', 'failed chunk splits'
        ]

        if len(chunk_split_groupings) == 0:
            print("  no chunk splits found.")
        else:
            table_rows = []
            for group, splits in chunk_split_groupings.items():

                time, namespace = group
                successful_count = 0
                total_number_vectors = 0
                split_succeeded_after = dict()
                failed_splits = dict()
                total_time_taken = 0
                for split in splits:
                    total_number_vectors += int(split.numSplits)
                    if (not split.success) and split.error:
                        count, timestamps = failed_splits.get(
                            split.error, (0, list()))
                        count += 1
                        if split_succeeded_after.get(split.range, False):
                            timestamps.append(
                                split.time.strftime("%H:%M:%S.%f")[:-3] +
                                ' **WAS SUCCESSFUL AFTER**')
                        else:
                            timestamps.append(
                                split.time.strftime("%H:%M:%S.%f")[:-3])
                        failed_splits[split.error] = (count, timestamps)
                    elif split.success:
                        split_succeeded_after[split.range] = True
                        successful_count += 1
                        total_time_taken += sum(
                            int(ms) for ms in split.timeTaken)

                split_summary = OrderedDict()

                split_summary['time'] = f"  {time}"
                split_summary['namespace'] = namespace

                split_summary['numSplitVectors'] = (
                    f'{total_number_vectors} split vector(s)')
                msg = (f"{successful_count} chunk(s) splitted" +
                       f" | Total time spent: {total_time_taken}ms")
                split_summary['successfulSplits'] = msg

                failed_split = ""
                for error, info in failed_splits.items():
                    count, timestamps = info
                    if error == "Jumbo":
                        failed_split += (f'{count} chunk(s): ' +
                                         f'{timestamps} marked as {error}.')
                    else:
                        failed_split += (f'{count} chunk(s): {timestamps} ' +
                                         f'failed with "{error}". ')

                if len(failed_split):
                    split_summary['failedChunkSplits'] = failed_split
                else:
                    split_summary['failedChunkSplits'] = (
                        "no failed chunk splits.")

                table_rows.append(split_summary)

            print_table(table_rows, titles)
Example #8
    def run(self):
        """Run this section and print out information."""
        grouping = Grouping(group_by=lambda x: (
            x.datetime, x.txnNumber, x.autocommit, x.readConcern,
            x.timeActiveMicros, x.timeInactiveMicros, x.duration))

        logfile = self.mloginfo.logfile

        if logfile.start and logfile.end:
            progress_start = self.mloginfo._datetime_to_epoch(logfile.start)
            progress_total = (self.mloginfo._datetime_to_epoch(logfile.end) -
                              progress_start)
        else:
            self.mloginfo.progress_bar_enabled = False

        for i, le in enumerate(logfile):
            # update progress bar every 1000 lines
            if self.mloginfo.progress_bar_enabled and (i % 1000 == 0):
                if le.datetime:
                    progress_curr = self.mloginfo._datetime_to_epoch(
                        le.datetime)
                    if progress_total:
                        (self.mloginfo.update_progress(
                            float(progress_curr - progress_start) /
                            progress_total))

            if re.search('transaction', le.line_str):
                lt = LogTuple(le.datetime, le.txnNumber, le.autocommit,
                              le.readConcern, le.timeActiveMicros,
                              le.timeInactiveMicros, le.duration)

                grouping.add(lt)

        grouping.sort_by_size()

        # clear progress bar again
        if self.mloginfo.progress_bar_enabled:
            self.mloginfo.update_progress(1.0)

        # no transactions in the log file
        if not len(grouping):
            print('no transactions found.')
            return

        titles = [
            'datetime', 'txnNumber', 'autocommit', 'readConcern',
            'timeActiveMicros', 'timeInactiveMicros', 'duration'
        ]

        table_rows = []
        # using only the important key-values; more can be added in future
        for g in grouping:
            # calculate statistics for this group
            (datetime, txnNumber, autocommit, readConcern, timeActiveMicros,
             timeInactiveMicros, duration) = g
            stats = OrderedDict()
            stats['datetime'] = str(datetime)
            stats['txnNumber'] = txnNumber
            stats['autocommit'] = autocommit
            stats['readConcern'] = readConcern
            stats['timeActiveMicros'] = timeActiveMicros
            stats['timeInactiveMicros'] = timeInactiveMicros
            stats['duration'] = duration
            table_rows.append(stats)

        if self.mloginfo.args['tsort'] == 'duration':
            table_rows = sorted(table_rows,
                                key=itemgetter(self.mloginfo.args['tsort']),
                                reverse=True)

        print_table(table_rows, titles, uppercase_headers=True)

        print('')
Example #9
    def run(self):
        """Run this section and print out information."""
        grouping = Grouping(group_by=lambda x: (x.namespace, x.operation,
                                                x.pattern))
        logfile = self.mloginfo.logfile

        if logfile.start and logfile.end:
            progress_start = self.mloginfo._datetime_to_epoch(logfile.start)
            progress_total = (self.mloginfo._datetime_to_epoch(logfile.end) -
                              progress_start)
        else:
            self.mloginfo.progress_bar_enabled = False

        for i, le in enumerate(logfile):
            # update progress bar every 1000 lines
            if self.mloginfo.progress_bar_enabled and (i % 1000 == 0):
                if le.datetime:
                    progress_curr = self.mloginfo._datetime_to_epoch(
                        le.datetime)
                    if progress_total:
                        self.mloginfo.update_progress(
                            float(progress_curr - progress_start) /
                            progress_total)

            if (le.operation in ['query', 'getmore', 'update', 'remove'] or
                    le.command in ['count', 'findandmodify',
                                   'geonear', 'find']):
                lt = LogTuple(namespace=le.namespace, operation=op_or_cmd(le),
                              pattern=le.pattern, duration=le.duration)
                grouping.add(lt)

        grouping.sort_by_size()

        # clear progress bar again
        if self.mloginfo.progress_bar_enabled:
            self.mloginfo.update_progress(1.0)

        # no queries in the log file
        if len(grouping) < 1:
            print('no queries found.')
            return

        titles = ['namespace', 'operation', 'pattern', 'count', 'min (ms)',
                  'max (ms)', 'mean (ms)', '95%-ile (ms)', 'sum (ms)']
        table_rows = []

        for g in grouping:
            # calculate statistics for this group
            namespace, op, pattern = g

            group_events = [le.duration for le in grouping[g]
                            if le.duration is not None]

            stats = OrderedDict()
            stats['namespace'] = namespace
            stats['operation'] = op
            stats['pattern'] = pattern
            stats['count'] = len(group_events)
            stats['min'] = min(group_events) if group_events else '-'
            stats['max'] = max(group_events) if group_events else '-'
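            # placeholder keeps 'mean' in column order; computed after 'sum'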
            stats['mean'] = 0
            if np:
                stats['95%'] = (np.percentile(group_events, 95)
                                if group_events else '-')
            else:
                stats['95%'] = 'n/a'
            stats['sum'] = sum(group_events) if group_events else '-'
            stats['mean'] = (stats['sum'] / stats['count']
                             if group_events else '-')

            if self.mloginfo.args['verbose']:
                stats['example'] = grouping[g][0]
                if 'example' not in titles:
                    titles.append('example')

            table_rows.append(stats)

        # sort order depending on field names
        reverse = True
        if self.mloginfo.args['sort'] in ['namespace', 'pattern']:
            reverse = False

        table_rows = sorted(table_rows,
                            key=itemgetter(self.mloginfo.args['sort']),
                            reverse=reverse)
        print_table(table_rows, titles, uppercase_headers=False)
        print('')
Example #10
    def run(self):
        """Run this section and print out information."""
        grouping = Grouping(group_by=lambda x: (
            x.namespace, x.operation, x.bytesRead, x.bytesWritten,
            x.timeReadingMicros, x.timeWritingMicros))
        logfile = self.mloginfo.logfile

        if logfile.start and logfile.end:
            progress_start = self.mloginfo._datetime_to_epoch(logfile.start)
            progress_total = (self.mloginfo._datetime_to_epoch(logfile.end) -
                              progress_start)
        else:
            self.mloginfo.progress_bar_enabled = False

        for i, le in enumerate(logfile):
            # update progress bar every 1000 lines
            if self.mloginfo.progress_bar_enabled and (i % 1000 == 0):
                if le.datetime:
                    progress_curr = self.mloginfo._datetime_to_epoch(
                        le.datetime)
                    if progress_total:
                        (self.mloginfo.update_progress(
                            float(progress_curr - progress_start) /
                            progress_total))

            if le.operation == 'update' or le.command == 'insert':
                lt = LogTuple(namespace=le.namespace,
                              operation=op_or_cmd(le),
                              bytesRead=le.bytesRead,
                              bytesWritten=le.bytesWritten,
                              timeReadingMicros=le.timeReadingMicros,
                              timeWritingMicros=le.timeWritingMicros)
                grouping.add(lt)

        grouping.sort_by_size()

        # clear progress bar again
        if self.mloginfo.progress_bar_enabled:
            self.mloginfo.update_progress(1.0)

        # no storage statistics in the log file
        if not len(grouping):
            print('no statistics found.')
            return

        titles = [
            'namespace', 'operation', 'bytesRead', 'bytesWritten',
            'timeReadingMicros', 'timeWritingMicros'
        ]
        table_rows = []

        for g in grouping:
            # calculate statistics for this group
            (namespace, op, bytesRead, bytesWritten, timeReadingMicros,
             timeWritingMicros) = g

            stats = OrderedDict()
            stats['namespace'] = namespace
            stats['operation'] = op
            stats['bytesRead'] = bytesRead
            stats['bytesWritten'] = bytesWritten
            stats['timeReadingMicros'] = timeReadingMicros
            stats['timeWritingMicros'] = timeWritingMicros

            table_rows.append(stats)

        print_table(table_rows, titles, uppercase_headers=False)
        print('')
Example #11
    def run(self):
        """Run this section and print out information."""
        grouping = Grouping(group_by=lambda x: (
            x.collection, x.operation, x.pattern, x.sort_pattern))
        logfile = self.mloginfo.logfile
        min_duration = self.mloginfo.args['min_duration']
        min_nscanned = self.mloginfo.args['min_nscanned']
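        # these thresholds filter out fast / low-scan events during the scan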

        if logfile.start and logfile.end:
            progress_start = self.mloginfo._datetime_to_epoch(logfile.start)
            progress_total = self.mloginfo._datetime_to_epoch(
                logfile.end) - progress_start
        else:
            self.mloginfo.progress_bar_enabled = False

        for i, le in enumerate(logfile):
            # update progress bar every 1000 lines
            if self.mloginfo.progress_bar_enabled and (i % 1000 == 0):
                if le.datetime:
                    progress_curr = self.mloginfo._datetime_to_epoch(
                        le.datetime)
                    if progress_total:
                        self.mloginfo.update_progress(
                            float(progress_curr - progress_start) /
                            progress_total)

            if min_duration and (le.duration is None
                                 or le.duration < min_duration):
                continue
            if min_nscanned and (le.nscanned is None
                                 or le.nscanned < min_nscanned):
                continue

            if (le.operation in ['query', 'getmore', 'update', 'remove']
                    or le.command in ['count', 'findandmodify', 'geonear']):
                # split on the first dot only; collection names may contain dots
                db, collection = le.namespace.split(".", 1)
                lt = LogTuple(db=db,
                              collection=collection,
                              nscanned=le.nscanned,
                              ntoreturn=le.ntoreturn,
                              writeConflicts=le.writeConflicts,
                              operation=op_or_cmd(le),
                              pattern=le.pattern,
                              duration=le.duration,
                              sort_pattern=le.sort_pattern)
                grouping.add(lt)

        grouping.sort_by_size(group_limit=30)

        # clear progress bar again
        if self.mloginfo.progress_bar_enabled:
            self.mloginfo.update_progress(1.0)

        # no queries in the log file
        if len(grouping) < 1:
            print('no queries found.')
            return

        titles = [
            'collection', 'operation', 'pattern', 'sort_pattern', 'count',
            'mean (ms)', 'sum (mins)'
        ]
        table_rows = []

        for g in grouping:
            # calculate statistics for this group
            try:
                collection, op, pattern, sort_pattern = g
            except ValueError:
                # groups beyond group_limit are collapsed under a single key
                collection, op, pattern, sort_pattern = ['others'] * 4

            group_events = [
                le.duration for le in grouping[g] if le.duration is not None
            ]

            stats = OrderedDict()
            stats['collection'] = collection
            stats['operation'] = op
            stats['pattern'] = pattern
            stats['sort_pattern'] = sort_pattern
            stats['count'] = len(group_events)
            # placeholder keeps 'mean' in column order; computed after 'sum'
            stats['mean'] = 0
            stats['sum'] = sum(group_events) if group_events else '-'
            stats['mean'] = (stats['sum'] / stats['count']
                             if group_events else '-')
            # convert the sum column from ms to minutes only after the mean
            # has been derived from the millisecond total
            stats['sum'] = (round(stats['sum'] / 1000.0 / 60, 2)
                            if group_events else '-')

            if self.mloginfo.args['verbose']:
                stats['example'] = grouping[g][0]
                if 'example' not in titles:
                    titles.append('example')

            table_rows.append(stats)

        # sort order depending on field names
        reverse = True
        if self.mloginfo.args['sort'] in ['namespace', 'pattern']:
            reverse = False

        table_rows = sorted(table_rows,
                            key=itemgetter(self.mloginfo.args['sort']),
                            reverse=reverse)
        print_table(table_rows, titles, uppercase_headers=False)
        print('')
Example #12
    def list(self):
        """ sub-command list. Takes no further parameters. Will discover the current configuration and
            print a table of all the nodes with status and port.
        """
        self.discover()
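        # rows for print_table; None entries render as separator lines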
        print_docs = []

        # mongos
        for node in sorted(self.get_tagged(['mongos'])):
            doc = {'process': 'mongos', 'port': node,
                   'status': 'running' if self.cluster_running[node] else 'down'}
            print_docs.append(doc)

        if len(self.get_tagged(['mongos'])) > 0:
            print_docs.append(None)

        # configs
        for node in sorted(self.get_tagged(['config'])):
            doc = {'process': 'config server', 'port': node,
                   'status': 'running' if self.cluster_running[node] else 'down'}
            print_docs.append(doc)

        if len(self.get_tagged(['config'])) > 0:
            print_docs.append(None)

        # mongod
        for shard in self._get_shard_names(self.loaded_args):
            tags = []
            replicaset = 'replicaset' in self.loaded_args and self.loaded_args['replicaset']
            padding = ''

            if shard:
                print_docs.append(shard)
                tags.append(shard)
                padding = '    '

            if replicaset:
                # primary
                primary = self.get_tagged(tags + ['primary', 'running'])
                if len(primary) > 0:
                    node = list(primary)[0]
                    print_docs.append({'process': padding + 'primary',
                                       'port': node,
                                       'status': 'running' if self.cluster_running[node] else 'down'})

                # secondaries
                secondaries = self.get_tagged(tags + ['secondary', 'running'])
                for node in sorted(secondaries):
                    print_docs.append({'process': padding + 'secondary',
                                       'port': node,
                                       'status': 'running' if self.cluster_running[node] else 'down'})

                # data-bearing nodes that are down or not in the replica set yet
                mongods = self.get_tagged(tags + ['mongod'])
                arbiters = self.get_tagged(tags + ['arbiter'])

                nodes = sorted(mongods - primary - secondaries - arbiters)
                for node in nodes:
                    print_docs.append({'process': padding + 'mongod',
                                       'port': node,
                                       'status': 'running' if self.cluster_running[node] else 'down'})

                # arbiters
                for node in arbiters:
                    print_docs.append({'process': padding + 'arbiter',
                                       'port': node,
                                       'status': 'running' if self.cluster_running[node] else 'down'})

            else:
                nodes = self.get_tagged(tags + ['mongod'])
                if len(nodes) > 0:
                    node = nodes.pop()
                    print_docs.append({'process': padding + 'single',
                                       'port': node,
                                       'status': 'running' if self.cluster_running[node] else 'down'})

            if shard:
                print_docs.append(None)

        if self.args['verbose']:
            # print tags as well
            for doc in filter(lambda x: isinstance(x, dict), print_docs):
                tags = self.get_tags_of_port(doc['port'])
                doc['tags'] = ', '.join(tags)

        print_docs.append(None)
        print('')
        print_table(print_docs)