Example #1
import numpy as np
from texttable import Texttable


def print_matrix(m: np.ndarray, head: np.ndarray = None, title: str = "", c_type: str = 'a') -> None:
    """ Display a matrix as a formatted table.
    :param m: matrix to display
    :param head: optional header row; defaults to blank headers
    :param title: optional title printed above the table
    :param c_type: Texttable column dtype used for every column ('a' = automatic)
    :return: None
    """
    cols_m = m.shape[1]
    rows_m = m.shape[0]
    # Left-align the first column, right-align the rest.
    cols_align = ["l"] + ["r"] * (cols_m - 1)

    content = []
    if head is None:
        head = [' '] * cols_m
    content.append(head)
    for i in range(0, rows_m):
        content.append(m[i])

    table = Texttable()
    table.set_deco(Texttable.HEADER)
    table.set_header_align(cols_align)
    table.set_cols_dtype([c_type] * cols_m)  # 'a' lets texttable pick the dtype automatically
    table.set_cols_align(cols_align)
    table.add_rows(content)

    if title != "":
        print("**********************  " + title + "  **********************")

    print(table.draw())
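
A minimal usage sketch for print_matrix (hypothetical data; assumes numpy and texttable are installed):

m = np.arange(9).reshape(3, 3)
print_matrix(m, head=np.array(['col0', 'col1', 'col2']), title="Demo")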
Example #2
    def get_table(self, width: int = 120):
        table = Texttable()
        table.set_deco(Texttable.HEADER | Texttable.HLINES)
        table.set_max_width(width)

        header = [
            'Property', 'Description', 'Type', 'Dist.', 'Mean', 'Sigma',
            'Offset', 'Truth'
        ]
        table.set_cols_dtype(['t', 't', 't', 't', 'a', 'a', 'a', 'a'])
        table.set_header_align(['l', 'l', 'l', 'l', 'l', 'l', 'l', 'l'])

        rows = [header]
        for name in self.design_properties:
            design_property = self.design_properties[name]
            row = [
                design_property.name, design_property.description,
                design_property.property_type, design_property.distribution,
                design_property.mean, design_property.sigma,
                design_property.get_offset(),
                self.true_properties[design_property.name].get_value()
            ]
            rows.append(row)

        table.add_rows(rows)
        return table.draw()
Example #3
    def do(self):
        table = Texttable(max_width=get_terminal_size().columns - 2)
        table.header(("id", "name", "status", "XmX"))
        table.set_header_align(("c", "c", "c", "c"))
        table.set_cols_align(("r", "l", "l", "r"))
        table.set_deco(Texttable.HEADER | Texttable.VLINES)

        for _id, settings in self.context.instances_settings.items():
            active_state = external_command("systemctl",
                                            "is-active",
                                            f"existdb@{_id}",
                                            capture_output=True,
                                            check=False,
                                            text=True).stdout.strip()
            enabled_state = external_command("systemctl",
                                             "is-enabled",
                                             f"existdb@{_id}",
                                             capture_output=True,
                                             check=False,
                                             text=True).stdout.strip()

            table.add_row(
                (_id, settings["name"], f"{enabled_state}\n{active_state}",
                 settings["xmx"]))

        print("\n" + table.draw())
        print("\nThe XmX values refer to the configuration, "
              "not necessarily the currently effective.")
Example #4

def print_empty_docs(dataset: StructuredDataset):
    """
    Prints the empty documents in the given dataset.
    """
    # Create table for better printing
    table = Texttable()
    table.set_cols_width([30, 15, 10, 10, 10])
    table.set_cols_align(['c', 'c', 'c', 'c', 'l'])
    # Specify header
    table.set_header_align(['c', 'c', 'c', 'c', 'c'])
    table.header([
        'Category name', 'Doc index inside category list of docs', 'Num words',
        'Document name', 'Content preview'
    ])
    num_empty_docs = 0
    for category_name, category_docs in dataset.files_dict.items():
        doc_index_in_category = 0
        for doc in category_docs:
            doc_words = doc.content.split()
            if len(doc_words) == 0:
                num_empty_docs += 1
                num_words_in_doc = len(doc_words)
                # Add a row for each empty doc
                table.add_row([
                    category_name, doc_index_in_category, num_words_in_doc,
                    doc.name, doc.content
                ])
            doc_index_in_category += 1
    print(table.draw())
    print(" Num empty docs:", num_empty_docs)
Example #5
def test_chaining():
    table = Texttable()
    table.reset()
    table.set_max_width(50)
    table.set_chars(list('-|+='))
    table.set_deco(Texttable.BORDER)
    table.set_header_align(list('lll'))
    table.set_cols_align(list('lll'))
    table.set_cols_valign(list('mmm'))
    table.set_cols_dtype(list('ttt'))
    table.set_cols_width([3, 3, 3])
    table.set_precision(3)
    table.header(list('abc'))
    table.add_row(list('def'))
    table.add_rows([list('ghi')], False)
    s1 = table.draw()
    s2 = (Texttable()
          .reset()
          .set_max_width(50)
          .set_chars(list('-|+='))
          .set_deco(Texttable.BORDER)
          .set_header_align(list('lll'))
          .set_cols_align(list('lll'))
          .set_cols_valign(list('mmm'))
          .set_cols_dtype(list('ttt'))
          .set_cols_width([3, 3, 3])
          .set_precision(3)
          .header(list('abc'))
          .add_row(list('def'))
          .add_rows([list('ghi')], False)
          .draw())
    assert s1 == s2
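
The assertion above holds because every Texttable configuration method returns the table itself, which is what makes the fluent style in s2 possible. A compact usage sketch building on the same chaining (hypothetical data):

print(Texttable()
      .set_deco(Texttable.HEADER)
      .header(['key', 'value'])
      .add_rows([['rows', 2], ['cols', 2]], header=False)
      .draw())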
Example #6
def print_empty_docs(dataset: UnstructuredDataset):
    """
    Prints the empty documents in the given dataset.
    """
    # Create table for better printing
    table = Texttable()
    table.set_cols_width([15, 10, 10, 10])
    table.set_cols_align(['c', 'c', 'c', 'l'])
    # Specify header
    table.set_header_align(['c', 'c', 'c', 'c'])
    table.header(
        ['Doc index', 'Num words', 'Document name', 'Content preview'])

    num_empty_docs = 0
    doc_index = 0
    for doc in dataset.files_list:
        doc_words = doc.content.split()
        if len(doc_words) == 0:
            num_empty_docs += 1
            num_words_in_doc = len(doc_words)
            # Add a row for each empty doc
            table.add_row([doc_index, num_words_in_doc, doc.name, doc.content])
        doc_index += 1

    print(table.draw())
    print(" Num empty docs:", num_empty_docs)
Example #7
 def set_table(self):
     mytable = Texttable()
     mytable.set_deco(Texttable.HEADER)
     mytable.set_cols_dtype(['t', 't'])
     header_str = "label value"
     mytable.set_header_align(['l', 'l'])
     mytable.header(header_str.split())
     mytable.set_cols_width([35, 120])
     return mytable
Example #8

def utilization(obj, targets):
    # This plugin only works for SlurmBackend
    backend_cls = Backend.from_config(obj)
    if not issubclass(backend_cls, SlurmBackend):
        raise GWFError('Utilization plugin only works for Slurm backend!')

    graph = Graph.from_config(obj)

    # If user specified list of targets, only report utilization for these.
    # Otherwise, report utilization for all targets.
    matches = graph.targets.values()
    if targets:
        matches = filter_names(matches, targets)

    with backend_cls() as backend:
        job_ids = []
        for target in matches:
            try:
                job_ids.append(backend.get_job_id(target))
            except KeyError:
                pass

    rows = [[
        'Target', 'Cores', 'Walltime Alloc', 'Walltime Used', 'Memory Alloc',
        'Memory Used', 'CPU Time Alloc', 'CPU Time Used', 'Walltime %',
        'Memory %', 'CPU %'
    ]]
    for job in get_jobs(job_ids):
        rows.append([
            job.name, job.allocated_cores,
            pretty_time(job.allocated_time_per_core),
            pretty_time(job.used_walltime),
            pretty_size(job.allocated_memory),
            pretty_size(job.used_memory),
            pretty_time(job.allocated_cpu_time),
            pretty_time(job.used_cpu_time),
            format(job.walltime_utilization, '.1f'),
            format(job.memory_utilization, '.1f'),
            format(job.cpu_utilization, '.1f')
        ])

    table = Texttable()

    table.set_deco(Texttable.BORDER | Texttable.HEADER | Texttable.VLINES)

    ncols = len(rows[0])

    table.set_max_width(0)
    table.set_header_align('l' * ncols)
    table.set_cols_align(['l'] + (ncols - 1) * ['r'])
    table.set_cols_dtype(['t'] * ncols)

    table.add_rows(rows)

    print(table.draw())
Example #9
 def __print_entries(self):
     table = Texttable()
     table.set_deco(Texttable.HEADER)
     table.set_cols_dtype(["i", "t", "t"])
     table.set_cols_align(["l", "l", "l"])
     table.set_header_align(["l", "l", "l"])
     table.header(["ID", "Name", "Tracker"])
     for index, (folder_name, urls) in enumerate(
             self.announce_urls_by_folder_name.items()):
         self.__add_entry(table, index, folder_name, urls)
     table_output = table.draw()
     print(table_output, end="")
Example #10

def print_docs_that_contain_word(dataset: StructuredDataset,
                                 word: str,
                                 num_chars_preview=70):
    """
    Prints a table with the following properties of all documents in the dataset that contain the given word:
        - Category name
        - Index inside the document list of that category (in the dataset.files_dict)
        - Number of words of that document
        - Number of occurrences of the word in that document
        - Document name in the dataset
        - Preview of the document content

    :param dataset: Dataset.
    :param word: Word contained in the printed documents.
    :param num_chars_preview: Number of characters to show in the preview column.
    """
    # Create table for better printing
    table = Texttable()
    table.set_cols_width([30, 15, 10, 15, 10, num_chars_preview])
    table.set_cols_align(['c', 'c', 'c', 'c', 'c', 'l'])

    # Specify header
    table.set_header_align(['c', 'c', 'c', 'c', 'c', 'c'])
    table.header([
        'Category name', 'Doc index inside category list of docs', 'Num words',
        'Num occurrences of given word', 'Document name', 'Content preview'
    ])

    num_docs_contain_word = 0
    for category_name, category_docs in dataset.files_dict.items():
        doc_index_in_category = 0
        for doc in category_docs:
            doc_words = doc.content.split()
            if word in doc_words:
                num_docs_contain_word += 1
                num_words_in_doc = len(doc_words)
                num_word_occurences_in_doc = doc_words.count(word)
                # Add a row for each doc that contains the given word
                table.add_row([
                    category_name,
                    doc_index_in_category,
                    num_words_in_doc,
                    num_word_occurences_in_doc,
                    doc.name,
                    # TODO: Instead of showing the first k characters, it would be better to show text around the word
                    doc.content[:num_chars_preview]
                ])
            doc_index_in_category += 1

    print(table.draw())
    print(" Num docs with the word " + word + ":", num_docs_contain_word)
Example #11
def _dump_properties(properties, report):
    table = Texttable()
    table.set_deco(Texttable.HEADER)
    table.set_cols_dtype(['t', 't'])
    table.set_cols_width([70, 50])
    table.set_header_align(['l', 'l'])

    # this supports tuple-keys (for matching questions); which is why we don't simply use
    # json.dumps(properties) to print this out.
    for k, v in properties.items():
        table.add_row([json.dumps(k), json.dumps(v)])

    for line in table.draw().split('\n'):
        report(line)
Example #12
def _build_table(header, rows):
    table = Texttable()
    table.set_deco(Texttable.HEADER)
    table.header(header)
    table.set_header_align(['l' for _ in range(len(header))])

    max_widths = [len(i) for i in header]
    for row in rows:
        table.add_row(row)
        for idx, i in enumerate(row):
            max_widths[idx] = max(max_widths[idx], len(i))

    table.set_cols_width([min(i, 100) for i in max_widths])
    return table
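
A hedged usage sketch for _build_table (hypothetical header and rows; note that the helper calls len() on every cell, so cells must be strings):

table = _build_table(['name', 'role'],
                     [['ada', 'engineer'], ['grace', 'admiral']])
print(table.draw())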
Example #13
 def create_table(streams):
     '''Creates a terminal-friendly table from streams.'''
     table = Texttable()
     table.set_deco(Texttable.HEADER | Texttable.VLINES)
     table.set_header_align(['l', 'l', 'l'])
     table.set_max_width(0)
     data = [['Streamer', 'Title', 'Viewers']]
     data.extend([stream.streamer, stream.name, stream.viewers]
                 for stream in streams)
     table.add_rows(data)
     return table.draw()
Example #14
def print_outputs(success_result: dict):
    errors = success_result["errors"]
    output = success_result["output"]

    if output:
        table = Texttable(max_width=100)
        alignment = ["r", "l", "l"]
        table.set_cols_align(alignment)
        table.set_header_align(alignment)
        table.header(["Thread", "Time", ""])
        table.set_deco(Texttable.HEADER)
        start = datetime.datetime.fromisoformat(output[0]["time"])
        for o in output:
            offset = datetime.datetime.fromisoformat(o["time"]) - start
            table.add_row([o["thread"], "+" + str(offset), o["text"].strip()])

        print("\n" + table.draw() + "\n")

    for idx, item in enumerate(errors):
        if item:
            print(bad(f"\nException in Thread {idx}"))
            print(item)
Example #15

def print_words_that_contain_elem(dataset: Dataset, elem: str):
    """
    Prints a table with the following info:
        - Word that contains the given element.
        - Number of occurrences of the word in the whole dataset

    :param dataset: Dataset.
    :param elem: Element that the printed words must contain. \
    Used to build a regular expression matching that element.
    """
    elem_re = re.compile(elem)

    # Create table for better printing
    table = Texttable()
    table.set_cols_width([30, 10])
    table.set_cols_align(['c', 'c'])

    # Specify header
    table.set_header_align(['c', 'c'])
    table.header(['Word', 'Num occurrences'])

    num_words_contain_elem = 0
    word_occurrence_dict = {}
    for doc in dataset.as_documents_content_list():
        for word in doc:
            if elem_re.search(word) is not None:
                if word not in word_occurrence_dict:
                    word_occurrence_dict[word] = 0
                word_occurrence_dict[word] += 1
                num_words_contain_elem += 1

    # Sort by the number of occurrences and add items to table
    word_occurrence_sorted = sorted(word_occurrence_dict.items(),
                                    key=lambda kv: kv[1])
    for word, occurrences in word_occurrence_sorted:
        table.add_row([word, occurrences])

    print(table.draw())
    print(" Num words with the elem " + elem + ":", num_words_contain_elem)
Example #16
def fstat_iter(depot_path, to_changelist, from_changelist=0, cache_dir='.o4'):
    '''
    Return the needed fstat data by combining three possible sources: perforce,
    the fstat server, and local fstat cache files.
    Note that the local files and the fstat server are guaranteed to return lines
    in (descending) changelist order, while the Perforce data may not be.
    The three sources are ordered [fstat server, perforce, fstat server, local];
    each one may or may not be used, and the fstat server will not be used twice.
    In the order read, each subset will contain only changelist numbers less than
    all that have been read in previous subsets.
    The local cache file created should not have more than one entry for any
    filename. Such duplication may come about due to a file having been changed in
    more than one of the changelist subsets being queried; a row for a file that
    has been seen already (and thus, at a higher changelist) must be ignored.

    Beware: do not break out of the returned generator! This will
    prevent local cache files from being created, causing superfluous
    access to perforce and/or fstat server.
    '''
    from tempfile import mkstemp
    from o4_pyforce import P4TimeoutError, P4Error

    to_changelist, from_changelist = int(to_changelist), int(from_changelist)
    cache_cl, cache_fname = get_fstat_cache(to_changelist, cache_dir)
    updated = []
    all_filenames = set()
    CLR = '%c[2K\r' % chr(27)

    summary = {'Perforce': None, 'Fstat server': None, 'Local cache': None}

    try:
        fout = temp_fname = None
        highest_written_cl = 0
        _first = _last = 0  # These are local and re-used in various blocks below
        fh, temp_fname = mkstemp(dir=cache_dir)
        os.close(fh)
        fout = gzip.open(temp_fname, 'wt', encoding='utf8', compresslevel=9)
        print(
            "# COLUMNS: F_CHANGELIST, F_PATH, F_REVISION, F_FILE_SIZE, F_CHECKSUM",
            file=fout)

        if cache_cl == to_changelist:
            print(f'*** INFO: Satisfied from local cache {cache_fname}',
                  file=sys.stderr)
            for cl, line in fstat_from_csv(cache_fname, fstat_cl):
                if not cl:
                    continue
                if cl < from_changelist:
                    break
                yield line
            return

        missing_range = (to_changelist, cache_cl + 1)
        o4server_range = (None, None)

        if o4_config.fstat_server():
            _first = _last = 0
            try:
                for line in fstat_from_server(depot_path, missing_range[0],
                                              missing_range[1],
                                              o4_config.fstat_server_nearby()):
                    cl, path, line = fstat_cl_path(line)
                    if not cl:
                        continue
                    _last = cl
                    _first = _first or cl
                    all_filenames.add(path)
                    print(line, file=fout)
                    if from_changelist < cl <= to_changelist:
                        yield line
                summary['Fstat server'] = (missing_range, (int(_first),
                                                           int(_last)))
                missing_range = (None, None)
            except FstatRedirection as e:
                print(
                    f'*** INFO: Fstat server redirected to changelist {e.cl}',
                    file=sys.stderr)
                if e.cl > to_changelist:
                    print(
                        f'*** WARNING: Fstat server redirected to {e.cl} which is greater',
                        f'than {to_changelist}.',
                        file=sys.stderr)
                    print(
                        '             Please contact [email protected].',
                        file=sys.stderr)
                elif e.cl > cache_cl:
                    missing_range = (to_changelist, e.cl + 1)
                    o4server_range = (e.cl, cache_cl + 1)
            except FstatServerError as e:
                summary['Fstat server'] = (missing_range, (0, 0))

        highest_written_cl = max(highest_written_cl, int(_first))

        perforce_filenames = dict()
        if missing_range[0]:
            retry = 3
            while retry:
                retry -= 1
                try:
                    for f in fstat_from_perforce(depot_path, missing_range[0],
                                                 missing_range[1]):
                        if f[F_PATH] and f[F_PATH] not in all_filenames:
                            if from_changelist < int(
                                    f[F_CHANGELIST]) <= to_changelist:
                                yield fstat_join(f)
                            f[0] = int(f[0])
                            perforce_filenames[f[F_PATH]] = f
                    break
                except P4Error as e:
                    done = False
                    for a in e.args:
                        fix = False
                        if 'Too many rows scanned' in a.get('data', ''):
                            if cache_cl:
                                msg = f"Maxrowscan occurred, ignoring cache {cache_fname}."
                                msg += ' This is probably due to a bad Root in your clientspec;'
                                msg += ' if not, contact the Perforce admins and let them know.'
                                print(f"{CLR}*** WARNING: {msg}",
                                      file=sys.stderr)
                                fix = True
                                missing_range = (to_changelist, None)
                                retry += 1
                        elif 'Request too large' in a.get('data', ''):
                            msg = f"*** ERROR: 'Request too large'. {depot_path} may be too broad."
                            if depot_path == '//...':
                                msg += ' This is almost certainly due to a bad Root in your clientspec.'
                            else:
                                msg += ' This may be due to a bad Root in your clientspec.'
                            sys.exit(f"{CLR}{msg}")
                        elif 'no such file' in a.get('data', ''):
                            print(
                                f"{CLR}*** INFO: Empty changelist range ({missing_range}).",
                                file=sys.stderr)
                            # Just an empty range of changelists, we are done
                            done = True
                            break
                        if not fix:
                            raise
                    if done:
                        break
                except P4TimeoutError:
                    perforce_filenames.clear()
                    print(
                        f"{CLR}*** WARNING: ({retry+1}/3) P4 Timeout while getting fstat",
                        file=sys.stderr)
            else:
                sys.exit(f"{CLR}*** ERROR: "
                         f"Too many P4 Timeouts for p4 fstat"
                         f"{depot_path}@{from_changelist},@{to_changelist}")

        all_filenames.update(perforce_filenames.keys())
        if perforce_filenames:
            perforce_rows = sorted(perforce_filenames.values(), reverse=True)
            summary['Perforce'] = (missing_range,
                                   (int(perforce_rows[0][F_CHANGELIST]),
                                    int(perforce_rows[-1][F_CHANGELIST])))
            highest_written_cl = max(highest_written_cl,
                                     int(perforce_rows[0][F_CHANGELIST]))
            for f in perforce_rows:
                print(fstat_join(f), file=fout)
            del perforce_filenames

        if o4server_range[0]:
            _first = _last = 0
            for line in fstat_from_server(depot_path, o4server_range[0],
                                          o4server_range[1]):
                cl, path, line = fstat_cl_path(line)
                if not cl:
                    continue
                _last = cl
                _first = _first or cl
                if path not in all_filenames:
                    all_filenames.add(path)
                    print(line, file=fout)
                    if from_changelist < cl <= to_changelist:
                        yield line
            summary['Fstat server'] = (o4server_range, (int(_first),
                                                        int(_last)))
            highest_written_cl = max(highest_written_cl, int(_first))

        if cache_cl:
            _first = _last = 0
            for cl, path, line in fstat_from_csv(cache_fname, fstat_cl_path):
                if not cl:
                    continue
                _last = cl
                _first = _first or cl
                if path not in all_filenames:
                    print(line, file=fout)
                    if from_changelist < cl <= to_changelist:
                        yield line
                else:
                    all_filenames.remove(path)
            summary['Local cache'] = ((cache_cl, 1), (int(_first), int(_last)))
            highest_written_cl = max(highest_written_cl, int(_first))

        fout.close()
        fout = None
        if highest_written_cl:
            os.chmod(temp_fname, 0o444)
            os.rename(temp_fname, f'{cache_dir}/{highest_written_cl}.fstat.gz')
    finally:
        if fout:
            fout.close()
        try:
            if temp_fname:
                os.unlink(temp_fname)
        except FileNotFoundError:
            pass

    from texttable import Texttable
    table = Texttable()
    table.set_cols_align(['l', 'l', 'l'])
    table.set_header_align(['l', 'l', 'l'])
    table.header(['Fstat source', 'Requested', 'Provided'])
    table.set_chars(['-', '|', '+', '-'])
    table.set_deco(table.HEADER)
    for k in 'Perforce', 'Fstat server', 'Local cache':
        if summary[k]:
            v = summary[k]
            data = ('{:10,} - {:10,}'.format(
                (v[0][0] or 0), (v[0][1] or 0)), '{:10,} - {:10,}'.format(
                    (v[1][0] or 0), (v[1][1] or 0)))
        else:
            data = ('Not used', '')
        table.add_row([k, data[0], data[1]])
    table = '\n'.join('*** INFO: ' + row for row in table.draw().split('\n'))
    print(table, file=sys.stderr)
Example #17
    def check_against(self, other: 'Result', report: Callable[[str], None],
                      workarounds: Workarounds) -> bool:
        all_ok = True

        self_properties = self.get_normalized_properties()
        other_properties = other.get_normalized_properties()

        keys = sorted(
            list(
                set(
                    list(self_properties.keys()) +
                    list(other_properties.keys()))))

        table = Texttable()
        table.set_deco(Texttable.HEADER)
        table.set_cols_dtype(['t', 't', 't', 't'])
        table.header([
            'OK?', 'KEY',
            self.get_origin().name.upper(),
            other.get_origin().name.upper()
        ])
        table.set_cols_width([10, 60, 20, 20])
        table.set_header_align(['l', 'l', 'l', 'l'])

        def ignore_key(k: Tuple) -> bool:
            return (k[0] == "results_tab"
                    and workarounds.ignore_wrong_results_in_results_tab)

        def make_is_close(eps: Decimal = Decimal("0.01")):
            def is_close(a: Union[str, MaybeDecimal],
                         b: Union[str, MaybeDecimal]) -> bool:
                a = MaybeDecimal(a)
                b = MaybeDecimal(b)

                if not a.valid() or not b.valid():
                    return False

                try:
                    return abs(a.to_decimal() - b.to_decimal()) <= eps
                except decimal.InvalidOperation:
                    print("could not compute is_close for (%s, %s)" % (a, b))
                    return False

            return is_close

        def is_exactly_equal(a, b) -> bool:
            return a == b

        comparators = dict()

        if workarounds.inaccurate_percentage_rounding:
            comparators[("statistics_tab",
                         "percentage_reached")] = make_is_close()
            comparators[("results_tab",
                         "percentage_reached")] = make_is_close()

        for k in keys:
            value_self = "%s" % self_properties.get(k, None)
            value_other = "%s" % other_properties.get(k, None)

            type_self = self.types.get(k, None)
            type_other = other.types.get(k, None)
            types = tuple(
                set(t for t in (type_self, type_other) if t is not None))

            value_self = workarounds.normalize(value_self)
            value_other = workarounds.normalize(value_other)

            if types == ('json', ):
                value_self = _normalize_json(value_self)
                value_other = _normalize_json(value_other)
            elif len(types) > 0:
                raise RuntimeError("incompatible property data types")

            is_equal = comparators.get(k, is_exactly_equal)

            if ignore_key(k):
                status = "IGNORED"
            elif is_equal(value_self, value_other):
                status = "OK"
            else:
                status = "FAIL"
                all_ok = False

            table.add_row([
                status, " / ".join(k),
                value_self.replace("\n", "\\n"),
                value_other.replace("\n", "\\n")
            ])

        for line in table.draw().split("\n"):
            report(line)

        if False:  # enable for further debugging
            if not all_ok:
                report("\n")
                report("full dump of properties of %s:" %
                       self.get_origin().name.upper())
                _dump_properties(self.properties, report)

                report("\n")
                report("full dump of properties of %s:" %
                       other.get_origin().name.upper())
                _dump_properties(other.properties, report)

        if self.errors:
            for type, err in self.errors.items():
                report("error %s:%s in %s" % (type, err, self.origin))
            all_ok = False
        if other.errors:
            for type, err in other.errors.items():
                report("error %s:%s in %s" % (type, err, other.origin))
            all_ok = False

        return all_ok
Example #18
    def check_against(self, other, report, workarounds):
        all_ok = True

        self_properties = self.get_normalized_properties()
        other_properties = other.get_normalized_properties()

        keys = sorted(
            list(
                set(
                    list(self_properties.keys()) +
                    list(other_properties.keys()))))

        table = Texttable()
        table.set_deco(Texttable.HEADER)
        table.set_cols_dtype(['t', 't', 't', 't'])
        table.header([
            'OK?', 'KEY',
            self.get_origin().name.upper(),
            other.get_origin().name.upper()
        ])
        table.set_cols_width([10, 60, 20, 20])
        table.set_header_align(['l', 'l', 'l', 'l'])

        for k in keys:
            value_self = "%s" % self_properties.get(k, None)
            value_other = "%s" % other_properties.get(k, None)

            type_self = self.types.get(k, None)
            type_other = other.types.get(k, None)
            types = tuple(
                set(t for t in (type_self, type_other) if t is not None))

            value_self = workarounds.normalize(value_self)
            value_other = workarounds.normalize(value_other)

            if types == ('json', ):
                value_self = _normalize_json(value_self)
                value_other = _normalize_json(value_other)
            elif len(types) > 0:
                raise RuntimeError("incompatible property data types")

            if value_self == value_other:
                status = "OK"
            else:
                status = "FAIL"
                all_ok = False

            table.add_row([
                status, " / ".join(k),
                value_self.replace("\n", "\\n"),
                value_other.replace("\n", "\\n")
            ])

        for line in table.draw().split("\n"):
            report(line)

        if not all_ok:
            report("\n")
            report("full dump of properties of %s:" %
                   self.get_origin().name.upper())
            _dump_properties(self.properties, report)

            report("\n")
            report("full dump of properties of %s:" %
                   other.get_origin().name.upper())
            _dump_properties(other.properties, report)

        if self.errors:
            for type, err in self.errors.items():
                report("error %s:%s in %s" % (type, err, self.origin))
            all_ok = False
        if other.errors:
            for type, err in other.errors.items():
                report("error %s:%s in %s" % (type, err, other.origin))
            all_ok = False

        return all_ok
Example #19

def statAllUsers(callArgs):
    reply = ""
    calldataArgs = str.split(callArgs, '-')

    if calldataArgs[0] == "currentMonth":
        month = str(datetime.now().month)
        if len(month) == 1:  # datetime.now().month returns the month without a leading zero; pad it.
            month = '0{0}'.format(month)
        stat = databaseProvider.getResultAllUsers(month=month)

        reply = "<pre>Статистика по всем пользователям за месяц {0}:\n".format(month)
        table = Texttable()
        table.set_deco(Texttable.BORDER | Texttable.HEADER)
        table.set_header_align(["c", "c", "c", "c"])
        table.header(["СОТРУДНИК", "ПЕРЕР.", "ОПОЗД.", "ИТОГО"])
        for i in range(0, len(stat)):
            userStat = stat[i]
            table.add_row([userStat[3], userStat[0], userStat[1], userStat[2]])
        reply += table.draw()
        reply += "</pre>"

    elif calldataArgs[0] == "full":
        month = str(datetime.now().month)
        if len(month) == 1:  # datetime.now().month returns the month without a leading zero; pad it.
            month = '0{0}'.format(month)
        stat = databaseProvider.getResultAllUsers(month="00", full=True)
        reply = "<pre>Статистика по всем пользователям за всё время:\n"
        table = Texttable()
        table.set_deco(Texttable.BORDER | Texttable.HEADER)
        table.set_header_align(["c", "c", "c", "c"])
        table.header(["СОТРУДНИК", "ПЕРЕР.", "ОПОЗД.", "ИТОГО"])
        for i in range(0, len(stat)):
            userStat = stat[i]
            table.add_row([userStat[3], userStat[0], userStat[1], userStat[2]])
        reply += table.draw()
        reply += "</pre>"

    elif calldataArgs[0] == "lastWeek":
        lastWeek = datetime.isocalendar(datetime.now())[1] - 1
        if lastWeek < 1:
            return "ОШИБКА. К сожалению, информация за прошлый год недоступна в таком виде."
        stat = databaseProvider.getResultAllUsersByWeek(str(lastWeek))
        reply = "Статистика по всем пользователям за предыдущую неделю ({0}):\n" \
                "Формат: пользователь;переработки;опоздания;итог\n".format(getWeekDaysByWeekNumber(lastWeek))
        for i in range(0, len(stat)):
            userStat = stat[i]
            reply += "\n{0};{1};{2};{3}" \
                      .format(userStat[3], userStat[0], userStat[1], userStat[2])

    elif calldataArgs[0] == "currentWeek":
        currentWeek = datetime.isocalendar(datetime.now())[1]
        if currentWeek < 1:
            return "ОШИБКА. К сожалению, информация за прошлый год недоступна в таком виде."
        stat = databaseProvider.getResultAllUsersByWeek(str(currentWeek))
        reply = "Статистика по всем пользователям за текущую неделю ({0}):\n" \
                "Формат: пользователь;переработки;опоздания;итог\n".format(getWeekDaysByWeekNumber(currentWeek))
        for i in range(0, len(stat)):
            userStat = stat[i]
            reply += "\n{0};{1};{2};{3}" \
                      .format(userStat[3], userStat[0], userStat[1], userStat[2])

    elif calldataArgs[0] == "month":
        month = calldataArgs[1]
        if len(month) == 1:  # pad a single-digit month with a leading zero.
            month = '0{0}'.format(month)
        stat = databaseProvider.getResultAllUsers(month=month)
        reply = "<pre>Статистика по всем пользователям за месяц {0}:\n".format(month)
        table = Texttable()
        table.set_deco(Texttable.BORDER | Texttable.HEADER)
        table.set_header_align(["c", "c", "c", "c"])
        table.header(["СОТРУДНИК", "ПЕРЕР.", "ОПОЗД.", "ИТОГО"])
        for i in range(0, len(stat)):
            userStat = stat[i]
            table.add_row([userStat[3], userStat[0], userStat[1], userStat[2]])
        reply += table.draw()
        reply += "</pre>"

    return reply
Example #20
def main():
    parser = argparse.ArgumentParser(
        add_help=True,
        description="Command-line utilities to interact with Copper Cloud.",
    )
    parser.add_argument(
        "--csv-output-file",
        dest="csv_output_file",
        default=None,
        help="Write output to CSV file.",
    )
    parser.add_argument(
        "--output-dir",
        dest="output_dir",
        default='generated',
        help="Write output to specified directory.",
    )
    parser.add_argument(
        "--quiet",
        dest="quiet",
        action="store_true",
        default=False,
        help="Suppress printing results to the console.",
    )
    parser.add_argument(
        "--debug",
        dest="debug",
        action="store_true",
        default=False,
        help="Enable debug output",
    )
    parser.add_argument(
        "--query-limit",
        type=int,
        dest="query_limit",
        default=None,
        help="Limit API query (for debugging purposes).",
    )

    subparser = parser.add_subparsers()

    parser_a = subparser.add_parser("bulk")
    parser_a.add_argument(
        "--detailed",
        dest="detailed",
        action="store_true",
        default=False,
        help="Enable detailed output",
    )
    parser_a.set_defaults(func=get_bulk_data)

    parser_b = subparser.add_parser("meter")
    subparser_b = parser_b.add_subparsers()
    parser_c = subparser_b.add_parser("usage")
    parser_c.add_argument(
        "--meter-id",
        dest="meter_id",
        default=None,
        help="Select a single meter to query.",
    )
    parser_c.add_argument(
        "--granularity",
        dest="granularity",
        default="hour",
        help="Set query granularity for time-series data.",
    )
    time_fmt = "%%Y-%%m-%%dT%%H:%%M:%%SZ"
    parser_c.add_argument("start",
                          help="Query start time, formatted as: " + time_fmt)
    parser_c.add_argument("end",
                          help="Query end time, formatted as: " + time_fmt)
    parser_c.set_defaults(func=get_meter_usage)
    parser_d = subparser_b.add_parser("check-for-water-reversals")
    parser_d.set_defaults(func=get_water_meter_reversals)
    parser_d.add_argument(
        "--check-limit",
        type=int,
        dest="check_limit",
        default=None,
        help="Limit number of homes to check (for debugging purposes).",
    )
    parser_d.add_argument(
        "--method",
        dest="method",
        default="summer",
        help="Method for checking [summer, winter]",
    )

    parser_prem = subparser.add_parser("premise")
    parser_prem.set_defaults(func=get_prem_data)

    args = parser.parse_args()

    # Walk through user login (authorization, access_token grant, etc.)
    cloud_client = CopperCloudClient(args, __make_bulk_url(limit=1))

    # https://bugs.python.org/issue16308
    try:
        func = args.func
    except AttributeError:
        parser.error("too few arguments")
    title, header, rows, dtypes = func(cloud_client)

    table = Texttable(max_width=0)
    table.set_deco(Texttable.HEADER)
    table.set_cols_dtype(dtypes)
    row_align = ["l"] * len(header)
    row_align[-1] = "r"
    table.set_header_align(row_align)
    table.set_cols_align(row_align)
    rows.insert(0, header)
    table.add_rows(rows)

    if not args.quiet:
        print("\n{title} (rows={num}):".format(title=title, num=len(rows) - 1))
        print(table.draw() + "\n")

    if args.csv_output_file:
        output_file = args.csv_output_file
        if not output_file.startswith('generated/'):
            output_file = os.path.join(cloud_client.args.output_dir,
                                       cloud_client.args.csv_output_file)
        __write_csvfile(output_file, rows)

    print("complete!")
Example #21
s0 = "A1"
s1 = "就发啦可视对讲额我只"
s2 = "e打机啊ifo1"
s3 = "e打机啊ifo11111111111111111"
s4 = "e打机啊ifo2"
s5 = "e打机啊ifo3"
s6 = "e打机啊ifo4"

l1 = [s0, s1, s2, s1, s5]
l2 = ["", s3, s6, s4, s5]
l3 = ["", s1, s3, s2, s5]
l4 = ["", s1, s1, s3, s5]
l5 = ["", s1, s1, s5, s3]
#l1 = [s0,s1]
#l2 = [s0,s1]
cols_dtype = []
cols_align = []
for _str in l1:
    cols_dtype.append('a')
    cols_align.append('l')

table = Texttable()
table.set_cols_dtype(cols_dtype)
table.set_cols_align(cols_align)
table.set_header_align(cols_align)
table.set_cols_width([5, 30, 30, 30, 30])

print([l1, l2])
table.add_rows([l1, l2, l3, l4, l5])

print(table.draw())
Example #22
def main():
    config = ConfigParser.ConfigParser()
    config.read(CONFIGFILE)

    # Initialize variables from configuration file.
    username = config.get('general', 'username')
    password = config.get('general', 'password')
    email_subject = config.get('general', 'email_subject')
    email_from = config.get('general', 'email_from')
    email_to = config.get('general', 'email_to')
    smtp_server = config.get('general', 'smtp_server')

    # Data structure to store output
    owed_table = {}

    # Let's retrieve the login page and grab the hidden form input variables.
    page = requests.get(LOGIN_URL)
    tree = html.fromstring(page.content)
    form = tree.xpath('//form[@id="Form1"]')[0]

    # Look for <input> elements with type=hidden.  Grab their names and values
    # and store in our form data dict.  This is a dict comprehension that is
    # available only in Python 2.7+.
    formdata = {
        i.name: i.value
        for i in form.xpath('.//input') if i.type == 'hidden'
    }

    # We still need to manually handle one more input.
    formdata['LoginControl1:OSLoginBN'] = 'Login'

    # Now add our username and password.
    formdata['LoginControl1:OSUserNameTB'] = username
    formdata['LoginControl1:OSPasswordTB'] = password

    # Set up our querystring.
    querystring = {'ReturnUrl': '/OneStop/default.aspx'}

    # The requests module can automatically follow the redirect from a
    # successful login.  We don't really do any graceful error handling here.
    page = requests.post(LOGIN_URL,
                         data=formdata,
                         allow_redirects=True,
                         params=querystring)

    # Grab our "root" and then find the structure with the information we
    # want.
    tree = html.fromstring(page.content)
    accounts = tree.xpath('//*[@id="AccountDetailsDG"]')[0]

    # Plan:
    #  - Iterate by child table element under AccountDetailsDG.
    #  - If we find BalanceDue > 0, gather display relevant account info

    for acct_table in accounts.xpath('.//table'):
        balance = acct_table.xpath(
            r".//span[re:match(@id, '.*BalanceDueLBL.*')]",
            namespaces={"re": 'http://exslt.org/regular-expressions'
                        })[0].text_content()
        owed = Decimal(re.sub(r'[^\d.]', '', balance))

        if owed > MINBALANCE:
            # Get the account number
            acct_num = acct_table.xpath(
                r".//span[re:match(@id, '.*AccountNumberLBL.*')]",
                namespaces={"re": 'http://exslt.org/regular-expressions'
                            })[0].text_content()

            # Initialize a dictionary for storing our results.
            owed_table[acct_num] = {}
            # Store the owed balance
            owed_table[acct_num]['balance'] = balance

            # Get the name on the account
            acct_name = acct_table.xpath(
                r".//span[re:match(@id, '.*AccountNameLBL.*')]",
                namespaces={"re": 'http://exslt.org/regular-expressions'
                            })[0].text_content()
            # Store the account name
            owed_table[acct_num]['acct_name'] = acct_name

    # If we have data to share, prep it and email it out.
    if owed_table:
        # Instantiate a Texttable object for display
        t = Texttable(max_width=78)
        t.set_deco(Texttable.HEADER)
        t.set_cols_dtype(['t', 't', 'a'])
        t.set_cols_align(['l', 'l', 'r'])
        t.set_header_align(['l', 'l', 'l'])
        t.header(["Account", "Name", "Balance"])

        # Populate content.
        for x in owed_table.keys():
            t.add_row(
                [x, owed_table[x]['acct_name'], owed_table[x]['balance']])

        # Render the table to a string for the email body
        output = t.draw()

        msg = MIMEText(output)
        msg['Subject'] = email_subject
        msg['From'] = email_from
        msg['To'] = email_to
        msg['Importance'] = 'High'

        s = smtplib.SMTP(smtp_server)
        s.sendmail(email_from, [email_to], msg.as_string())
        s.quit()
Example #23
def show_incident():
    global args
    global db
    if args.incident not in db:
        print("# cannot find {}".format(args.incident))
        sys.exit(-1)
    #print(json.dumps(db[args.incident],indent=2,sort_keys=True))
    cout = ""
    mout = ""
    wout = "<html>\n<header>\n"
    wout += "<tt>\n"
    i = db[args.incident]
    mout += "## {}\n".format(i["brief"])
    mout += "### {} \n".format(i["incident_number"])
    mout += "| __meta__ | __value__ |\n"
    mout += "| ---- | ---- |\n"
    cout += "# ({}) {}\n\n".format(i["incident_number"], i["brief"])
    wout += "<title>{} Postmortem</title>\n".format(i["incident_number"])
    wout += "<h2>{} - {}</h2>\n".format(i["incident_number"], i["brief"])
    wout += "</header>\n"
    wout += "<table>\n"
    mout += "|{}|{}|\n".format("CreateTime", i["create_time"])
    cout += "Create_time :\n    {}\n".format(i["create_time"])
    wout += "<tr><td><b>CreateTime</b></td><td>{}</td></tr>\n".format(
        i["create_time"])
    for keym in [
            "OncallOps", "Status", "Impact", "RootCause", "DurationOfProblem",
            "ServiceImpacted", "%Impacted", "UserImpact", "RevenueImpact",
            "HowToRepeat", "Resolution", "References", "LessonLearned",
            "ActionItem", "KeywordsForSearch"
    ]:
        if keym in i and i[keym]:
            cout += "{} : \n".format(keym)
            cout += "    {}\n".format(i[keym])
            wout += "<tr><td><b>{}</b></td><td>{}</td></tr>\n".format(
                keym, i[keym])
            mout += "| {} | {} |\n".format(keym, i[keym])
        else:
            if args.post:
                i[keym] = input("# {}=".format(keym))
                cout += "{} : \n".format(keym)
                cout += "    {}\n".format(i[keym])
                wout += "<tr><td><b>{}</b></td><td>{}</td></tr>\n".format(
                    keym, i[keym])
                mout += "| {} | {} |\n".format(keym, i[keym])
                closedb()
    wout += "</table>\n"
    if args.json:
        print(json.dumps(i, indent=2, sort_keys=True))
        return
    cout += "\n\n# timeline   : \n"
    wout += "<h2>Timeline    :</h2>\n"
    mout += "\n\n"
    mout += "### {} \n".format("Timeline :")
    mout += "| __datetime__ | __brief__ | __detail__ | \n"
    mout += "| ---- | ---- | ---- | \n"
    #wout += "<table style=\"border-collapse: collapse;border: 1px;\">\n"
    wout += "<table border=1 style=\"border-collapse: collapse;\">\n"
    wout += "<tr style=\"background-color:#F3F3F3\"><th>update_time</th><th>brief</th><th>detail</th></tr>\n"
    table = Texttable()
    table.set_header_align(['l', 'l', 'l'])
    table.set_cols_align(['l', 'l', 'l'])
    table.set_chars([' ', ' ', ' ', '-'])
    maxw = 0
    for x in [ u.get("xcontent","") for u in i.get("updates",dict()) ] :
        for l in x.splitlines() :
            if len(l) > maxw :
                maxw = len(l)
    table.set_cols_width([12, 25, maxw+1])
    rows = list()
    rows.append(["update_time", "brief", "detail"])
    for u in sorted(i.get("updates", []),
                    key=lambda x: x["create_time"],
                    reverse=True):
        rows.append([
            u["create_time"][:-3], u["update_content"],
            u.get("xcontent", "")
        ])
        mout += "| {} | {} | <pre>{}</pre> | \n".format(
            u["create_time"][:-3], u["update_content"],
            re.sub(
                r"([\\\`\*\_\{\}\[\]\(\)#+-.!|])", r"\\\1",
                u.get("xcontent", "").replace("\n",
                                              "</br>").replace("~", "\$HOME")))
        wout += "<tr>\n"
        for val in [
                u["create_time"][:-3], u["update_content"],
                u.get("xcontent", "")
        ]:
            wout += "<td>{}</td>\n".format(
                val.replace("\n", "</br>").replace(" ", "&nbsp;"))
        wout += "</tr>\n"

    mout += "\n\n"
    table.add_rows(rows)
    stbl = table.draw()
    stbl = "\n".join([re.sub(r"\s+$", "", ln) for ln in stbl.splitlines()])
    cout += stbl + "\n"
    wout += "</table>\n"
    wout += "</html>\n"
    if args.export:
        if args.expand:
            incdir = args.incident + "." + "".join(
                c for c in re.sub(r"\s+", ".", i["brief"])
                if c in set(string.ascii_letters + string.digits + "_-."))
        else:
            incdir = args.incident
        if not os.path.exists(incdir):
            os.mkdir(incdir)
        with open("{}/{}.html".format(incdir, args.incident), "w") as f:
            f.write(wout)
        with open("{}/{}.txt".format(incdir, args.incident), "w") as f:
            f.write(cout)
        with open("{}/{}.md".format(incdir, args.incident), "w") as f:
            f.write(mout)
        with open("{}/{}.json".format(incdir, args.incident), "w") as f:
            f.write(json.dumps(db[args.incident], indent=2, sort_keys=True))
        sys.exit(0)
    if args.html:
        print(wout)
    elif args.markdown:
        print(mout)
    else:
        print(cout)
Example #24
def liveTrack(args, orbData):
    if args.id is None:
        track_ids = orbData.getSatellites()
    else:
        track_ids = [args.id]
    # calculate current satellite data
    visible = list()
    now = datetime.utcnow()
    for satID in track_ids:
        try:
            (azim, elev) = orbData.getAzimElev(satID, now, args.lat, args.lon,
                                               args.alt)
            # Skip satellites below the visibility horizon
            if args.id is None and elev < args.horizon:
                continue
            (alt, dist, vel,
             vel_r) = orbData.getDistance(satID, now, args.lat, args.lon,
                                          args.alt)
            elev_max = orbData.getMaxElev(satID, now, args.lat, args.lon,
                                          args.alt)
            visible.append({
                'satID': satID,
                'azim': azim,
                'elev': elev,
                'dist': dist,
                'vel': vel,
                'vel_r': vel_r,
                'elev_max': elev_max,
                'alt': alt
            })
        except NotImplementedError:
            pass

    # clear display
    sys.stdout.write("\x1b[H\x1b[2J")

    # update display
    print "%3s %-25s %7s %4s %7s %-28s [%8s UTC]" % (
        '#', 'Name', 'Azim', 'Elev', 'Dist', 'Comm', now.strftime('%H:%M:%S'))
    print "[ACTIVE SATS]---------------------------------------------------------------------------------"
    table = Texttable()
    table.set_deco(0)
    table.set_max_width(0)
    table.header('# Name Azim Elev Dist Vel Comm'.split())
    table.set_header_align('r l r r r r l'.split())
    table.set_cols_align('r l r r r r l'.split())
    table.set_cols_dtype('t t t t t t t'.split())
    row = 1
    for entry in sorted(visible, key=lambda x: x['elev'], reverse=True):
        satID, azim, elev, dist, vel, vel_r = [
            entry[x] for x in 'satID azim elev dist vel vel_r'.split()
        ]

        name = orbData.getName(satID)
        comm = orbData.getSatInfo(satID)
        (up, down, beacon, mode, status, name2) = comm
        if status != 'active' and status != 'Operational':
            continue
        commList = list()
        if down: commList.append('D[%s]' % down)
        if up: commList.append('U[%s]' % up)
        if beacon: commList.append('B[%s]' % beacon)
        if mode: commList.append('%s' % mode)
        comm = ' '.join(commList)
        table.add_row(("%d|%s|%.0f|%.0f|%.0f|%.2f|%s" %
                       (row, name, azim, elev, dist, vel_r, comm)).split('|'))
        row += 1

    print(table.draw())
    print()

    print("[OTHER  SATS]---------------------------------------------------------------------------------")
    table = Texttable()
    table.set_deco(0)
    table.set_max_width(0)
    table.header('# Name Azim Elev Dist Vel Comm'.split())
    table.set_header_align('r l r r r r l'.split())
    table.set_cols_align('r l r r r r l'.split())
    table.set_cols_dtype('t t t t t t t'.split())
    row = 1
    for entry in sorted(visible, key=lambda x: x['elev'], reverse=True)[:10]:
        satID, azim, elev, dist, vel_r = [
            entry[x] for x in 'satID azim elev dist vel_r'.split()
        ]
        name = orbData.getName(satID)
        comm = orbData.getSatInfo(satID)
        (up, down, beacon, mode, status, name2) = comm
        if status == 'active' or status == 'Operational':
            continue
        commList = list()
        commList.append(status)
        comm = ' '.join(commList)
        table.add_row(("%d|%s|%.0f|%.0f|%.0f|%.2f|%s" %
                       (row, name, azim, elev, dist, vel_r, comm)).split('|'))
        row += 1
    print(table.draw())