Ejemplo n.º 1
0
def process(workbook: Any, content: str) -> tuple:
    """Process Storage-Array-Summary worksheet

    Also returns a list of array names used in other sheets

    :param workbook: openpyxl-style workbook containing the target sheet
    :param content: raw command output to be parsed
    :return: (array_names, array_models, array_revisions)
    """
    worksheet = workbook.get_sheet_by_name('Storage-Array-Summary')

    # Combined header: one column set per parser template.
    headers = list(concat([
        get_parser_header(ARRAY_NAME_TMPL),
        get_parser_header(GET_ARRAY_UID_TMPL),
        get_parser_header(GET_AGENT_TMPL)
    ]))
    RowTuple = namedtuple('RowTuple', headers)   # pylint: disable=invalid-name

    build_header(worksheet, headers)

    # Run each template parser over the same raw content.
    cmd_arrayname_out = run_parser_over(content, ARRAY_NAME_TMPL)
    cmd_getarrayuid_out = run_parser_over(content, GET_ARRAY_UID_TMPL)
    cmd_getagent_out = run_parser_over(content, GET_AGENT_TMPL)

    # Zip the three parser outputs row-wise and flatten each triple
    # into a single flat row per array.
    # noinspection PyTypeChecker
    cmd_out = map(compose(
        list,
        concat),
        zip(
            cmd_arrayname_out,
            cmd_getarrayuid_out,
            cmd_getagent_out))

    array_names = defaultdict(str)    # type: defaultdict
    # De-duplicate on the first two columns before writing.
    rows = check_empty_arrays(list(unique(cmd_out, key=itemgetter(0, 1))))
    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        # Record column Q value -> column A (array name) for reuse by
        # other sheets; assumes Q holds the array key -- TODO confirm.
        array_names[worksheet['Q{}'.format(row_n)].value] = \
            worksheet['A{}'.format(row_n)].value
        final_row = row_n

    sheet_process_output(
        worksheet,
        'StorageArraySummaryTable',
        'Storage-Array-Summary',
        final_col,
        final_row)

    # Group the flat rows by model (col 12) and revision (col 10)
    # for consumption by the pivot sheets.
    array_models = groupby(itemgetter(12), rows)
    array_revisions = groupby(itemgetter(10), rows)
    return array_names, array_models, array_revisions
Ejemplo n.º 2
0
def process(workbook: Workbook, content: list) -> None:
    """Process LUN-Storage_Pivot worksheet

    Writes one subtotal row per array followed by its per-storage-group
    detail rows, then a final grand-total row.

    :param workbook: workbook containing the target sheet
    :param content: parsed LUN rows; col 0 = array name, col 3 =
        storage group, col 12 = LUN capacity (TB) -- TODO confirm
    """
    worksheet_name = 'LUN-Storage_Pivot'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    state_header = ['ArrayName', 'LUNCount', 'SumOfLUNCapacityTB']

    StateTuple = namedtuple('StateTuple', state_header)

    build_header(worksheet, state_header)

    # Group rows by (array, storage group), then group those keys by array.
    array_storage_groups = groupby(itemgetter(0, 3), content)
    array_groups = groupby(itemgetter(0), array_storage_groups)

    state_rows, grand_total = [], [0, 0]  # type: list, list
    for array in array_groups:
        lun_count, lun_capacity, storage_array_rows = 0, 0, []
        for array_group in array_groups[array]:
            # Transpose the group's rows so columns can be summed.
            array_luns = list(zip(*array_storage_groups[array_group]))

            row = [
                array_group[1],
                len(array_storage_groups[array_group]),
                sum(map(float, array_luns[12]))
            ]

            lun_count += row[1]
            lun_capacity += row[2]
            storage_array_rows.append(map(str, row))

        grand_total[0], grand_total[1] = grand_total[0] + lun_count, \
            grand_total[1] + lun_capacity
        # Subtotal row for the array first, detail rows after it.
        state_rows += [[array, str(lun_count),
                        str(lun_capacity)], *storage_array_rows]

    state_rows.append(['Grand Total', *grand_total])
    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(StateTuple._make, state_rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            # Grand-total values are still numeric; stringify for the cell.
            col_value = str(col_value) \
                if not isinstance(col_value, str) else col_value
            cell.value = col_value
            style_value_cell(cell)
            # Bold the per-array subtotal and grand-total rows.
            if row_tuple.ArrayName \
                    in [sg for sg in array_groups.keys()] + ['Grand Total']:
                cell.font = Font(bold=True, size=11)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(worksheet, 'LUNStorageTable', 'LUN-Storage_Pivot',
                         final_col, final_row)
Ejemplo n.º 3
0
def process(workbook: Any, nfs_rows: list, smb_rows: list) -> None:
    """Process File System by Protocol Sheet

    Merges NFS and SMB rows, collapses entries sharing the same
    (hostname, filesystem) pair into a single multiprotocol row,
    and writes the result to the worksheet.

    :param workbook: workbook containing the target sheet
    :param nfs_rows: parsed NFS file-system rows
    :param smb_rows: parsed SMB file-system rows
    """
    worksheet_name = 'File System by Protocol'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = ['Hostname', 'FileSystem', 'Path', 'Type']

    RowTuple = namedtuple('RowTuple', headers)
    build_header(worksheet, headers)

    # Collapse rows sharing (hostname, filesystem) into one entry each.
    key_columns = (0, 1)
    grouped = groupby(itemgetter(*key_columns), nfs_rows + smb_rows)
    rows = [get_multiprotocol(grouped[group_key]) for group_key in grouped]

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, rows), 2):
        for col_n, col_value in enumerate(row_tuple._asdict().values(),
                                          ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = col_value.strip()
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(worksheet, 'FileSystemProtocolTable',
                         'File System by Protocol', final_col, final_row)
Ejemplo n.º 4
0
 def _createIndiceConverter(
         innerIndiceMap: Dict[Indice, int], coeffs: List[Sequence[float]], oi: int,
         _ls: List[List[Sequence[float]]], _boole: bool = False
 ) -> List[Sequence[float]]:
     """Recursively derive one coefficient set per indice dimension.

     Buckets *innerIndiceMap* by the leading indice component, solves the
     transform coefficients for the current dimension, then recurses on
     each bucket with that component stripped.  All recursion branches
     must agree on the resulting coefficient list; otherwise the mapping
     is not linear.

     NOTE(review): this function references ``self`` without declaring
     it -- it appears to be a method extracted from its class; confirm
     the enclosing scope before reuse.

     :param innerIndiceMap: mapping from (remaining) indices to offsets
     :param coeffs: coefficient sets accumulated so far
     :param oi: index of the dimension currently being solved
     :param _ls: scratch state forwarded to the coefficient solver
     :param _boole: flag forwarded to the coefficient solver
     :return: accumulated coefficient sets, one per dimension
     :raises LinearError: if the recursion branches disagree
     """
     # Base case: indices exhausted (keys reduced to the empty tuple).
     # Use equality here -- the original ``is ()`` relied on CPython
     # interning the empty tuple, which is implementation-dependent and
     # emits a SyntaxWarning on Python 3.8+.
     if list(innerIndiceMap.keys())[0] == ():
         return coeffs
     else:
         # Bucket entries by the leading indice component; within each
         # bucket drop that component and sort by the remainder.
         innerIndiceMapGroup: Dict[int, List[Tuple[Indice, int]]] = compose(
             valmap(compose(
                 lambda v: sorted(v, key=lambda k: k[0]),
                 map(lambda l: (l[0][1:], l[1])))),
             groupby(lambda kv: kv[0][0]))(innerIndiceMap.items())
         outArr = list(innerIndiceMapGroup.keys())
         inArr = list(valmap(lambda v: v[0][-1], innerIndiceMapGroup).values())
         coeff = self.getIndiceTransformCoeffs(outArr, inArr, oi, _ls, _boole)
         nextInnerIndiceMapGroup: Dict[int, Dict[Indice, int]] = valmap(dict, innerIndiceMapGroup)
         coeffsList = [
             _createIndiceConverter(
                 self.applyIndiceTransform(nextInnerIndiceMap, key, coeff),
                 [*coeffs, coeff], oi + 1, _ls, _boole
             )
             for key, nextInnerIndiceMap in nextInnerIndiceMapGroup.items()
         ]
         # Every branch must reach the same coefficients for the
         # transform to be well defined (linear).
         if allSame(coeffsList):
             return coeffsList[0]
         else:
             raise LinearError
Ejemplo n.º 5
0
def process(workbook: Workbook, content: list) -> None:
    """Process InitiatorType worksheet

    Writes one subtotal row per initiator type followed by per-array
    detail rows with summed registered-initiator counts.

    :param workbook: workbook containing the target sheet
    :param content: parsed rows; col -2 = initiator type, col 0 =
        array name, col 3 = registered initiators -- TODO confirm
    """
    worksheet_name = 'InitiatorType'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    speed_header = ['ArrayName', 'SumRegisteredInitiators']

    RowTuple = namedtuple('RowTuple', speed_header)  # pylint: disable=invalid-name

    build_header(worksheet, speed_header)

    # Group rows by (type, array), then group those keys by type.
    type_array_groups = groupby(itemgetter(-2, 0), content)
    type_groups = groupby(itemgetter(0), type_array_groups)

    initiator_rows = []  # type: list
    for array_type in type_groups:
        total_initiators = 0
        array_rows = []
        for array in type_groups[array_type]:
            # Transpose the group's rows so column 3 can be summed.
            array_initiators = list(zip(*type_array_groups[array]))

            row = [array[1], sum(map(int, array_initiators[3]))]
            total_initiators += row[1]
            array_rows.append(map(str, row))
        # Subtotal row for the type first, detail rows after it.
        initiator_rows += [[array_type] + [str(total_initiators)], *array_rows]

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, initiator_rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            col_value = str(col_value) \
                if not isinstance(col_value, str) else col_value
            cell.value = str.strip(col_value)
            style_value_cell(cell)
            # Bold the per-type subtotal rows.
            if row_tuple.ArrayName in type_groups.keys():
                cell.font = Font(bold=True, size=11)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(worksheet, 'InitiatorTypeTable', 'InitiatorType',
                         final_col, final_row)
Ejemplo n.º 6
0
 def grouped(self, group, key=lambda x: x.weight):
     """Group this node's siblings or children by *key*.

     Returns a dict mapping key value -> list of nodes.  A parentless
     node asked for 'siblings' is the sole member of its own weight
     group; any *group* other than 'siblings'/'children' yields {}.
     """
     # Orphan node: it is its own single-member sibling group.
     if group == 'siblings' and not self.parent:
         return {self.weight: [self]}
     if group not in ('siblings', 'children'):
         return {}
     members = self.children if group == 'children' else self.siblings
     pairs = [(key(member), member) for member in members]
     return cc.pipe(pairs,
                    cc.groupby(lambda pair: pair[0]),
                    cc.valmap(lambda bucket: [node for _, node in bucket]))
Ejemplo n.º 7
0
    def update(self):
        """Stream newly-arrived work-stealing log entries to the data source."""
        with log_errors():
            log = self.steal.log
            # Number of entries appended since the previous update call.
            n = self.steal.count - self.last
            # The n most recent entries, newest first.
            log = [log[-i] for i in range(1, n + 1)]
            self.last = self.steal.count

            if log:
                # Group each entry by field 1, flatten the grouped values,
                # convert every record, and transpose into column lists
                # suitable for source.stream.
                new = pipe(log, map(groupby(1)), map(dict.values), concat,
                           map(self.convert), list, transpose)
                self.source.stream(new, 10000)
Ejemplo n.º 8
0
    def update(self):
        """Stream new work-stealing log entries, deferring under profiling."""
        with log_errors():
            log = self.steal.log
            # Number of entries appended since the previous update call.
            n = self.steal.count - self.last
            # The n most recent entries, newest first.
            log = [log[-i] for i in range(1, n + 1)]
            self.last = self.steal.count

            if log:
                # Group each entry by field 1, flatten the grouped values,
                # convert every record, and transpose into column lists
                # suitable for source.stream.
                new = pipe(log, map(groupby(1)), map(dict.values), concat,
                           map(self.convert), list, transpose)
                if PROFILING:
                    # Defer the stream call to the next tick so it is
                    # captured outside this update's profile sample.
                    curdoc().add_next_tick_callback(
                        lambda: self.source.stream(new, 10000))
                else:
                    self.source.stream(new, 10000)
Ejemplo n.º 9
0
def tar_pattern_filter(tar_file: Iterable, patterns: tuple) -> list:
    """Filters the content by the file name if it matches any of the patterns

    Members are grouped by their top-level directory; within each group
    every pattern is tried against every member name, and matches are
    collected in that order.  A member matching several patterns is
    included once per match, as before.

    :param tar_file: iterable of tar members (objects with a ``name``)
    :param patterns: fnmatch-style patterns to test names against
    :return: list of matching members
    """
    grouped_members = groupby(lambda member: member.name.split('/')[0],
                              tar_file)
    return [
        member
        for top_dir in grouped_members
        for pattern in patterns
        for member in grouped_members[top_dir]
        if fnmatch(member.name, pattern)
    ]
Ejemplo n.º 10
0
def embedding_groups(
        node_list: List[T],
        persona_embedding_list: List[np.ndarray]) -> Dict[T, List[np.ndarray]]:
    """
    Utility function, which given aligned list of nodes and embedding lists from the model.predict function,
    obtain a dictionary from base graph nodes to a list of embeddings. The order of the embeddings for the
    base nodes is not ordered, and the order may differ on different calls.

    :param node_list: list of base nodes, which is duplicated
    :param persona_embedding_list: corresponding embeddings
    :return: dictionary mapping base nodes to all their embeddings
    """
    # Pair each node with its embedding, group the pairs by the node
    # (element 0), then keep only the embeddings (element 1) per group.
    return pipe(
        zip(node_list, persona_embedding_list),
        groupby(0),
        valmap(lambda x: list(map(getter(1), x))),
    )
Ejemplo n.º 11
0
def process(workbook: Any, content: tuple) -> None:
    """Process Performance Dashboard worksheet

    Writes one metrics row per cluster plus a final 'Total' row
    computed over all clusters.

    :param workbook: workbook containing the target sheet
    :param content: tuple of parsed datasets, each keyed by cluster
        name in column 0
    """
    worksheet = workbook.get_sheet_by_name('Performance Dashboard')

    header = [
        'Cluster', 'Top Level Directories', 'No of Directories', 'No of Files',
        'Throughput Avg', 'Operations Avg', 'Ops 95th Percentile',
        'Latency Avg', 'Total', '< 128k', '< 128k Percent', '> 128k',
        '> 128k Percent'
    ]
    build_header(worksheet, header)

    # Group each dataset independently by cluster name.
    data = [groupby(itemgetter(0), cont) for cont in content]

    # cluster -> tuple holding that cluster's rows from each dataset.
    data_dict = defaultdict(tuple)  # type: dict
    for pos, _ in enumerate(data):
        for key in data[pos]:
            data_dict[key] = (*data_dict[key], data[pos][key])

    rows = []  # type: list
    for cluster in data_dict:
        rows.append([cluster] +
                    list(map(str, perf_dashboard(data_dict[cluster]))))

    # Overall totals are computed over the ungrouped datasets.
    rows.append(['Total'] + list(map(str, perf_dashboard(content))))

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(rows, 2):
        for col_n, col_value in \
                enumerate(row_tuple, ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            cell.value = str.strip(col_value)
            set_cell_to_number(cell, '0.00')
            style_value_cell(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(worksheet, 'PerformanceDashboardTable',
                         'Performance Dashboard', final_col, final_row)
Ejemplo n.º 12
0
def get_interactions():
    """Build a student-by-date attendance matrix from interaction records.

    Each cell holds [time_in, time_out] for an attended date, ('', '')
    for a recorded absence, and (None, None) otherwise.  The first row
    is a header of all dates seen in the interaction data.
    """
    dates = sorted(set(map(_g('date'), data['interactions'])))
    # student -> {date -> [time_in, time_out]}, using the first
    # interaction recorded for each (student, date).
    d = t.pipe(data['interactions'],
               tc.groupby(lambda i: i.student),
               tc.valmap(lambda x: t.pipe(t.groupby(lambda i: i.date,x),
                                          tc.valmap(lambda v: [v[0].time_in, v[0].time_out]))))

    mat = [['student'] + dates]
    for student, attendance in d.items():
        record = [student]
        for dt in dates:
            if dt in attendance:
                record.append(attendance[dt])
            elif dt in data['students'][student].absences:
                # Recorded absence: empty-string pair distinguishes it
                # from simply missing data.
                record.append(('',''))
            else:
                record.append((None,None))
        mat.append(record)

    return {'interactions': mat}
Ejemplo n.º 13
0
# In[29]:

# Fit the classifier on the documents/labels and sanity-check
# predictions on the same docs (notebook cell).
clf.fit(docs, labels)
clf.predict(docs)

# In[30]:


def get_step_by_name(pipe, name):
    """Return the first transformer in *pipe* whose step name starts with *name*."""
    matching = [step for step_name, step in pipe.steps
                if step_name.startswith(name)]
    return matching[0]


# In[31]:

# Sub-pipeline that unions the individual count-vectorizer branches.
cnt_vects_pipe = get_step_by_name(tfidf_pipe, "cnt_vects")

# One vectorizer per branch of the transformer union.
cnt_vects = [
    get_step_by_name(pipe, "cnt_vect_")
    for _name, pipe in cnt_vects_pipe.transformer_list
]

# Enumerate all vocabulary terms across the vectorizers, group by the
# term itself, and map each term to the list of enumeration positions
# where it occurs -- presumably to detect terms shared between
# vectorizers; verify against downstream use.
vocabulary_map = pipe(
    enumerate(concat(cnt_vect.vocabulary_ for cnt_vect in cnt_vects)),
    groupby(get(1)),
    valmap(lambda vals: list(pluck(0, vals))),
)
vocabulary_map

# In[ ]:
Ejemplo n.º 14
0
def process(workbook: Workbook, content: list) -> None:
    """Process LUNsPivot worksheet

    Builds two pivots side by side: LUN capacity per state (columns
    A-B, 'BoundTable') and capacity/count per private flag (columns
    D-F, 'PrivateTable'), each with subtotal and grand-total rows.

    :param workbook: workbook containing the target sheet
    :param content: parsed LUN rows; col 0 = array name, col 7 = state,
        col 12 = capacity (TB), col 15 = private flag -- TODO confirm
    """
    worksheet_name = 'LUNsPivot'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    state_header = ['ArrayName', 'SumOfLUNCapacityTB']
    private_header = ['PrivateArrayName', 'SumOfLUNCapacityTB', 'Count']

    StateTuple = namedtuple('StateTuple', state_header)
    PrivateTuple = namedtuple('PrivateTuple', private_header)

    build_header(worksheet, state_header)
    build_header(worksheet, private_header, 'D')

    # Group rows by (state, array) and (private, array), then group
    # those composite keys by their first component.
    state_array_groups = groupby(itemgetter(7, 0), content)
    state_groups = groupby(itemgetter(0), state_array_groups)

    private_array_groups = groupby(itemgetter(15, 0), content)
    private_groups = groupby(itemgetter(0), private_array_groups)

    state_rows, grand_total = [], 0  # type: list, float
    for state in state_groups:
        state_sum, array_rows = 0, []
        for array in state_groups[state]:
            # Transpose the group's rows so column 12 can be summed.
            array_luns = list(zip(*state_array_groups[array]))

            row = [array[1], sum(map(float, array_luns[12]))]

            state_sum += row[1]
            array_rows.append(map(str, row))
        grand_total += state_sum
        # Subtotal row for the state first, detail rows after it.
        state_rows += [[state] + [str(state_sum)], *array_rows]

    state_rows += [['Grand Total', str(grand_total)]]
    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(StateTuple._make, state_rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            col_value = str(col_value) \
                if not isinstance(col_value, str) else col_value
            cell.value = col_value
            style_value_cell(cell)
            # Bold subtotal and grand-total rows.
            if row_tuple.ArrayName \
                    in [sg for sg in state_groups.keys()] + ['Grand Total']:
                cell.font = Font(bold=True, size=11)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(worksheet, 'BoundTable', 'LUNsPivot', final_col,
                         final_row)

    private_rows, private_total = [], [0, 0]  # type: list, list
    for private in private_groups:
        private_sum, array_rows = [0, 0], []
        for array in private_groups[private]:
            array_luns = list(zip(*private_array_groups[array]))

            # NOTE(review): len(array_luns) is the number of columns
            # after transposition, not the number of LUNs -- compare
            # the LUN-Storage_Pivot sheet which counts rows; confirm.
            row = [array[1], sum(map(float, array_luns[12])), len(array_luns)]

            private_sum = [x + y for x, y in zip(private_sum, row[1:])]
            array_rows.append(map(str, row))
        private_total = [x + y for x, y in zip(private_total, private_sum)]
        private_rows += [[private] + list(map(str, private_sum)), *array_rows]

    private_rows += [['Grand Total'] + private_total]
    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(PrivateTuple._make, private_rows),
                                      2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('D')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            col_value = str(col_value) \
                if not isinstance(col_value, str) else col_value
            cell.value = col_value
            style_value_cell(cell)
            # Bold subtotal and grand-total rows.
            if row_tuple.PrivateArrayName \
                    in [pg for pg in private_groups.keys()] + ['Grand Total']:
                cell.font = Font(bold=True, size=11)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(worksheet,
                         'PrivateTable',
                         'LUNsPivot',
                         final_col,
                         final_row,
                         start_col=ord('D'))
# Curried-toolz version: filter accounts with balance > 150, then
# project columns 1 (name) and 2 (balance).
acc1 = pipe(accounts, filter(lambda account: account[2] > 150),
            map(get([1, 2])), list)
print(acc1)

# List comprehensions version (more Pythonic):
acc2 = [(name, balance) for (id, name, balance, gender) in accounts
        if balance > 150]
print(acc2)

# II. SPLIT-APPLY-COMBINE WITH `GROUPBY` AND `REDUCEBY`:
# 1. Split the dataset into groups by some property
# 2. Reduce each of the groups with some synopsis function

# In Memory Split-Apply-Combine
# SELECT gender, SUM(balance) FROM accounts GROUP BY gender;
print(groupby(get(3), accounts))
# {'M': [(2, 'Bob', 200, 'M'), (3, 'Charlie', 150, 'M'), (4, 'Dennis', 50, 'M')], 'F': [(1, 'Alice', 100, 'F'), (5, 'Edith', 300, 'F')]}
print(pipe(accounts, groupby(get(3)), valmap(compose(sum, pluck(2)))))

# {'M': 400, 'F': 400} (pluck item )

# Streaming Split-Apply-Combine
# the groupby operation is not streaming and so this approach is limited
# to datasets that can fit comfortably into memory.
# to achieves streaming split-apply-combine use `reduceby()`


# The `reduceby` operation takes a key function,
# like `get(3)` or `lambda x: x[3]`, and a binary operator like
# `add` or `lesser = lambda acc, x: acc if acc < x else x`.
# It successively applies the key function to each item in succession,
Ejemplo n.º 16
0
def process(workbook: Workbook, content: list) -> None:
    """Process DisksPivot worksheet

    Builds two pivots side by side: disk count/capacity per disk type
    (columns A-C, 'TypeTable') and disk count per speed (columns E-F,
    'DiskSpeedTable'), each with subtotal and grand-total rows.

    :param workbook: workbook containing the target sheet
    :param content: parsed disk rows; col 0 = array name, col 7 =
        capacity (TB), col -4 = disk type, col -3 = speed -- TODO confirm
    """
    worksheet_name = 'DisksPivot'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    type_header = ['ArrayName', 'DiskCount', 'SumOfCapacityTB']
    speed_header = ['SpeedArrayName', 'DiskCount']

    TypeTuple = namedtuple('TypeTuple', type_header)
    SpeedTuple = namedtuple('SpeedTuple', speed_header)

    build_header(worksheet, type_header)
    build_header(worksheet, speed_header, 'E')

    # Group rows by (type, array) and (speed, array), then group those
    # composite keys by their first component.
    type_array_groups = groupby(itemgetter(-4, 0), content)
    type_groups = groupby(itemgetter(0), type_array_groups)

    speed_array_groups = groupby(itemgetter(-3, 0), content)
    speed_groups = groupby(itemgetter(0), speed_array_groups)

    type_rows, grand_total = [], [0, 0]  # type: list, list
    for row_type in type_groups:
        type_sum, array_rows = [0, 0], []
        for array in type_groups[row_type]:
            array_disks = list(zip(*type_array_groups[array]))

            # NOTE(review): len(array_disks) is the number of columns
            # after transposition, not the number of disks in the group
            # -- confirm whether len(type_array_groups[array]) was meant.
            row = [array[1], len(array_disks), sum(map(float, array_disks[7]))]

            type_sum = [x + y for x, y in zip(type_sum, row[1:])]
            array_rows.append(map(str, row))
        grand_total = [x + y for x, y in zip(grand_total, type_sum)]
        # Subtotal row for the type first, detail rows after it.
        type_rows += [[row_type] + list(map(str, type_sum)), *array_rows]

    type_rows += [['Grand Total'] + grand_total]
    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(TypeTuple._make, type_rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            col_value = str(col_value) \
                if not isinstance(col_value, str) else col_value
            cell.value = col_value
            style_value_cell(cell)
            # Bold subtotal and grand-total rows.
            if row_tuple.ArrayName \
                    in [sg for sg in type_groups.keys()] + ['Grand Total']:
                cell.font = Font(bold=True, size=11)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(worksheet, 'TypeTable', 'DisksPivot', final_col,
                         final_row)

    speed_rows, speed_total = [], [0, 0]  # type: list, list
    for speed in speed_groups:
        speed_sum, array_rows = [0, 0], []
        for array in speed_groups[speed]:
            array_disks = list(zip(*speed_array_groups[array]))

            # NOTE(review): same column-count vs row-count concern as above.
            row = [array[1], len(array_disks)]

            speed_sum = [x + y for x, y in zip(speed_sum, row[1:])]
            array_rows.append(map(str, row))
        speed_total = [x + y for x, y in zip(speed_total, speed_sum)]
        speed_rows += [[speed] + list(map(str, speed_sum)), *array_rows]

    speed_rows += [['Grand Total'] + speed_total]
    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(SpeedTuple._make, speed_rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('E')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            col_value = str(col_value) \
                if not isinstance(col_value, str) else col_value
            cell.value = col_value
            style_value_cell(cell)
            # Bold subtotal and grand-total rows.
            if row_tuple.SpeedArrayName \
                    in [pg for pg in speed_groups.keys()] + ['Grand Total']:
                cell.font = Font(bold=True, size=11)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(worksheet,
                         'DiskSpeedTable',
                         'DisksPivot',
                         final_col,
                         final_row,
                         start_col=ord('E'))
Ejemplo n.º 17
0
def process(workbook: Any, content: str) -> Any:
    """Process Storage-Groups worksheet

    Joins port rows (server names) with storage-group rows, also keeping
    storage groups that have no attached servers, and writes the merged
    table to the worksheet.

    :param workbook: workbook containing the target sheet
    :param content: raw command output to be parsed
    :return: [array, group, col-4-value] triples for the written rows,
        for use by other sheets
    """

    worksheet_name = 'Storage-Groups'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    headers = list(concat([
        get_parser_header(PORT_TMPL),
        get_parser_header(STORAGEGROUP_TMPL)[3:],
    ]))
    RowTuple = namedtuple('RowTuple', headers)  # pylint: disable=invalid-name

    build_header(worksheet, headers)

    cmd_storagegroup_out = run_parser_over(content, STORAGEGROUP_TMPL)
    cmd_port_out = run_parser_over(content, PORT_TMPL)

    # (array, storage-group) -> unique values of the last port column
    # (presumably server names -- TODO confirm).
    common_columns = (0, 1)
    server_names_grouped = compose(
        valmap(
            compose(list, set, map(last))),
        groupby(
            itemgetter(*common_columns))
    )(cmd_port_out)

    # Flatten back to rows of (array, storage-group, [values]).
    cmd_port_relevant = map(
        juxt(
            compose(first, first),
            compose(second, first),
            second)
    )(server_names_grouped.items())

    # Inner-join port and storage-group rows on (array, storage-group).
    common_columns_getter = itemgetter(*common_columns)
    cmd_merged_out = join(
        common_columns_getter, cmd_port_relevant,
        common_columns_getter, cmd_storagegroup_out)

    cmd_merged_out = sorted(cmd_merged_out)

    # Merge each joined pair: port columns followed by the
    # storage-group columns minus the three duplicated join columns.
    rows = list(map(
        compose(
            list,
            concat,
            juxt(
                first,
                compose(
                    drop(3),
                    second)))
    )(cmd_merged_out))

    # Storage groups absent from the port output, i.e. groups with
    # no attached servers.
    portcmd = {(array, grp) for array, grp, *other in rows}
    strgp = {(array, grp) for array, grp, *other in cmd_storagegroup_out}
    no_server_groups = strgp - portcmd

    # Keep serverless storage groups by fnmatch-ing the stringified
    # (array, group) tuples -- effectively an equality test here.
    storage_list = list(filter(
        lambda storage_gr: any(
            fnmatch(str((storage_gr[0], storage_gr[1])), str(ctrlServer))
            for ctrlServer in no_server_groups),
        cmd_storagegroup_out))

    storage_list = check_empty_arrays(
        list(unique(storage_list + rows, key=itemgetter(0, 1))))

    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(RowTuple._make, storage_list), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            if isinstance(col_value, str):
                cell.value = str.strip(col_value)
            else:
                # List values (e.g. server names) are written one per line.
                cell.alignment = Alignment(wrapText=True)
                cell.value = '\n'.join(col_value)
            style_value_cell(cell)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet,
        'StorageGroupsTable',
        'Storage-Groups',
        final_col,
        final_row)

    return [[lun_map[0], lun_map[1], lun_map[4]] for lun_map in storage_list]
Ejemplo n.º 18
0
def process(workbook: Workbook, content: tuple) -> None:
    """Process StorageArrayPivot worksheet

    Writes three tables: initiator sums per speed group (columns A-D),
    array-model counts (from column F) and revision counts (from
    column I).

    :param workbook: workbook containing the target sheet
    :param content: triple of (initiator rows, model-groups dict,
        revision-groups dict) as produced by the summary sheet
    """
    worksheet_name = 'StorageArrayPivot'
    worksheet = workbook.get_sheet_by_name(worksheet_name)

    speed_header = [
        'ArrayName', 'SumRegisteredInitiators',
        'SumLoggedInInitiators', 'SumNotLoggedInInitiators'
    ]
    model_header = ['ArrayModel', 'Count']
    revision_header = ['Revision', 'Count']

    SpeedTuple = namedtuple(
        'RowTuple', speed_header)  # pylint: disable=invalid-name
    ModelTuple = namedtuple(
        'RowTuple', model_header)  # pylint: disable=invalid-name
    # BUG FIX: was built from model_header, which gave RevisionTuple an
    # 'ArrayModel' field instead of 'Revision'.
    RevisionTuple = namedtuple(
        'RowTuple', revision_header)  # pylint: disable=invalid-name

    build_header(worksheet, speed_header)
    build_header(worksheet, model_header, 'F')
    build_header(worksheet, revision_header, 'I')

    # Group rows by (speed, array), then group those keys by speed.
    speed_array_groups = groupby(itemgetter(6, 0), content[0])
    speed_groups = groupby(itemgetter(0), speed_array_groups)

    speed_rows, grand_total = [], [0, 0, 0]  # type: list, list
    for speed in speed_groups:
        total_initiators = [0, 0, 0]
        array_rows = []
        for array in speed_groups[speed]:
            # Transpose so columns 3-5 can be summed across the group.
            array_initiators = list(zip(*speed_array_groups[array]))

            row = [
                array[1], sum(map(int, array_initiators[3])),
                sum(map(int, array_initiators[4])),
                sum(map(int, array_initiators[5]))
            ]

            total_initiators = [x + y
                                for x, y in zip(total_initiators, row[1:])]
            array_rows.append(map(str, row))
        grand_total = [x + y for x, y in zip(grand_total, total_initiators)]
        # Subtotal row for the speed group first, detail rows after it.
        speed_rows += [[speed] + list(map(str, total_initiators)), *array_rows]

    speed_rows += [['Grand Total'] + grand_total]
    final_col, final_row = 0, 0
    for row_n, row_tuple in enumerate(map(SpeedTuple._make, speed_rows), 2):
        for col_n, col_value in \
                enumerate(row_tuple._asdict().values(), ord('A')):
            cell = worksheet['{}{}'.format(chr(col_n), row_n)]
            col_value = str(col_value) \
                if not isinstance(col_value, str) else col_value
            cell.value = col_value
            style_value_cell(cell)
            # Bold subtotal and grand-total rows.
            if row_tuple.ArrayName \
                    in [sg for sg in speed_groups.keys()] + ['Grand Total']:
                cell.font = Font(bold=True, size=11)
            set_cell_to_number(cell)
            final_col = col_n
        final_row = row_n

    sheet_process_output(
        worksheet,
        'StorageArrayPivotTable',
        'StorageArrayPivot',
        final_col,
        final_row)

    # Model counts: one row per model plus a total row.
    model_rows = [(key, len(val)) for key, val in content[1].items()]
    model_rows.append(('Total', sum([row[1] for row in model_rows])))
    final_col, final_row = write_excel(model_rows, worksheet, ModelTuple, 'F')
    sheet_process_output(
        worksheet,
        'ModelTable',
        'StorageArrayPivot',
        final_col,
        final_row,
        start_col=ord('F'))

    # Revision counts: one row per revision plus a total row.
    revision_rows = [(key, len(val)) for key, val in content[2].items()]
    revision_rows.append(('Total', sum([row[1] for row in revision_rows])))
    final_col, final_row = write_excel(
        revision_rows, worksheet, RevisionTuple, 'I')
    sheet_process_output(
        worksheet,
        'RevisionTable',
        'StorageArrayPivot',
        final_col,
        final_row,
        start_col=ord('I'))