def write_description(self, worksheet, description=None):
    """Append a styled description row and merge it across all columns.

    Does nothing when *description* is falsy.
    """
    if not description:
        return
    cell = WriteOnlyCell(ws=worksheet, value=description)
    cell.style = self.description_style
    worksheet.append((cell, ))
    # Merge the single description cell over the full table width.
    last_column = cell.col_idx + len(self.columns) - 1
    worksheet.merge_cells(
        start_row=cell.row,
        start_column=cell.col_idx,
        end_row=cell.row,
        end_column=last_column,
    )
def export_xlsx(self, filename):
    """Save the series as an xlsx table with columns (#, Value, Units)."""
    units_label = self.units.description
    table = [(index + 1, value, units_label)
             for index, value in enumerate(self.values)]
    workbook = Workbook(write_only=True)
    sheet = workbook.create_sheet()
    # Register the named styles on the workbook first, then configure them.
    body_style = NamedStyle(name="highlight")
    head_style = NamedStyle(name='headercell')
    workbook.add_named_style(body_style)
    workbook.add_named_style(head_style)
    body_style.font = Font(name='Calibri', size=11)
    head_style.font = Font(name='Calibri', size=11, bold=True)
    edge = Side(border_style='thin')
    body_style.border = Border(bottom=edge, right=edge, top=edge, left=edge)
    head_style.border = Border(bottom=edge, right=edge, top=edge, left=edge)
    header_labels = ['#', 'Value', 'Units']
    # Header row: one styled write-only cell per label.
    header_cells = []
    for label in header_labels:
        header_cell = WriteOnlyCell(sheet, value=label)
        header_cell.style = 'headercell'
        header_cells.append(header_cell)
    sheet.append(header_cells)
    # Data rows, every cell carrying the body style.
    for record in table:
        body_cells = []
        for item in record:
            body_cell = WriteOnlyCell(sheet, value=item)
            body_cell.style = 'highlight'
            body_cells.append(body_cell)
        sheet.append(body_cells)
    workbook.save(filename)  # doctest: +SKIP
def openpyxl_stream(df):
    """ Write a dataframe straight to disk """
    wb = Workbook(write_only=True)
    ws = wb.create_sheet()
    # One write-only cell is reused for every styled value: in write-only
    # mode each append() serialises immediately, so reuse is safe.
    shared_cell = WriteOnlyCell(ws)
    shared_cell.style = 'Pandas'

    def styled(values):
        # Push each value through the shared styled cell, one at a time.
        for value in values:
            shared_cell.value = value
            yield shared_cell

    rows = dataframe_to_rows(df)
    # The whole first (header) row is styled.
    ws.append(styled(next(rows)))
    # For every other row only the first column gets the style.
    for row in rows:
        row = list(row)
        shared_cell.value = row[0]
        row[0] = shared_cell
        ws.append(row)
    wb.save("openpyxl_stream.xlsx")
def format_output(val, eachformat, eachstyle, xls_sheet):
    """Return an Excel cell with *val* converted per the template directive.

    :param val: raw value retrieved from the ESE table
    :param eachformat: format directive from the template (may be ``None``)
    :param eachstyle: named style to apply to the resulting cell
    :param xls_sheet: write-only worksheet the cell will belong to
    :return: a ``WriteOnlyCell`` carrying the converted value
    """
    new_cell = WriteOnlyCell(xls_sheet, value="init")
    new_cell.style = eachstyle
    if val is None:
        val = "None"
    elif eachformat in [None, "OLE"]:
        pass
    elif eachformat.startswith("OLE:"):
        # OLE:<strftime-pattern> -- val is assumed to already be a datetime.
        val = val.strftime(eachformat[4:])
    elif eachformat == "FILE":
        val = file_timestamp(val)
        new_cell.number_format = 'YYYY MMM DD'
    elif eachformat.startswith("FILE:"):
        val = file_timestamp(val)
        val = val.strftime(eachformat[5:])
    elif eachformat.lower().startswith("lookup-"):
        # lookup-<table>: translate through the named template lookup table.
        lookup_name = eachformat.split("-")[1]
        if lookup_name in template_lookups:
            lookup_table = template_lookups.get(lookup_name, {})
            val = lookup_table.get(val, val)
    elif eachformat.lower() == "lookup_id":
        val = id_table.get(val, "No match in srum lookup table for %s" % (val))
    elif eachformat.lower() == "lookup_luid":
        # Low word of the 64-bit LUID selects the interface type.
        inttype = struct.unpack(">H6B", codecs.decode(format(val, '016x'), 'hex'))[0]
        val = template_lookups.get("LUID Interfaces", {}).get(inttype, "")
    elif eachformat.lower() == "seconds":
        # Excel stores durations as fractions of a day.
        val = val / 86400.0
        new_cell.number_format = 'dd hh:mm:ss'
    elif eachformat.lower() == "md5":
        # BUG FIX: hashlib requires bytes on Python 3; hashing the bare
        # str raised TypeError. Same fix applied to sha1/sha256 below.
        val = hashlib.md5(str(val).encode()).hexdigest()
    elif eachformat.lower() == "sha1":
        val = hashlib.sha1(str(val).encode()).hexdigest()
    elif eachformat.lower() == "sha256":
        val = hashlib.sha256(str(val).encode()).hexdigest()
    elif eachformat.lower() == "base16":
        if isinstance(val, int):
            val = hex(val)
        else:
            # NOTE(review): format(<str>, "08x") raises ValueError; presumably
            # non-int values reaching here are numeric-like -- confirm upstream.
            val = format(val, "08x")
    elif eachformat.lower() == "base2":
        if isinstance(val, int):
            val = format(val, "032b")
        else:
            try:
                val = int(str(val), 2)
            except (ValueError, TypeError):
                # Not parseable as binary: keep the original value.
                pass
    elif eachformat.lower() == "interface_id" and options.reghive:
        val = interface_table.get(str(val), "")
    elif eachformat.lower() == "interface_id" and not options.reghive:
        # Without a registry hive there is nothing to translate against.
        pass
    # Any unrecognised directive leaves the value untouched.
    new_cell.value = val
    return new_cell
def serialise(self, worksheet, record, triggered: Triggered):
    """Build the spreadsheet row for *record* as a header-label -> value map."""
    # Internal ID is rendered as a hyperlink back to the noms-ops record.
    id_cell = WriteOnlyCell(worksheet, self.get_internal_id(record))
    id_cell.hyperlink = self.get_noms_ops_url(record)
    id_cell.style = 'Hyperlink'
    row = {
        'Notification rule': self.rule_description,
        'Internal ID': id_cell,
    }
    # Some rule types contribute one extra column taken from the trigger.
    extra = self.additional_headers.get(type(self.rule), None)
    if extra:
        row[extra['header']] = triggered.kwargs[extra['triggered_kwarg']]
    return row
def write_title(self, worksheet, title=None):
    """Append a styled title row merged across every column of the sheet.

    No-op when *title* is falsy.
    """
    if not title:
        return
    cell = WriteOnlyCell(ws=worksheet, value=title)
    cell.style = self.title_style
    worksheet.append((cell, ))
    worksheet.merge_cells(
        start_row=cell.row,
        start_column=cell.col_idx,
        end_row=cell.row,
        end_column=cell.col_idx + len(self.columns) - 1,
    )
def _generate_spreadsheet_data(cls, request, out, report, *args, **kwargs):
    # Run the report's SQL and stream the result set into *out* as a
    # write-only xlsx workbook: one styled header row, an auto-filter,
    # then one row per database record.
    # Create a workbook
    wb = Workbook(write_only=True)
    ws = wb.create_sheet(title=report.name)
    # Create a named style for the header row
    readlonlyheaderstyle = NamedStyle(name="readlonlyheaderstyle")
    readlonlyheaderstyle.fill = PatternFill(fill_type="solid", fgColor="d0ebfb")
    wb.add_named_style(readlonlyheaderstyle)
    # Run the query
    conn = None
    try:
        conn = create_connection(request.database)
        # One shared "Read only" comment object, attached to every header cell.
        comment = CellComment(force_text(_("Read only")),
                              "Author",
                              height=20,
                              width=80)
        with conn.cursor() as cursor:
            # Optionally drop privileges to a restricted reporting role
            # before executing user-supplied report SQL.
            sqlrole = settings.DATABASES[request.database].get(
                "SQL_ROLE", "report_role")
            if sqlrole:
                cursor.execute("set role %s" % (sqlrole, ))
            cursor.execute(sql=cls.getSQL(report.sql))
            if cursor.description:
                # Write header row
                header = []
                for f in cursor.description:
                    cell = WriteOnlyCell(ws, value=f[0])
                    cell.style = "readlonlyheaderstyle"
                    cell.comment = comment
                    header.append(cell)
                ws.append(header)
                # Add an auto-filter to the table
                ws.auto_filter.ref = "A1:%s1048576" % get_column_letter(
                    len(header))
                # Write all output rows
                for result in cursor.fetchall():
                    ws.append(
                        [_getCellValue(i, request=request) for i in result])
        # Write the spreadsheet
        wb.save(out)
    finally:
        # Always release the database connection, even on query failure.
        if conn:
            conn.close()
def generate_sheet(worksheet, serialiser, rule, record_set):
    """Fill *worksheet* with one row per record in *record_set* that
    triggers *rule*; write a placeholder row when nothing triggered."""
    headers = serialiser.get_headers()
    worksheet.append(headers)
    written = 0
    for record in record_set:
        if not rule.applies_to(record):
            continue
        triggered = rule.triggered(record)
        if not triggered:
            continue
        data = serialiser.serialise(worksheet, record, triggered)
        # Order the serialised values by the header row.
        worksheet.append([data.get(column, None) for column in headers])
        written += 1
    if written:
        # Auto-filter spans the header plus every written data row.
        worksheet.auto_filter.ref = (
            f'A1:{get_column_letter(len(headers))}{written + 1}'
        )
    else:
        note = WriteOnlyCell(worksheet, 'No notifications')
        note.style = 'Good'
        worksheet.append([serialiser.rule_description, note])
def exportWorkbook(request):
    # Export every entity type selected in the POST request to a single
    # xlsx workbook (one sheet per model) and return it as an HTTP
    # attachment. Models the user may not change, or that are flagged as
    # administrative, are skipped; per-entity failures are swallowed so
    # one bad model does not abort the whole export.
    # Create a workbook
    wb = Workbook(write_only=True)
    # Create a named style for the header row
    headerstyle = NamedStyle(name="headerstyle")
    headerstyle.fill = PatternFill(fill_type="solid", fgColor="70c4f4")
    wb.add_named_style(headerstyle)
    readlonlyheaderstyle = NamedStyle(name="readlonlyheaderstyle")
    readlonlyheaderstyle.fill = PatternFill(fill_type="solid", fgColor="d0ebfb")
    wb.add_named_style(readlonlyheaderstyle)
    # Loop over all selected entity types
    exportConfig = {"anonymous": request.POST.get("anonymous", False)}
    ok = False
    for entity_name in request.POST.getlist("entities"):
        try:
            # Initialize
            (app_label, model_label) = entity_name.split(".")
            model = apps.get_model(app_label, model_label)
            # Verify access rights
            permname = get_permission_codename("change", model._meta)
            if not request.user.has_perm("%s.%s" % (app_label, permname)):
                continue
            # Never export some special administrative models
            if model in EXCLUDE_FROM_BULK_OPERATIONS:
                continue
            # Create sheet
            ok = True
            ws = wb.create_sheet(title=force_text(model._meta.verbose_name))
            # Build a list of fields and properties
            fields = []
            modelfields = []
            header = []
            source = False
            lastmodified = False
            owner = False
            comment = None
            try:
                # The admin model of the class can define some fields to exclude from the export
                exclude = data_site._registry[model].exclude
            except Exception:
                exclude = None
            for i in model._meta.fields:
                if i.name in ["lft", "rght", "lvl"]:
                    continue  # Skip some fields of HierarchyModel
                elif i.name == "source":
                    source = i  # Put the source field at the end
                elif i.name == "lastmodified":
                    lastmodified = i  # Put the last-modified field at the very end
                elif not (exclude and i.name in exclude):
                    fields.append(i.column)
                    modelfields.append(i)
                    cell = WriteOnlyCell(ws, value=force_text(
                        i.verbose_name).title())
                    if i.editable:
                        cell.style = "headerstyle"
                        # Editable columns get a hint comment describing
                        # the accepted values where we can derive one.
                        if isinstance(i, ForeignKey):
                            cell.comment = CellComment(
                                force_text(
                                    _("Values in this field must exist in the %s table"
                                      ) %
                                    force_text(i.remote_field.model.
                                               _meta.verbose_name)),
                                "Author",
                            )
                        elif i.choices:
                            cell.comment = CellComment(
                                force_text(
                                    _("Accepted values are: %s") %
                                    ", ".join([c[0] for c in i.choices])),
                                "Author",
                            )
                    else:
                        cell.style = "readlonlyheaderstyle"
                        # The "Read only" comment object is created lazily
                        # once and shared by all read-only header cells.
                        if not comment:
                            comment = CellComment(
                                force_text(_("Read only")),
                                "Author",
                                height=20,
                                width=80,
                            )
                        cell.comment = comment
                    header.append(cell)
                    if i.name == "owner":
                        owner = True
            if hasattr(model, "propertyFields"):
                # propertyFields may be a callable taking the request or a
                # plain iterable of property descriptors.
                if callable(model.propertyFields):
                    props = model.propertyFields(request)
                else:
                    props = model.propertyFields
                for i in props:
                    if i.export:
                        fields.append(i.name)
                        cell = WriteOnlyCell(ws, value=force_text(
                            i.verbose_name).title())
                        if i.editable:
                            cell.style = "headerstyle"
                            if isinstance(i, ForeignKey):
                                cell.comment = CellComment(
                                    force_text(
                                        _("Values in this field must exist in the %s table"
                                          ) %
                                        force_text(i.remote_field.model.
                                                   _meta.verbose_name)),
                                    "Author",
                                )
                            elif i.choices:
                                cell.comment = CellComment(
                                    force_text(
                                        _("Accepted values are: %s") %
                                        ", ".join([c[0] for c in i.choices])),
                                    "Author",
                                )
                        else:
                            cell.style = "readlonlyheaderstyle"
                            if not comment:
                                comment = CellComment(
                                    force_text(_("Read only")),
                                    "Author",
                                    height=20,
                                    width=80,
                                )
                            cell.comment = comment
                        header.append(cell)
                        modelfields.append(i)
            if source:
                fields.append("source")
                cell = WriteOnlyCell(ws, value=force_text(_("source")).title())
                cell.style = "headerstyle"
                header.append(cell)
                modelfields.append(source)
            if lastmodified:
                fields.append("lastmodified")
                cell = WriteOnlyCell(ws, value=force_text(
                    _("last modified")).title())
                cell.style = "readlonlyheaderstyle"
                if not comment:
                    comment = CellComment(force_text(_("Read only")),
                                          "Author",
                                          height=20,
                                          width=80)
                cell.comment = comment
                header.append(cell)
                modelfields.append(lastmodified)
            # Write a formatted header row
            ws.append(header)
            # Add an auto-filter to the table
            ws.auto_filter.ref = "A1:%s1048576" % get_column_letter(
                len(header))
            # Use the default manager
            if issubclass(model, HierarchyModel):
                model.rebuildHierarchy(database=request.database)
                query = (model.objects.all().using(request.database).order_by(
                    "lvl", "pk"))
            elif owner:
                # First export records with empty owner field
                query = (model.objects.all().using(request.database).order_by(
                    "-owner", "pk"))
            else:
                query = model.objects.all().using(
                    request.database).order_by("pk")
            # Special annotation of the export query
            if hasattr(model, "export_objects"):
                query = model.export_objects(query, request)
            # Loop over all records
            for rec in query.values_list(*fields):
                cells = []
                fld = 0
                for f in rec:
                    cells.append(
                        _getCellValue(f,
                                      field=modelfields[fld],
                                      exportConfig=exportConfig))
                    fld += 1
                ws.append(cells)
        except Exception:
            pass  # Silently ignore the error and move on to the next entity.
    # Not a single entity to export
    if not ok:
        raise Exception(_("Nothing to export"))
    # Write the excel from memory to a string and then to a HTTP response
    output = BytesIO()
    wb.save(output)
    response = HttpResponse(
        content_type=
        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        content=output.getvalue(),
    )
    response["Content-Disposition"] = 'attachment; filename="frepple.xlsx"'
    response["Cache-Control"] = "no-cache, no-store"
    return response
def process_srum(ese_db, target_wb):
    """Process all the tables and columns in the ESE database"""
    # Dump every non-skipped ESE table into its own worksheet of
    # *target_wb*, applying the styles/formats defined in the module-level
    # ``template_tables`` where the table has a template entry.
    total_recs = sum([
        x.number_of_records for x in ese_db.tables if x.name not in skip_tables
    ])
    if not options.quiet:
        print("Processing {} records across {} tables".format(
            total_recs, ese_db.number_of_tables - len(skip_tables)))
    for table_num in range(ese_db.number_of_tables):
        ese_table = ese_db.get_table(table_num)
        if ese_table.name in skip_tables:
            continue
        # Templated tables carry a friendly sheet name; otherwise fall back
        # to a slice of the raw table name (sheet titles are length-limited).
        if ese_table.name in template_tables:
            tname, tfields = template_tables.get(ese_table.name)
        else:
            tname = ese_table.name[1:15]
        if not options.quiet:
            print("\nNow dumping table {} containing {} rows".format(
                tname, ese_table.number_of_records))
            print("While you wait, did you know ...\n {} \n".format(next(ads)))
        xls_sheet = target_wb.create_sheet(title=tname)
        # Default header: raw column names; replaced below when a template
        # provides styled header cells for this table.
        header_row = [x.name for x in ese_table.columns]
        if ese_table.name in template_tables:
            tname, tfields = template_tables.get(ese_table.name)
            header_row = []
            for eachcol in ese_table.columns:
                if eachcol.name in tfields:
                    cell_style, _, cell_value = tfields.get(eachcol.name)
                    new_cell = WriteOnlyCell(xls_sheet, value=cell_value)
                    new_cell.style = cell_style
                    header_row.append(new_cell)
                else:
                    header_row.append(
                        WriteOnlyCell(xls_sheet, value=eachcol.name))
        xls_sheet.append(header_row)
        column_names = [x.name for x in ese_table.columns]
        for row_num in range(ese_table.number_of_records):
            try:
                ese_row = ese_table.get_record(row_num)
            except Exception as e:
                # Corrupt rows are reported and skipped, not fatal.
                print("Skipping corrupt row {0} in the {1} table. Because {2}".
                      format(row_num, ese_table.name, str(e)))
                continue
            if ese_row == None:
                continue
            if not options.quiet and row_num % 500 == 0:
                # Redraw a 50-char progress bar in place every 500 rows.
                print("\r|{0:-<50}| {1:3.2f}%".format(
                    "X" * (50 * row_num // ese_table.number_of_records),
                    100 * row_num / ese_table.number_of_records),
                      end="")
            #The row is retrieved now use the template to figure out which ones you want and format them
            xls_row = []
            for col_num in range(ese_table.number_of_columns):
                val = smart_retrieve(ese_table, row_num, col_num)
                if val == "Error":
                    val = "WARNING: Invalid Column Name {}".format(
                        column_names[col_num])
                elif val == None:
                    val = "None"
                elif ese_table.name in template_tables:
                    tname, tfields = template_tables.get(ese_table.name)
                    if column_names[col_num] in tfields:
                        # Templated column: convert the value into a styled
                        # WriteOnlyCell via format_output().
                        cstyle, cformat, _ = tfields.get(column_names[col_num])
                        val = format_output(val, cformat, cstyle, xls_sheet)
                #print dir(new_cell.style.font)
                xls_row.append(val)
            xls_sheet.append(xls_row)
        if not options.quiet:
            print(
                "\r|XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX| 100.00% FINISHED"
            )
def create_header(self, worksheet):
    """Return the header cell for this column, styled when a style is set."""
    cell = WriteOnlyCell(ws=worksheet, value=self.header)
    if self.header_style:
        cell.style = self.header_style
    return cell
def format_output(val, eachformat, eachstyle):
    """Return an Excel cell with *val* converted per the template directive.

    :param val: raw value from the ESE table
    :param eachformat: format directive string (may be ``None``)
    :param eachstyle: named style applied to the new cell
    :return: a ``WriteOnlyCell`` on the module-level ``xls_sheet``
    """
    new_cell = WriteOnlyCell(xls_sheet, value="init")
    new_cell.style = eachstyle
    if val is None:
        val = "None"
    elif eachformat is None:
        pass
    elif eachformat == "OLE":
        val = ole_timestamp(val)
        new_cell.number_format = 'YYYY MMM DD'
    elif eachformat.startswith("OLE:"):
        # OLE:<strftime-pattern>
        val = ole_timestamp(val)
        val = val.strftime(eachformat[4:])
    elif eachformat == "FILE":
        val = file_timestamp(val)
        new_cell.number_format = 'YYYY MMM DD'
    elif eachformat.startswith("FILE:"):
        val = file_timestamp(val)
        val = val.strftime(eachformat[5:])
    elif eachformat.lower() == "lookup_id":
        val = id_table.get(val, "No match in srum lookup table for %s" % (val))
    elif eachformat.lower() == "lookup_luid":
        val = lookup_luid(val)
    elif eachformat.lower() == "lookup_sid":
        val = "%s (%s)" % (val, lookup_sid(val))
    elif eachformat.lower() == "seconds":
        # Excel stores durations as fractions of a day.
        val = val / 86400.0
        new_cell.number_format = 'dd hh:mm:ss'
    elif eachformat.lower() == "md5":
        val = hashlib.md5(str(val)).hexdigest()
    elif eachformat.lower() == "sha1":
        val = hashlib.sha1(str(val)).hexdigest()
    elif eachformat.lower() == "sha256":
        val = hashlib.sha256(str(val)).hexdigest()
    elif eachformat.lower() == "base16":
        # BUG FIX: the original compared type(val) against the *string*
        # "<type 'int'>", which can never equal a type object, so integers
        # were never rendered with hex() and fell into the string branch.
        if isinstance(val, int):
            val = hex(val)
        else:
            # Python 2 hex codec; Python 3 would need codecs.encode(..., 'hex').
            val = str(val).encode("hex")
    elif eachformat.lower() == "base2":
        if isinstance(val, int):
            val = bin(val)
        else:
            try:
                val = int(str(val), 2)
            except (ValueError, TypeError):
                # Keep the original value and flag it in a cell comment.
                new_cell.comment = Comment(
                    "Warning: Unable to convert value %s to binary." % (val),
                    "srum_dump")
    elif eachformat.lower() == "interface_id" and options.reghive:
        val = interface_table.get(str(val), "")
    elif eachformat.lower() == "interface_id" and not options.reghive:
        # Without the registry hive there is no interface table to consult.
        new_cell.comment = Comment(
            "WARNING: Ignoring interface_id format command because the --REG_HIVE was not specified.",
            "srum_dump")
    else:
        # Unknown directive: keep the value, flag the directive.
        new_cell.comment = Comment(
            "WARNING: I'm not sure what to do with the format command %s. It was ignored."
            % (eachformat), "srum_dump")
    new_cell.value = val
    return new_cell
if not options.quiet: try: ad = next(ads) except: ad = "Thanks for using srum_dump!" print("While you wait, did you know ...\n" + ad + "\n") xls_sheet = target_wb.create_sheet(title=each_sheet) #Now copy the header values and header formats from the template to the new worksheet header_row = [] for eachcolumn in range(1, len(ese_template_fields) + 1): cell_value = template_sheet.cell(row=4, column=eachcolumn).value cell_style = template_sheet.cell(row=4, column=eachcolumn).style new_cell = WriteOnlyCell(xls_sheet, value=cell_value) new_cell.style = cell_style header_row.append(new_cell) xls_sheet.append(header_row) #Until we get an empty row retrieve the rows from the ESE table and process them row_num = 1 #Init to 1, first row will be 2 in spreadsheet (1 is headers) while True: try: ese_row = ese_db.getNextRow(ese_table) except Exception as e: print( "Skipping corrupt row in the %s table. The last good row was %s." % (each_sheet, row_num)) continue if ese_row == None: break #The row is retrieved now use the template to figure out which ones you want and format them
def xlsx(net, out_path='.', overwrite=False):
    """
    Export :class:`Qiber3D.Network` as Excel file (:file:`.xlsx`).

    :param Qiber3D.Network net: network to export
    :param out_path: file or folder path where to save the network
    :type out_path: str, Path
    :param bool overwrite: allow file overwrite
    :return: path to saved file
    :rtype: Path
    """
    out_path, needs_unlink = helper.out_path_check(out_path,
                                                   network=net,
                                                   prefix='',
                                                   suffix='.xlsx',
                                                   overwrite=overwrite,
                                                   logger=net.logger)
    if out_path is None:
        return

    def _append_subtitle(sheet, text):
        # Two-cell "Headline 3" row used as a section header.
        subtitle = WriteOnlyCell(sheet, text)
        empty_subtitle = WriteOnlyCell(sheet, '')
        subtitle.style = 'Headline 3'
        empty_subtitle.style = 'Headline 3'
        sheet.append([subtitle, empty_subtitle])

    def _append_measurements(sheet, obj, properties):
        # One "description / value" row per property; arrays become their
        # list repr, floats get a two-decimal number format.
        for key, description in properties.items():
            value = getattr(obj, key)
            if type(value) == np.ndarray:
                value = str(value.tolist())
            if isinstance(value, (np.floating, float)):
                value = WriteOnlyCell(sheet, value=value)
                value.number_format = '0.00'
            sheet.append([description, value])

    net_properties = {
        'average_radius': 'Average radius',
        'max_radius': 'Max radius',
        'cylinder_radius': 'Equal cylinder radius',
        'length': 'Length',
        'volume': 'Volume',
        'bbox_volume': 'Bounding box volume',
        'bbox': 'Bounding box',
        'bbox_size': 'Bounding box size',
        'center': 'Bounding box center'
    }
    fiber_seg_properties = {
        'average_radius': 'Average radius',
        'max_radius': 'Max radius',
        'cylinder_radius': 'Equal cylinder radius',
        'length': 'Length',
        'volume': 'Volume',
        # 'raster_volume',
    }

    wb = Workbook(write_only=True)

    # --- Network sheet: metadata plus network-wide measurements ---
    ws = wb.create_sheet('Network')
    title = WriteOnlyCell(ws, f'{net.name}')
    ws.column_dimensions['A'].width = 21
    ws.column_dimensions['B'].width = 21
    title.style = 'Title'
    ws.append([title])
    ws.append([config.app_name, config.version])
    ws.append([])
    _append_subtitle(ws, 'Metadata')
    if isinstance(net.input_file, Path):
        ws.append(['Source file', str(net.input_file.absolute())])
    else:
        ws.append(['Source file', '-'])
    ws.append(['Creation date', datetime.now()])
    ws.append([])
    _append_subtitle(ws, 'Network measurements')
    ws.append(['Number of fibers', len(net.fiber)])
    ws.append(['Number of segments', len(net.segment)])
    ws.append(['Number of points', len(net.point)])
    ws.append(['Number of branch points', len(net.cross_point_dict)])
    _append_measurements(ws, net, net_properties)

    # --- Fibers sheet: one measurement block per fiber ---
    ws = wb.create_sheet('Fibers')
    ws.column_dimensions['A'].width = 21
    ws.column_dimensions['B'].width = 21
    for fid, fiber in net.fiber.items():
        _append_subtitle(ws, f'Fiber {fid} measurements')
        ws.append(['Number of segments', len(fiber.segment)])
        ws.append([
            'Number of points',
            sum([len(seg) for seg in fiber.segment.values()])
        ])
        # Branch points are graph nodes appearing in more than one edge.
        branch_points_raw = sum((list(a) for a in fiber.graph.edges), [])
        check = []
        bp_set = set()
        for bp in branch_points_raw:
            if bp in check:
                bp_set.add(bp)
            else:
                check.append(bp)
        ws.append(['Number of branch points', len(bp_set)])
        # BUG FIX: the measurements were read with getattr(net, key), so
        # every fiber block repeated the network-wide values instead of
        # this fiber's own measurements.
        _append_measurements(ws, fiber, fiber_seg_properties)
        ws.append(['Segment list'] + [sid for sid in fiber.segment.keys()])
        ws.append([])

    # --- Segments sheet: one measurement block per segment ---
    ws = wb.create_sheet('Segments')
    ws.column_dimensions['A'].width = 21
    ws.column_dimensions['B'].width = 21
    for sid, segment in net.segment.items():
        _append_subtitle(ws, f'Segment {sid} measurements')
        ws.append(['Number of points', len(segment)])
        # BUG FIX: read from ``segment`` (was ``net``), see Fibers loop above.
        _append_measurements(ws, segment, fiber_seg_properties)
        ws.append([])

    # --- Points sheet: flat table of every point with its radius ---
    ws = wb.create_sheet('Points')
    ws.append(['FID', 'SID', 'X', 'Y', 'Z', 'Radius'])
    for fid, fiber in net.fiber.items():
        for sid, segment in fiber.segment.items():
            for n, (x, y, z) in enumerate(segment.point):
                x = WriteOnlyCell(ws, value=x)
                x.number_format = '0.000'
                y = WriteOnlyCell(ws, value=y)
                y.number_format = '0.000'
                z = WriteOnlyCell(ws, value=z)
                z.number_format = '0.000'
                r = WriteOnlyCell(ws, value=segment.radius[n])
                r.number_format = '0.000'
                ws.append([fid, sid, x, y, z, r])

    wb.save(out_path)
    return out_path
def create_cell(self, worksheet, value=None):
    """Return a write-only cell for *value*, falling back to the column
    default when *value* is None, converted via ``_to_excel``."""
    payload = self.default if value is None else value
    cell = WriteOnlyCell(worksheet, value=self._to_excel(payload))
    if self.row_style:
        cell.style = self.row_style
    return cell