def excel(self):
    """Write the collected report content to the Excel file named by
    self.args.excelfile; if self.args.show is set, open it in Excel (macOS).
    """
    import xlsxwriter
    import subprocess
    workbook = xlsxwriter.Workbook(self.args.excelfile)
    worksheet = workbook.add_worksheet()
    # "text_wrap" alone did not take effect; adding "bold" makes wrapping work.
    fmt = workbook.add_format({"text_wrap": True, "bold": True})
    # Header goes on row 0, shifted one column right (content column 0 holds
    # the row labels, so header[h] pairs with content column h + 1).
    for h, title in enumerate(self.header):
        worksheet.write(0, h + 1, title, fmt)
    # Body rows: self.content maps row number -> {column number -> value}.
    for r in range(1, self.row + 1):
        shlog.debug("content:%s", self.content)
        shlog.debug("writing excel row: %s" % r)
        for c in self.content[r].keys():
            worksheet.write(r, c, self.content[r][c], fmt)
    # Size each column to its widest cell, clamped to [1, 60] characters.
    for c in range(self.col_max + 1):
        maxc = min(self.max_chars(c), 60)
        maxc = max(maxc, 1)  # at least one char
        worksheet.set_column(c, c, maxc)
    # Keep the header row visible while scrolling.
    worksheet.freeze_panes(1, 0)
    workbook.close()
    if self.args.show:
        # Argument-list form (no shell): a filename containing spaces or
        # shell metacharacters can neither break the command nor inject
        # into it, unlike the previous os.system() string interpolation.
        subprocess.run(["open", "-a", "Microsoft Excel", self.args.excelfile])
def report(self, element_sql_params):
    """Build one workspace row per result of the element query, then recurse
    into any substanza so it can extend the same workspace.

    element_sql_params -- dict of substitutions applied to self.element_sql.
    Returns the (shared) workspace object.
    """
    # Outer loop -- each row returned by this query provides the parameters
    # that identify the subject of one report row.  args, the formatted SQL
    # and the stanza itself are passed so qd() can capture column names for
    # the left-hand header (see qd's side-effect protocol).
    for row_query_sql_params in qd(self.args, self.element_sql.format(**element_sql_params), self):
        self.workspace.next_row()
        for segment in self.report_segments:
            unformatted_row_query_sql = segment.segment_sql
            contexts = segment.context.context_list
            for context in contexts:
                #import pdb; pdb.set_trace()
                # Merge the row-identifying parameters with this context's
                # substitutions; context values win on key collisions.
                merged_dict = {}
                merged_dict.update(row_query_sql_params)
                merged_dict.update(context)
                # header collection: collect the context to be used as a header
                if context.values() not in self.context_collector:
                    self.context_collector.append(context.values())
                shlog.debug("formating: %s:%s" %(unformatted_row_query_sql, merged_dict.keys()))
                row_query_sql = unformatted_row_query_sql.format(**merged_dict)
                shlog.debug(row_query_sql)
                # Second substitution pass: resolves any placeholders that the
                # first pass's substituted text itself introduced.
                row_query_sql = row_query_sql.format(**context)
                if segment.one_to_one:
                    self.generate_one_to_one_segment(row_query_sql)
                else:
                    self.generate_one_to_many_segment(row_query_sql)
        # done building this row, now build any substanza
        if self.substanza:
            self.substanza.workspace = self.workspace
            self.substanza.report(row_query_sql_params)
    return self.workspace
def iso_datetime(day, hour, offset_from_central_time=0):
    """Convert a mm/dd/yyyy date plus an hh:mm AM/PM time into an ISO-8601
    timestamp string (YYYY-MM-DDTHH:MM:SS), shifted by the given number of
    hours relative to central time.
    """
    stamp = ",".join((day, hour))
    shlog.debug(stamp)
    shift = datetime.timedelta(hours=offset_from_central_time)
    parsed = datetime.datetime.strptime(stamp, "%m/%d/%Y,%I:%M %p")
    result = datetime.datetime.isoformat(parsed + shift)
    shlog.debug(result)
    return result
def insert(self, con, rows):
    """Insert rows of ASCII data into the database table, applying the
    per-column conversion functions (self.hfm) to each value first.

    con  -- open DB-API connection; committed once after all rows.
    rows -- iterable of sequences, one per table row.
    """
    placeholders = ",".join("?" for _ in self.columns)
    insert_statement = "insert into " + self.tableName + " values (" + placeholders + ")"
    shlog.verbose(insert_statement)
    cur = con.cursor()
    for row in rows:
        shlog.debug(row)
        # apply the conversion function paired with each column
        converted = [f(item) for (item, f) in zip(row, self.hfm)]
        cur.execute(insert_statement, converted)
    con.commit()
def qd(args, sql, passed_stanza):
    """Run *sql* against args.dbfile and return the cursor's results, one
    dict-like sqlite3.Row per row.

    Side effect: the second call overall (counter == 1) captures this
    query's column names into passed_stanza.left_columns_collector for use
    as the report's left-hand header.
    """
    # return results of query as a list of dictionaries,
    # one for each row.
    con = sqlite3.connect(args.dbfile)
    con.row_factory = sqlite3.Row  # rows act like dicts keyed by column name
    cur = con.cursor()
    shlog.verbose(sql)
    results = cur.execute(sql)
    shlog.debug(results)
    # header collection handling
    # 0th call to qd() is from contexts
    # the one that follows it directly is the one we can snatch column names from
    if passed_stanza.left_column_collections == 1:
        passed_stanza.left_columns_collector = list(map(lambda x: x[0], cur.description))
    # counter advances on every call, so the capture above fires exactly once
    passed_stanza.left_column_collections += 1
    # NOTE(review): this returns the live cursor (not a materialized list)
    # and never closes the connection; callers rely on iterating it lazily.
    return results
def __init__(self, args, sql):
    """Build self.context_list: one dict per row of *sql* (column name ->
    value), or a single empty dict when sql is None (no-context shim).
    """
    self.context_list = []
    if sql is None:  # identity check, not equality, per PEP 8
        # shim to support no context... Fix after getting the chain to work.
        self.context_list = [{}]
        return
    con = sqlite3.connect(args.dbfile)
    try:
        cur = con.cursor()
        shlog.verbose(sql)
        self.results = [r for r in cur.execute(sql)]
        self.names = [d[0] for d in cur.description]
    finally:
        # results are fully materialized above, so closing here is safe;
        # the original leaked this connection.
        con.close()
    shlog.debug("context list generated with names %s" % self.names)
    # one dict per result row, keyed by column name
    for row in self.results:
        self.context_list.append(dict(zip(self.names, row)))
    shlog.verbose("new query context: %s", self.context_list)
def excel(self):
    """Write the collected report content to the Excel file named by
    self.args.excelfile, add a 'Metadata' sheet (run time, source database,
    database timestamp), and optionally (self.args.show) open it in Excel.
    """
    import xlsxwriter
    import subprocess
    workbook = xlsxwriter.Workbook(self.args.excelfile)
    worksheet = workbook.add_worksheet()
    # "text_wrap" alone did not take effect; adding "bold" makes wrapping work.
    fmt = workbook.add_format({"text_wrap": True, "bold": True})
    # Header goes on row 0, shifted one column right (content column 0 holds
    # the row labels, so header[h] pairs with content column h + 1).
    for h, title in enumerate(self.header):
        worksheet.write(0, h + 1, title, fmt)
    # Body rows: self.content maps row number -> {column number -> value}.
    for r in range(1, self.row + 1):
        shlog.debug("content:%s", self.content)
        shlog.debug("writing excel row: %s" % r)
        for c in self.content[r].keys():
            worksheet.write(r, c, self.content[r][c], fmt)
    # Size each column to its widest cell, clamped to [1, 60] characters.
    for c in range(self.col_max + 1):
        maxc = min(self.max_chars(c), 60)
        maxc = max(maxc, 1)  # at least one char
        worksheet.set_column(c, c, maxc)
    # Keep the header row visible while scrolling.
    worksheet.freeze_panes(1, 0)
    # create a new sheet that will contain metadata
    meta_sheet = workbook.add_worksheet(name='Metadata')
    meta_sheet.write(0, 0, 'Report ran', fmt)
    meta_sheet.write(0, 1, datetime.now().strftime("_%m.%d.%Y_%H:%M"), fmt)
    meta_sheet.write(1, 0, 'Source database', fmt)
    meta_sheet.write(1, 1, self.args.dbfile, fmt)
    meta_sheet.write(2, 0, 'Database timestamp', fmt)
    meta_sheet.write(2, 1, str(get_db_timestamp(self)), fmt)
    for c in range(2):
        meta_sheet.set_column(c, c, 60)
    workbook.close()
    if self.args.show:
        # Argument-list form (no shell): a filename containing spaces or
        # shell metacharacters can neither break the command nor inject
        # into it, unlike the previous os.system() string interpolation.
        subprocess.run(["open", "-a", "Microsoft Excel", self.args.excelfile])
def max_chars(self, colno):
    """Return the maximum number of characters in any cell of column
    *colno*, including its header; 0 for an empty column.
    """
    max_chars = 0
    for rowno in range(self.row):
        if rowno in self.content.keys() and colno in self.content[rowno].keys():
            # use the python string rendering as a width proxy for numeric columns
            max_chars = max(max_chars, len("%s" % self.content[rowno][colno]))
        # special header handling
        # if rowno is 0, that means we are looking at the header row
        # due to the way content vs header are implemented, self.header is
        # offset by 1 compared to colno: for example, B2 value (colno 1,
        # rowno 0) is located in self.header[0], hence colno-1 below
        if rowno == 0 and colno > 0:
            try:
                max_chars = max(max_chars, len("%s" % self.header[colno-1]))
            except IndexError:
                # narrowed from a bare except: only a short header list is
                # expected here, when multiple (sub)stanzas carry an uneven
                # number of contexts; ignore and move on
                pass
    shlog.debug("XXX %s %s" % (colno, max_chars*2))
    return max_chars
# From security_scripts.kli import env_control
config = configparser.ConfigParser()
rel_path = "../cfg/defaults.cfg"
cfg_sources = [rel_path,                        # built-in config for fallback
               # os.path.expanduser() only expands a leading "~", never
               # "$HOME", so the old "$HOME/.scimma" literal was never
               # found; "~/.scimma" resolves to the user's home directory.
               os.path.expanduser("~/.scimma")  # per-user override
               ]
config.read(cfg_sources)
profile = config.get("RDK", "profile")
loglevel = config.get("RDK", "loglevel", fallback="NORMAL")

"""Create command line arguments"""
parser = argparse.ArgumentParser(description=__doc__,
                                 formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--loglevel', '-l',
                    help="Level for reporting e.g. NORMAL, VERBOSE, DEBUG (default: %(default)s)",
                    default=loglevel,
                    choices=["NONE", "NORMAL", "DOTS", "WARN", "ERROR", "VERBOSE", "VVERBOSE", "DEBUG"])
parser.add_argument('--dry-run', '-n', default=False, action='store_true',
                    help="dry run just show what would be done")
parser.add_argument('rulename', help='name of rule to create')
parser.add_argument('cipattern', help='grep friendly pattern to show CIs')
args = parser.parse_args()
shlog.basicConfig(level=args.loglevel)
shlog.debug("{}".format(args))
main(args)