def show_results(self):
    size_to_results = self._group_by(self.results, keyfunc=lambda x: x.test_size)
    pretty_print(size_to_results)
    for size in size_to_results:
        pid_to_results = self._group_by(size_to_results[size],
                                        keyfunc=lambda x: x.worker_pid)
        print self.title('Testing with files of size %s:'
                         % human_readable_size(size, over_time=False))
        print '---'

        print self.header(' Ingesting random bytes:')
        for pid in pid_to_results:
            print self.header(' Worker %s:' % pid), \
                map(lambda res: human_readable_size(
                    res.write_result['random_avg']), pid_to_results[pid])
        print self.header(' Overall average per worker:'), \
            chain(map(lambda res: res.write_result['random_avg'],
                      size_to_results[size]), average, human_readable_size)
        print ''

        print self.header(' Ingesting zero bytes:')
        for pid in pid_to_results:
            print self.header(' Worker %s:' % pid), \
                map(lambda res: human_readable_size(
                    res.write_result['zero_avg']), pid_to_results[pid])
        print self.header(' Overall average per worker:'), \
            chain(map(lambda res: res.write_result['zero_avg'],
                      size_to_results[size]), average, human_readable_size)
        print ''

        print self.header(' Recalling random bytes:')
        for pid in pid_to_results:
            print self.header(' Worker %s:' % pid), \
                map(lambda res: human_readable_size(
                    res.read_result['random_avg']), pid_to_results[pid])
        print self.header(' Overall average per worker:'), \
            chain(map(lambda res: res.read_result['random_avg'],
                      size_to_results[size]), average, human_readable_size)
        print ''

        print self.header(' Recalling zero bytes:')
        for pid in pid_to_results:
            print self.header(' Worker %s:' % pid), \
                map(lambda res: human_readable_size(
                    res.read_result['zero_avg']), pid_to_results[pid])
        print self.header(' Overall average per worker:'), \
            chain(map(lambda res: res.read_result['zero_avg'],
                      size_to_results[size]), average, human_readable_size)
        print '---'
        print ''
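
# Note: `chain` above is clearly not itertools.chain -- it pipes a value
# through a series of functions. A minimal sketch of such a helper (an
# assumption; the real one is defined elsewhere in this module):
def chain(value, *funcs):
    # chain(xs, average, human_readable_size) == human_readable_size(average(xs))
    for func in funcs:
        value = func(value)
    return value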
import sys
import textwrap
from pprint import pprint as pretty_print


def pprint(data=None, flush=True, **options):
    """Pretty print function.

    Args:
        data (str, optional): Data to output into stdout.
        flush (bool, optional): Flush the stream after output if True.
        **options: Other options passed to :func:`print`.
    """
    if data is None:
        data = ''
    if isinstance(data, (list, dict)):
        # containers go through the real pretty-printer
        pretty_print(data, **options)
    else:
        # dedent only applies to strings; running it on a container would crash
        data = textwrap.dedent(data)
        if 'stream' in options:
            options['file'] = options.pop('stream')
        print(data, **options)
    if flush:
        sys.stdout.flush()
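
# Quick usage sketch for the wrapper above (outputs follow from the code
# paths; not captured from a real run):
pprint({'a': 1, 'b': 2})           # containers are routed to pretty_print
pprint("    indented text")        # strings are dedented, then print()ed
pprint("done", stream=sys.stderr)  # 'stream' is forwarded as print's file=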
# print in style
from pprint import pprint as pretty_print
from copy import copy

A2d = [
    [1, 2, 3, 4, 5, 6, 7],
    [12, 26, 35, 43, 35, 63, 74],
    [231, 342, 3343, 4344, 5343, 633, 7346570],
]

print(A2d[2][1])
print(A2d)
print('')
pretty_print(A2d)

l1 = ["a", 'b', 'c', 'd', 'e', 'f']
l = [l1, l1, l1]
print(l)

# Magic!
l[2][0] = "z"  # every row changes its value at position 0,
pretty_print(l)  # because l = [l1, l1, l1] holds three references to the same list l1

# The fix: copy each row so the rows become independent lists
print("After the fix\n\n")
l = [copy(l1), copy(l1), copy(l1)]
l[1][0] = "x"
pretty_print(l)  # now only the middle row changes
#!/bin/python
import mt940
import json
from pprint import pprint as pretty_print


def default(value):
    if isinstance(value, mt940.models.Transactions):
        data = value.data.copy()
        data['transactions'] = value.transactions
        return data
    elif hasattr(value, 'data'):
        return value.data


# load the transactions
rabo_transactions = mt940.parse('rabobank.txt')

print("Final Opening Balance: ",
      rabo_transactions.data.get('final_opening_balance'))
print("Final Closing Balance: ",
      rabo_transactions.data.get('final_closing_balance'))

print("Transactions:")
for transaction in rabo_transactions:
    pretty_print(transaction.data)

# write to a json file
with open('rabobank.json', 'w') as w:
    w.write(json.dumps(rabo_transactions, default=default, indent=4))
        syntax_error(self.lexer.source, token.start,
                     "expected {} token, got {}".format(kind, token.kind))

    def expect_name(self, value):
        '''
        Checks that the current token is of the Tokens.NAME kind and has the
        supplied value.

        :param value: The value of the name token that is expected
        :throws SyntaxError: If the wrong token appears
        :returns: The expected token
        '''
        token = self.curr_token
        if (token.kind == Tokens.NAME) and (token.value == value):
            self.advance()
            return token
        syntax_error(self.lexer.source, token.start,
                     "expected name token ({}), got {}".format(value, token.value))


if __name__ == "__main__":
    import sys
    from lexer import Lexer
    from pprint import pprint as pretty_print

    source = open(sys.argv[1]).read()
    print source
    lexer = Lexer(source=source)
    pretty_print(Parser(lexer=lexer).parse())
from pprint import pprint as pretty_print
from copy import copy, deepcopy

array1D = [1, 2, 3, 4, 5, 6]
array2D = [deepcopy(array1D), deepcopy(array1D), deepcopy(array1D)]

print(array1D)
print("\n")
pretty_print(array2D)

array2D[1][1] = 8745
print(array1D)
pretty_print(array2D)
from pprint import pprint as pretty_print
from copy import copy, deepcopy

nums_2d = [
    [1, 2, 3, 4, 5, 6, 7],
    [8, 9, 10, 11, 12, 13, 14, 15],
    [16, 17, 18, 19, 20, 21, 22]
]

print(nums_2d)
pretty_print(nums_2d)

nums_2d[2][1] = -5
pretty_print(nums_2d)

letters = ["A", "B", "C", "D", "E"]
letters_2d = [copy(letters), copy(letters), copy(letters)]
pretty_print(letters_2d)

letters_2d[0][0] = 'F'
pretty_print(letters_2d)
def main():
    """
    Downloads (if not using cache), reads and parses the CoViD-19 data csv
    and creates an output file with processed data.
    """
    # default arguments
    debug = False
    print_results = False  # was unset unless "print" was passed, causing a NameError below
    use_cached_data = False

    # parse arguments
    args = [arg.lower() for arg in sys.argv[1:]]
    if "help" in args:
        usage(0)
    if "debug" in args:
        debug = True
        args.remove("debug")
    if "print" in args:
        print_results = True
        args.remove("print")
    if "usecache" in args:
        use_cached_data = True
        args.remove("usecache")
    if len(args) == 1:
        iso_code = args[0].upper()
    else:
        usage(1)

    # debug print config
    if debug:
        print("DEBUG: debug = {}".format(debug))
        print("DEBUG: print_results = {}".format(print_results))
        print("DEBUG: use_cached_data = {}".format(use_cached_data))
        print("DEBUG: INPUT_DATA_FILE = {}".format(INPUT_DATA_FILE))
        print("DEBUG: INPUT_DATA_URL = {}".format(INPUT_DATA_URL))
        print("DEBUG: iso_code = {}".format(iso_code))
        print("DEBUG: OUTPUT_DATA_FILE = {}".format(OUTPUT_DATA_FILE))

    # check/get INPUT_DATA_FILE
    if use_cached_data and os.path.exists(INPUT_DATA_FILE):
        print("INFO: Using locally cached file '{}'!".format(INPUT_DATA_FILE))
        seconds_since_input_data_file_modified = time.time(
        ) - os.path.getmtime(INPUT_DATA_FILE)
        if seconds_since_input_data_file_modified > 3600:
            print("WARN: Locally cached file '{}' is more than 1 hour old!".
                  format(INPUT_DATA_FILE))
    elif not use_cached_data:
        subprocess.call(["wget", INPUT_DATA_URL, "-O", INPUT_DATA_FILE])
    else:
        print("ERROR: Cannot find file '{}'!".format(INPUT_DATA_FILE))
        sys.exit(2)

    # init data list
    processed_data = []

    # populate lists
    print("INFO: Reading '{}'...".format(INPUT_DATA_FILE))
    with open(INPUT_DATA_FILE) as csvfile:
        csv_data = csv.reader(csvfile)
        print("INFO: Parsing csv data...")
        columns = next(csv_data)
        new_cases_column_index = columns.index("new_cases")
        new_tests_column_index = columns.index("new_tests")
        if debug:
            print("DEBUG: new_cases_column_index = {}".format(
                new_cases_column_index))
            print("DEBUG: new_tests_column_index = {}".format(
                new_tests_column_index))
        for row in csv_data:
            if row[0] == iso_code:
                new_cases = stringy_float_to_int(row[new_cases_column_index])
                new_tests = stringy_float_to_int(row[new_tests_column_index])
                if not new_tests:
                    percent_new_cases = 0
                else:
                    percent_new_cases = round(100 * new_cases / new_tests, 6)
                processed_data.append([row[3], percent_new_cases])
    column_names = ["date", "percent_new_cases"]

    # debug print data
    if debug:
        print("DEBUG: Printing processed data...")
        pretty_print(processed_data)

    # populate new csv file
    print("INFO: Writing to '{}'...".format(OUTPUT_DATA_FILE))
    with open(OUTPUT_DATA_FILE, 'w') as csvfile:
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow(column_names)
        if print_results:
            column_names = [_.capitalize() for _ in column_names]
            print("\t".join(column_names))
        for row in processed_data:
            csvwriter.writerow(row)
            if print_results:
                row[1] = str(round(row[1], 2)) + "%"
                print("\t".join(row))

    # end
    print("Done! exiting...")
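
# The script relies on a stringy_float_to_int helper defined elsewhere; a
# minimal sketch of what it presumably does (an assumption, not the original):
def stringy_float_to_int(value):
    # CSV fields arrive as strings like "12.0" or "" -- map them to ints
    return int(float(value)) if value else 0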
def parse_stream_record(record):
    '''
    Parses the stream record and returns the modifications made to the
    database. The following cases exist:

    * (old, None) - The record has been deleted
    * (None, new) - The record has been inserted
    * (old, new)  - The record has been modified

    For the underlying schema, feel free to read the documentation:
    http://docs.aws.amazon.com/dynamodbstreams/latest/APIReference/API_Types.html

    :returns: (operation, old_record, new_record)
    '''
    def cleanup(r):
        # strip the DynamoDB type tags, e.g. {'S': '42'} -> '42'
        return {k: r[k].values()[0] for k in r} if r else None

    operation = record['eventName']
    old_item = record['dynamodb'].get('OldImage', None)
    new_item = record['dynamodb'].get('NewImage', None)
    return (operation, cleanup(old_item), cleanup(new_item))


if __name__ == "__main__":
    from pprint import pprint as pretty_print

    for record in get_record_stream():
        operation, old_item, new_item = parse_stream_record(record)
        print "next record {}:\n".format(operation)
        if old_item:
            pretty_print(old_item)
            print
        if new_item:
            pretty_print(new_item)
            print
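
# For illustration, a record shaped the way the DynamoDB Streams documentation
# linked above describes (the attribute values here are made up):
sample = {
    'eventName': 'MODIFY',
    'dynamodb': {
        'OldImage': {'id': {'S': '42'}, 'count': {'N': '1'}},
        'NewImage': {'id': {'S': '42'}, 'count': {'N': '2'}},
    },
}
# parse_stream_record(sample)
# -> ('MODIFY', {'id': '42', 'count': '1'}, {'id': '42', 'count': '2'})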
def dataOutput(res):
    # print the result
    m = Data(res)
    for a, b, c, d, e in zip(m.push(), m.title(), m.date(), m.author(), m.link()):
        # pprint takes a single object, so bundle the fields into one tuple
        pretty_print((a, b, c, d))
        print(e + '\n')
    return previous(res)
def pprint(obj):
    pretty_print(dumps(obj))
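
# Presumably `dumps` here is json.dumps (an assumption; the snippet's imports
# are not shown), so the wrapper pretty-prints the serialized JSON string:
from json import dumps
from pprint import pprint as pretty_print

pprint({"a": [1, 2]})  # -> '{"a": [1, 2]}'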
from pprint import pprint as pretty_print
# import the copy and deepcopy functions from the copy module
from copy import copy, deepcopy

# An array of arrays, hence a 2-dimensional array
nums_2d = [[1, 2, 3, 4, 5, 6, 7],
           [8, 9, 10, 11, 12, 13, 14, 15],
           [16, 17, 18, 19, 20, 21, 22]]

# Query an element from the matrix
# Get 11
print(nums_2d[1][3])
# Above ^ says: get row 2 (i.e. index 1 of the outer array),
# then get element 4 of that row (i.e. index 3)

# Use the pretty_print function to get cleaner output
pretty_print(nums_2d)

# Assign values to the matrix
# Swap element 17 with -5
nums_2d[2][1] = -5
pretty_print(nums_2d)

# Using the copy functions
'''
If we create a 2D array (letters_2d) out of a single array (letters),
any items within letters which are changed will be changed globally
throughout letters_2d, because every row is the same object.
'''
# Ex.
letters = ["A", "B", "C", "D", "E"]
letters_2d = [letters, letters, letters]
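
# The snippet above stops before demonstrating the aliasing; a short
# continuation under the same names shows the effect and the copy() fix:
letters_2d[0][0] = "Z"
pretty_print(letters_2d)  # every row now starts with "Z" -- same object three times

letters_2d = [copy(letters) for _ in range(3)]  # shallow-copy each row
letters_2d[0][0] = "A"
pretty_print(letters_2d)  # only the first row changes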
def copy_into(self, query, pprint=False):
    '''
    Use the COPY INTO method to send and load data to and from S3:
    https://docs.snowflake.net/manuals/sql-reference/sql/copy-into-table.html

    1) unload to s3
    2) copy from s3

    database.conf file must have s3 credentials i.e.
        aws_access_key_id=
        aws_secret_access_key=

    query <string> - sql statement, must include AWS credentials variables
        ex)
        COPY INTO test_schema.test_table
        FROM 's3://<bucket>/test_key'
        FILE_FORMAT = (
            FIELD_DELIMITER = '|'
            COMPRESSION = gzip
        )
        CREDENTIALS = (aws_key_id='{aws_access}' aws_secret_key='{aws_secret}')

    pprint optional <boolean> - prints formatted sql query and time to
        execute in minutes

    returns dictionary of metadata
    '''
    conn = self.connect_to_db()
    cp = configparser.ConfigParser()
    cp.read(self.config_file)
    aws_creds = {
        'aws_access': cp.get(self.db_name, 'aws_access_key_id'),
        'aws_secret': cp.get(self.db_name, 'aws_secret_access_key')
    }
    # template kept for reference; the query itself must embed this clause
    creds = "CREDENTIALS = (aws_key_id='{aws_access}' aws_secret_key='{aws_secret}')"
    if pprint:
        clock = timer()
        print(self.format_sql(query))
    with conn.cursor(DictCursor) as cur:
        try:
            cur.execute(query.format(**aws_creds))
            data = cur.fetchall()
            conn.commit()
            if pprint:
                clock.print_lap('m')
                pretty_print(data)
            status = data[0].get('status')
            if status == 'LOAD_FAILED':
                raise snowflake.connector.errors.ProgrammingError('{}'.format(data[0]))
            elif status == 'PARTIALLY_LOADED':
                warnings.warn('partially loaded - {0}'.format(data[0].get('first_error')))
        finally:
            self.close_conn()
    return data
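
# A hedged usage sketch (`db`, the bucket, and the table names are
# placeholders; the query shape follows the docstring example above):
query = """
COPY INTO test_schema.test_table
FROM 's3://<bucket>/test_key'
FILE_FORMAT = (FIELD_DELIMITER = '|' COMPRESSION = gzip)
CREDENTIALS = (aws_key_id='{aws_access}' aws_secret_key='{aws_secret}')
"""
metadata = db.copy_into(query, pprint=True)  # db: instance of the class above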
            # skip whitespace and commas
            if ((self.source[position] == ' ') or
                    (self.source[position] == '\t') or
                    (self.source[position] == '\r') or
                    (self.source[position] == '\n') or
                    (self.source[position] == ',')):
                position += 1
            # skip a '#' comment up to the end of the line
            elif (self.source[position] == '#'):
                position += 1
                while ((position < self.eof) and
                       (self.source[position] >= ' ') and
                       (self.source[position] != '\n') and
                       (self.source[position] != '\r')):
                    position += 1
            else:
                break
        return start, position

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()


#--------------------------------------------------------------------------------
# main test method
#--------------------------------------------------------------------------------
if __name__ == "__main__":
    import sys
    from pprint import pprint as pretty_print

    source = open(sys.argv[1]).read()
    print source
    pretty_print(list(Lexer(source=source)))
def get_roidata(videopath, roi_specs):
    '''videopath: complete path to video file
    roi_specs: list of (roi_instance, birth_msec, death_msec) tuples, e.g.
        roi_specs = [
            (MeanRectRoi((0, 0), (100, 100)), 2000, 10000),    # observe sec 2 -> 10
            (MeanRectRoi((10, 10), (110, 110)), 10000, 20000)  # observe sec 10 -> 20
        ]

    lets regions in roi_specs collect data during their lifetime
    will dump all results and region screenshots into a new directory located
    in the same directory as the video file, returns path'''

    # anything to do?
    if not roi_specs:
        return

    # set up a directory for results
    saveto = _set_up_result_directory(videopath)

    # save ROI specs
    with open(os.path.join(saveto, 'roi_specs.py'), 'w') as f:
        pretty_print(roi_specs, f)

    # msec -> frame
    cap = RoiCap(videopath)
    roi_specs = [(roi, ms_to_frame(start, cap.fps), ms_to_frame(end, cap.fps))
                 for roi, start, end in roi_specs]

    # set up queues for insertion/deletion
    insertion_order = deque(
        sorted(roi_specs, key=lambda (roi, birth, death): birth))
    deletion_order = deque(
        sorted(insertion_order, key=lambda (roi, birth, death): death))

    # get last interesting frame number for progress-print
    last_deletion_frame = min(deletion_order[-1][2], cap.frame_count)

    while True:
        # NOTE: fast forwarding to the next birth/death (via cap.play) instead
        # of executing the loop body for every frame has been tried and does
        # not seem to yield a noticeable performance boost compared to this
        # simpler version
        # also, skipping via setting attributes seems to be non-exact for some
        # types of videos -> see comments in image_series function

        # add ROIs
        take_screenshot = False
        while insertion_order:
            roi, birth, _ = insertion_order[0]
            if cap.pos_frames + 1 == birth:
                insertion_order.popleft()
                cap.add_roi(roi)
                take_screenshot = True
            else:
                break

        # generate next frame, draw ROIs
        try:
            frame = next(cap)
        except StopIteration:
            break

        # if ROIs were added, take screenshot
        if take_screenshot:
            im_filename = 'frame_{}.png'.format(int(cap.pos_frames))
            frame.save(os.path.join(saveto, im_filename))

        # delete ROIs
        while deletion_order:
            roi, _, death = deletion_order[0]
            if cap.pos_frames == death:
                deletion_order.popleft()
                save_roi(roi, saveto)
                cap.delete_roi(roi._id)
            else:
                break

        # abort if all data is already collected
        if not cap.rois and not insertion_order:
            break

        # show progress
        print '{:.1f} %\r'.format(100.0 * cap.pos_frames / last_deletion_frame),
        sys.stdout.flush()

    # save leftover ROIs (with death > video length)
    for roi in cap.rois.itervalues():
        save_roi(roi, saveto)

    # save a screenshot of all ROIs
    cap.rois = {}
    cap.pos_frames -= 1
    for roi, _, _ in roi_specs:
        cap.add_roi(roi)
    last_frame = next(cap)
    last_frame.save(os.path.join(saveto, 'all_rois.png'))

    print '100.0 %'
    return saveto
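
# ms_to_frame is used above but defined elsewhere; a minimal sketch of the
# conversion it presumably performs (an assumption, not the original helper):
def ms_to_frame(msec, fps):
    # map a timestamp in milliseconds to the frame number at that frame rate
    return int(round(msec / 1000.0 * fps))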
from pprint import pprint as pretty_print  # needed for pretty_print below
from copy import copy, deepcopy
# copy for 1D lists
# deepcopy for compound objects, i.e. 2D or higher

nums_2d = [
    [1, 2, 3, 4, 5, 6, 7],
    [8, 9, 10, 11, 12, 13, 14, 15],
    [16, 17, 18, 19, 20, 21, 22]
]

# handling arrays
print(nums_2d[1][3])
# 11

print(nums_2d)
# [[1, 2, 3, 4, 5, 6, 7], [8, 9, 10, 11, 12, 13, 14, 15], [16, 17, 18, 19, 20, 21, 22]]

pretty_print(nums_2d)
# [[1, 2, 3, 4, 5, 6, 7],
#  [8, 9, 10, 11, 12, 13, 14, 15],
#  [16, 17, 18, 19, 20, 21, 22]]

nums_2d[2][1] = -5
pretty_print(nums_2d)
# [[1, 2, 3, 4, 5, 6, 7],
#  [8, 9, 10, 11, 12, 13, 14, 15],
#  [16, -5, 18, 19, 20, 21, 22]]

letters = ["A", "B", "C", "D", "E"]
letters_2d = [letters, letters, letters]
pretty_print(letters_2d)
# [['A', 'B', 'C', 'D', 'E'],
#  ['A', 'B', 'C', 'D', 'E'],
#  ['A', 'B', 'C', 'D', 'E']]
def print_object(obj):
    # pretty-print all non-private attributes of an object
    pretty_print({k: v for k, v in obj.__dict__.iteritems() if k[0] != '_'})
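
# A small usage sketch (the class here is made up for illustration):
class Point(object):
    def __init__(self):
        self.x = 1
        self._hidden = 2

print_object(Point())  # -> {'x': 1}; the underscore-prefixed attribute is filtered out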