def print_test_results(self):
    # Print the latest test-batch costs followed by per-layer weight/bias
    # magnitude statistics (mean |w| and mean |w-increment|).
    # Python 2 print statements; trailing commas suppress the newline.
    print ""
    print "======================Test output======================"
    self.print_costs(self.test_outputs[-1])
    print ""
    print "-------------------------------------------------------",
    for i, l in enumerate(self.layers):
        # This is kind of hacky but will do for now.
        if "weights" in l:
            if type(l["weights"]) == n.ndarray:
                # Single weight matrix: one summary line.
                print "%sLayer '%s' weights: %e [%e]" % (
                    NL,
                    l["name"],
                    n.mean(n.abs(l["weights"])),
                    n.mean(n.abs(l["weightsInc"])),
                ),
            elif type(l["weights"]) == list:
                # Multiple weight matrices: one summary line per matrix.
                print ""
                print NL.join(
                    "Layer '%s' weights[%d]: %e [%e]"
                    % (l["name"], i, n.mean(n.abs(w)), n.mean(n.abs(wi)))
                    for i, (w, wi) in enumerate(zip(l["weights"], l["weightsInc"]))
                ),
            print "%sLayer '%s' biases: %e [%e]" % (
                NL,
                l["name"],
                n.mean(n.abs(l["biases"])),
                n.mean(n.abs(l["biasesInc"])),
            ),
    print ""
def inline_diff(self):
    """Simple inline diff that just assumes that either the filename
    has changed, or the text has been completely replaced."""
    css_class = 'InlineDiff'
    old_attr = self._parseField(self.oldValue, filename=self.oldFilename)
    new_attr = self._parseField(self.newValue, filename=self.newFilename)
    # First element of each parsed field is the filename; the rest is content.
    old_fname = old_attr.pop(0) if old_attr else None
    new_fname = new_attr.pop(0) if new_attr else None
    old_text = linesep.join(old_attr or [])
    new_text = linesep.join(new_attr or [])
    pieces = []
    if old_fname != new_fname:
        pieces.append(
            self.inlinediff_fmt
            % ('%s FilenameDiff' % css_class, old_fname, new_fname),
        )
    if old_text != new_text:
        pieces.append(self.inlinediff_fmt % (css_class, old_text, new_text))
    # Implicitly returns None when nothing differs, as callers expect.
    if pieces:
        return linesep.join(pieces)
def print_layer_weights(self, print_entire_array = False): for i,l in enumerate(self.layers): # This is kind of hacky but will do for now. if 'weights' in l: if type(l['weights']) == n.ndarray: print "%sLayer '%s' weights: %e [%e]" % (NL, l['name'], n.mean(n.abs(l['weights'])), n.mean(n.abs(l['weightsInc']))), if print_entire_array: n.set_printoptions(threshold=100) print "weights.shape=%s" % (str(l['weights'].shape)) print "weights=[%s]" % (str(['weights'])), print "weightsInc=[%s]" % (str(l['weightsInc'])), elif type(l['weights']) == list: print "" print NL.join("Layer '%s' weights[%d]: %e [%e]" % (l['name'], i, n.mean(n.abs(w)), n.mean(n.abs(wi))) for i,(w,wi) in enumerate(zip(l['weights'],l['weightsInc']))), if print_entire_array: n.set_printoptions(threshold=100) for i,(w,wi) in enumerate(zip(l['weights'],l['weightsInc'])): print "weights.shape=%s" % (str(w.shape)) print "weights=[%s]" % (str(w)), print "weightsInc=[%s]" % (str(wi)), print "%sLayer '%s' biases: %e [%e]" % (NL, l['name'], n.mean(n.abs(l['biases'])), n.mean(n.abs(l['biasesInc']))), if print_entire_array: n.set_printoptions(threshold=100) print "biases.shape=%s" % (str(l['biases'].shape)) print "biases=[%s]" % (str(l['biases'])), print "biasesInc=[%s]" % (str(l['biasesInc'])), print ""
def prompt_for_player(self):
    '''
    get player attributes from input, initial player instance
    and add player to the game
    '''
    available_colours = self.game.get_available_colours()
    text = linesep.join(["choose type of player", "0 - computer", "1 - human"])
    choice = self.validate_input(text, int, (0, 1))
    if choice == 1:
        # Human player: ask for a name and (if needed) a colour.
        name = self.validate_input("Enter name for player", str, str_len=(1, 30))
        available_options = range(len(available_colours))
        if len(available_options) > 1:
            # show available colours
            options = ["{} - {}".format(index, colour) for index, colour in zip(available_options, available_colours)]
            text = "choose colour" + linesep
            text += linesep.join(options)
            choice = self.validate_input(text, int, available_options)
            colour = available_colours.pop(choice)
        else:
            # only one colour left
            colour = available_colours.pop()
        player = Player(colour, name, self.prompt_choose_pawn)
    elif choice == 0:
        # automatically assign colours
        colour = available_colours.pop()
        player = Player(colour)
    # NOTE(review): 'add_palyer' looks like a typo for 'add_player' —
    # confirm against the Game class before renaming.
    self.game.add_palyer(player)
def check(self): ''' Check repository for unreferenced and missing files ''' # Check if the repo is local if not self.local: raise ISError(u"Repository must be local") local_files = set(listdir(self.config.path)) local_files.remove(self.config.dbname) local_files.remove(self.config.lastname) db_files = set(self.getallmd5()) # check missing files arrow("Checking missing files") missing_files = db_files - local_files if len(missing_files) > 0: out(linesep.join(missing_files)) # check unreferenced files arrow("Checking unreferenced files") unref_files = local_files - db_files if len(unref_files) > 0: out(linesep.join(unref_files)) # check corruption of local files arrow("Checking corrupted files") for f in local_files: fo = PipeFile(join(self.config.path, f)) fo.consume() fo.close() if fo.md5 != f: out(f)
def discard():
    # Closure: flushes accumulated, unparsed `lines` (from the enclosing
    # scope, along with `fragile` and `quiet`). Python 2 print-to-file syntax.
    if ''.join(lines):
        if fragile:
            # Strict mode: unparsed text is an error.
            raise ParseException('Unexpected text:%s%s'
                                 % (linesep, linesep.join(lines)))
        elif not quiet:
            # Lenient mode: warn on stderr unless silenced.
            print >> stderr, 'Ignoring text:%s%s' % \
                (linesep, linesep.join(lines))
def __repr__(self):
    """Summarize the structure: delimiter, then output and input mappings."""
    #[str(self.delimiter),
    idx = DataStructure._INDEX_VALUE
    outputs = sep.join('%s: %s' % (str(key), str(self.to_dict[key][idx]))
                       for key in self.to_dict)
    inputs = sep.join('%s: %s' % (str(key), str(self.from_dict[key][idx]))
                      for key in self.from_dict)
    sections = [self.delimiter, '--Outputs--', outputs, '--Inputs--', inputs]
    return sep.join(sections)
def write_cell_data(file_path, mesh, scalars={}, vectors={}, title="fvm2D"):
    # Write a legacy-VTK file for a 2D FV mesh: CELLS/CELL_TYPES sections for
    # interior cells plus boundary edges, then CELL_DATA scalars and vectors.
    # NOTE(review): mutable default args ({}) are only iterated, never
    # mutated, so they are harmless here.
    from os import linesep
    with open(file_path, "w") as f:
        write_header_pnts(f, mesh, title)
        # Boundary edges are appended as extra (line) cells after the mesh cells.
        nbEdges = sum([len(boundary) for boundary in mesh.boundaries])
        # Connectivity list length: each cell contributes len(c)+1 ints;
        # each edge contributes its point count (+1 via ncells below).
        tot_len = sum([len(c) for c in mesh.cells]) + 2*nbEdges
        ncells = len(mesh.cells) + nbEdges
        f.write(linesep)
        f.write("CELLS "+str(ncells)+" "+str(tot_len+ncells)+linesep)
        for c in mesh.cells:
            f.write(str(len(c))+" "+" ".join(map(str, c))+linesep)
        for boundary in mesh.boundaries_points:
            for e in boundary:
                f.write(str(len(e))+" "+" ".join(map(str, e))+linesep)
        f.write(linesep)
        f.write("CELL_TYPES "+str(ncells)+linesep)
        for c in mesh.cells:
            # VTK type codes: 5=triangle, 9=quad, 7=polygon.
            if len(c) == 3:
                f.write("5"+linesep)
            elif len(c) == 4:
                f.write("9"+linesep)
            else:
                f.write("7"+linesep)
        # Type 3 = VTK_LINE for every boundary edge.
        f.write(linesep.join(["3"]*nbEdges))
        f.write(linesep)
        f.write("CELL_DATA "+str(ncells)+linesep)
        for var in scalars:
            field = scalars[var]
            f.write("SCALARS "+var+" float 1"+linesep)
            f.write("LOOKUP_TABLE default"+linesep)
            f.write(linesep.join(map(str, field.data))+linesep)
            for b in field.boundaries:
                f.write(linesep.join(map(str, b.data))+linesep)
            f.write(linesep)
        for var in vectors:
            # 2D vectors are written with a zero z-component.
            xField = vectors[var][0]
            yField = vectors[var][1]
            f.write("VECTORS "+var+" float"+linesep)
            for x, y in zip(xField.data, yField.data):
                f.write(" ".join(map(str, [x, y, 0.0]))+linesep)
            for xBs, yBs in zip(xField.boundaries, yField.boundaries):
                for x, y in zip(xBs, yBs):
                    f.write(" ".join(map(str, [x, y, 0.0]))+linesep)
            f.write(linesep)
def send_via_serial(self, vm, commands, action="send"):
    """
    Connect to vm via netcat
    pipe files for netcat are in specific directory on ESX datastore
    @param vm: VirtualMachine instance
    @param commands: list of commands
    @return: tuple of (vm name, joined command output log)
    """
    commands_log = list()
    conn = self.get_serial_connection_to_vyatta(vm)
    logging.info('%s: connected' % vm.name_on_esx)
    pattern = vm.ssh_pattern
    timeout = CONFIGURE_TIMEOUT
    err_log = ""
    # Sends commands
    for cmd in commands:
        try:
            conn.sendline(cmd)
            result = conn.expect(pattern, timeout=timeout)
            # Accumulate a verbose trace for debug-level logging.
            err_log += "\n======="
            err_log += "\ncmd " + cmd
            err_log += "\npattern " + str(pattern)
            err_log += "\nresult= " + str(result)
            err_log += "\nbefore " + conn.before
            err_log += "\nafter " + conn.after
            err_log += "\n======="
            if result == 1:
                # enter password for sudo
                err_log += "\nGOT SUDO "
                conn.sendline(vm.password)
                conn.expect(pattern, timeout=timeout)
                err_log += "\nbefore " + conn.before
                err_log += "\nafter " + conn.after
                err_log += "\n======="
            commands_log.append('output: ' + conn.before + '\n')
            if cmd.startswith('ls'):
                logging.info("{}:packages which will be installed:{}"
                             "".format(vm.name_on_esx, conn.before))
        # BUG FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
        # narrowed to Exception so interrupts still propagate.
        except Exception:
            logging.error(
                "{}:{}".format(vm.name_on_esx, linesep.join(commands_log)))
        finally:
            logging.debug(err_log)
    conn.close()
    log = linesep.join(commands_log)
    # Surface configuration failures loudly; otherwise keep output at debug.
    if 'Commit failed' in log \
            or 'Set failed' in log \
            or 'dpkg: error' in log:
        logging.error("{}:{}".format(vm.name_on_esx, log))
    else:
        logging.debug("{}:{}".format(vm.name_on_esx, log))
    logging.info('{}: commands were sent'.format(vm.name_on_esx))
    return vm.name_on_esx, log
def print_network(self):
    # Print per-layer weight/bias magnitude statistics; list-valued weights
    # additionally show (min, max). Python 2 print statements; trailing
    # commas suppress the newline.
    print "-------------------------------------------------------",
    for i, l in enumerate(self.layers):
        # This is kind of hacky but will do for now.
        if 'weights' in l:
            if type(l['weights']) == n.ndarray:
                print "%sLayer '%s' weights: %e [%e]" % (NL, l['name'], n.mean(n.abs(l['weights'])), n.mean(n.abs(l['weightsInc']))),
            elif type(l['weights']) == list:
                print ""
                print NL.join("Layer '%s' weights[%d]: %e [%e] (%e,%e)" % (l['name'], i, n.mean(n.abs(w)), n.mean(n.abs(wi)), n.min(w), n.max(w)) for i, (w, wi) in enumerate(zip(l['weights'], l['weightsInc']))),
            print "%sLayer '%s' biases: %e [%e]" % (NL, l['name'], n.mean(n.abs(l['biases'])), n.mean(n.abs(l['biasesInc']))),
    print ""
def __repr__(self):
    # Multi-line dump of an LDAP schema element: oid, obsolete flag, name,
    # description, extensions, experimental attributes, and oid_info.
    r = ': ' + self.oid
    r += ' [OBSOLETE]' if self.obsolete else ''
    r += (linesep + '  Short name: ' + list_to_string(self.name)) if self.name else ''
    r += (linesep + '  Description: ' + self.description) if self.description else ''
    # NOTE(review): '<__desc__>' looks like a code-generation placeholder
    # that was never substituted — confirm whether it should be here.
    r += '<__desc__>'
    r += (linesep + '  Extensions:' + linesep + linesep.join(['    ' + s[0] + ': ' + list_to_string(s[1]) for s in self.extensions])) if self.extensions else ''
    r += (linesep + '  Experimental:' + linesep + linesep.join(['    ' + s[0] + ': ' + list_to_string(s[1]) for s in self.experimental])) if self.experimental else ''
    r += (linesep + '  OidInfo: ' + str(self.oid_info)) if self.oid_info else ''
    r += linesep
    return r
def __repr__(self):
    """Render the General and Subject key/value sections as indented lines."""
    # The first line is "General" when that section has entries, otherwise
    # an empty string (preserving the leading blank line the caller expects).
    parts = ["General"] if len(self.general) > 0 else [""]
    if len(self.general) > 0:
        for key, value in self.general.items():
            parts.append("  " + key + " : " + value)
    if len(self.subject) > 0:
        parts.append("Subject")
        for key, value in self.subject.items():
            parts.append("  " + key + " : " + value)
    return linesep.join(parts)
def __repr__(self):
    # Full dump of a DSA (LDAP server) schema: each known schema collection
    # is rendered either as a comma-joined listing (when it is a sequence
    # type) or via str() as a fallback; trailing 'other' attributes are
    # rendered last, with bytes escaped when they cannot be decoded.
    r = 'DSA Schema from: ' + self.schema_entry
    r += linesep
    if isinstance(self.attribute_types, SEQUENCE_TYPES):
        r += ('  Attribute types:' + linesep + '    ' + ', '.join([str(self.attribute_types[s]) for s in self.attribute_types])) if self.attribute_types else ''
    else:
        r += ('  Attribute types:' + str(self.attribute_types))
    r += linesep
    if isinstance(self.object_classes, SEQUENCE_TYPES):
        r += ('  Object classes:' + linesep + '    ' + ', '.join([str(self.object_classes[s]) for s in self.object_classes])) if self.object_classes else ''
    else:
        r += ('  Object classes:' + str(self.object_classes))
    r += linesep
    if isinstance(self.matching_rules, SEQUENCE_TYPES):
        r += ('  Matching rules:' + linesep + '    ' + ', '.join([str(self.matching_rules[s]) for s in self.matching_rules])) if self.matching_rules else ''
    else:
        r += ('  Matching rules:' + str(self.matching_rules))
    r += linesep
    if isinstance(self.matching_rule_uses, SEQUENCE_TYPES):
        r += ('  Matching rule uses:' + linesep + '    ' + ', '.join([str(self.matching_rule_uses[s]) for s in self.matching_rule_uses])) if self.matching_rule_uses else ''
    else:
        r += ('  Matching rule uses:' + str(self.matching_rule_uses))
    r += linesep
    if isinstance(self.dit_content_rules, SEQUENCE_TYPES):
        r += ('  DIT content rules:' + linesep + '    ' + ', '.join([str(self.dit_content_rules[s]) for s in self.dit_content_rules])) if self.dit_content_rules else ''
    else:
        r += ('  DIT content rules:' + str(self.dit_content_rules))
    r += linesep
    if isinstance(self.dit_structure_rules, SEQUENCE_TYPES):
        r += ('  DIT structure rules:' + linesep + '    ' + ', '.join([str(self.dit_structure_rules[s]) for s in self.dit_structure_rules])) if self.dit_structure_rules else ''
    else:
        r += ('  DIT structure rules:' + str(self.dit_structure_rules))
    r += linesep
    if isinstance(self.name_forms, SEQUENCE_TYPES):
        r += ('  Name forms:' + linesep + '    ' + ', '.join([str(self.name_forms[s]) for s in self.name_forms])) if self.name_forms else ''
    else:
        r += ('  Name forms:' + str(self.name_forms))
    r += linesep
    if isinstance(self.ldap_syntaxes, SEQUENCE_TYPES):
        r += ('  LDAP syntaxes:' + linesep + '    ' + ', '.join([str(self.ldap_syntaxes[s]) for s in self.ldap_syntaxes])) if self.ldap_syntaxes else ''
    else:
        r += ('  LDAP syntaxes:' + str(self.ldap_syntaxes))
    r += linesep
    r += 'Other:' + linesep
    for k, v in self.other.items():
        r += '  ' + str(k) + ': ' + linesep
        try:
            r += (linesep.join(['    ' + str(s) for s in v])) if isinstance(v, SEQUENCE_TYPES) else str(v)
        except UnicodeDecodeError:
            # Raw bytes that cannot be decoded are shown escaped instead.
            r += (linesep.join(['    ' + str(escape_bytes(s)) for s in v])) if isinstance(v, SEQUENCE_TYPES) else str(escape_bytes(v))
        r += linesep
    return r
def header(self, testname):
    # Print a banner for a test run on MPI rank 0 only: counts of input
    # files / time-series variables actually used vs. available, the
    # specifier arguments, and the equivalent s2srun command line.
    # Python 2 print statement; `eol` is a module-level line separator.
    if self.rank == 0:
        mf = len(makeTestData.slices)
        mt = len(makeTestData.tsvars)
        nf = len(self.spec_args['infiles'])
        # All variables are converted when no explicit timeseries subset is given.
        nt = mt if self.spec_args['timeseries'] is None else len(
            self.spec_args['timeseries'])
        hline = '-' * 100
        hdrstr = [hline,
                  '{}.{}:'.format(self.__class__.__name__, testname),
                  '',
                  '   specifier({}/{} infile(s), {}/{} TSV(s), ncfmt={ncfmt}, compression={compression}, meta1d={meta1d}, backend={backend})'.format(
                      nf, mf, nt, mt, **self.spec_args),
                  '   s2srun {}'.format(' '.join(str(a) for a in self.runargs())),
                  hline]
        print eol.join(hdrstr)
def print_test_results(self):
    # Print the latest test-batch costs, then per-layer weight statistics
    # including the normalized L2 norm (||w||/sqrt(size)). Python 2 prints.
    print ""
    print "======================Test output======================"
    self.print_costs(self.test_outputs[-1])
    print ""
    print "-------------------------------------------------------"
    for i, l in enumerate(self.layers):
        # This is kind of hacky but will do for now.
        if 'weights' in l:
            if type(l['weights']) == n.ndarray:
                print "%sLayer '%s' weights: %e [%e] L2 norm/sqrt(size) %e" % (NL, l['name'], n.mean(n.abs(l['weights'])), n.mean(n.abs(l['weightsInc'])), n.linalg.norm(l['weights'])/sqrt(l['weights'].size))
            elif type(l['weights']) == list:
                print NL.join("Layer '%s' weights[%d]: %e [%e] L2 norm/sqrt(size) %e" % (l['name'], i, n.mean(n.abs(w)), n.mean(n.abs(wi)), n.linalg.norm(w)/sqrt(w.size)) for i, (w, wi) in enumerate(zip(l['weights'], l['weightsInc']))),
            print "%sLayer '%s' biases: %e [%e]" % (NL, l['name'], n.mean(n.abs(l['biases'])), n.mean(n.abs(l['biasesInc'])))
    print ""
def print_test_results(self):
    # Print the latest test output, averaged recent test outputs (unless in
    # test-only mode), then per-layer weight/bias statistics including the
    # inc/weight ratio and max |w|. Layers are iterated in id order.
    # Python 2 prints; trailing commas suppress newlines.
    print NL + "======================Test output======================"
    self.print_costs(self.test_outputs[-1])
    if not self.test_only:
        print NL + "----------------------Averages-------------------------"
        self.print_costs(self.aggregate_test_outputs(self.test_outputs[-len(self.test_batch_range):]))
    print NL + "-------------------------------------------------------",
    for name, val in sorted(self.layers.items(), key=lambda x: x[1]['id']):
        # This is kind of hacky but will do for now.
        l = self.layers[name]
        if 'weights' in l:
            # (name, index, mean|w|, mean|wInc|, max|w|) per weight matrix.
            wscales = [(l['name'], i, n.mean(n.abs(w)), n.mean(n.abs(wi)), n.max(n.abs(w))) for i, (w, wi) in enumerate(zip(l['weights'], l['weightsInc']))]
            print ""
            print NL.join("Layer '%s' weights[%d]: %e [%e] [%e] max: %e " % (s[0], s[1], s[2], s[3], s[3]/s[2] if s[2] > 0 else 0, s[4]) for s in wscales),
            print "%sLayer '%s' biases: %e [%e] max: %e " % (NL, l['name'], n.mean(n.abs(l['biases'])), n.mean(n.abs(l['biasesInc'])), n.max(n.abs(l['biases']))),
    print ""
def export_kv(self, PATH=None):
    # Export all templates (and their nodes' fields) to kv-language text,
    # prepending #:include and #:directive headers per template.
    # Python 2 print statements; config flags come from the Designer section.
    if PATH is None:
        from conf import gamepath
        PATH = gamepath
    from kivy.logger import Logger
    #print " -- EXPORT KV -- "*10
    from conf import CP
    relativ = CP.getboolean('Designer', 'TMPL_RELATIVE_SIZE_POS')
    save_cm = CP.getboolean('Designer', 'SAVE_IN_CM')
    save_relpath = CP.getboolean('Designer', 'TMPL_RELATIVE_GAMEPATH')
    imports = list()
    directives = self.current_template.directives[:]
    #Will be used to find a interresting base for relpath
    print 'at this stage, i should insert a check for name vs libraty & templates'
    if not self.current_template.template_name:
        print 'Current template as no name: reverting to default'
        self.current_template.template_name = "TMPL"
    KV = list()
    for template in self.templates:
        # Root template export; returns its kv lines plus imports/directives.
        tmpls, imports, directives = template.export_to_kv(level=1, save_cm=save_cm, relativ=relativ, save_relpath=save_relpath, RELPATH=PATH)
        Logger.debug('export these imports to kv: ' + str(imports))
        print self.nodes, template
        for node in self.nodes[template].nodes:
            if not hasattr(node, 'target'):
                continue
            field = node.target
            if field == template:
                #skip export of the root template: done above
                continue
            t, i, d = field.export_field(level=2, save_cm=save_cm, relativ=relativ, save_relpath=save_relpath, RELPATH=PATH)
            tmpls.extend(t)
            imports.extend(i)
            directives.extend(d)
        #Prepend include
        if imports:
            tmpls.insert(0, "")
            for imp in imports:
                if imp:
                    tmpls.insert(0, "#:include %s" % imp)
        Logger.debug("directives at the end" + str(directives))
        if directives:
            tmpls.insert(0, "")
            for directive in directives:
                tmpls.insert(0, "#:%s" % directive)
        from os import linesep
        KV.append(linesep.join(tmpls))
    # NOTE(review): KV.extend(' ') appends the single character ' ' as an
    # element (extend iterates the string) — confirm intent vs. append.
    KV.extend(' ')
    return linesep.join(KV)
def main():
    # Read a quran translation file (one 'meta|meta|text' line per verse),
    # strip comment/blank lines, and write the verse texts to the output
    # file joined by the platform line separator.
    if len(argv) < 3:
        exit('usage: %s quran_file trans_file' % argv[0])
    charset = 'utf-8'
    quran_file = argv[1]
    #prepare surah numbers to be splitted
    surah_numbers = set()
    # read quran translation file and split each surah in a list
    surah = []
    description = []
    surah_trans = {}
    # NOTE(review): open(path, 'U', charset) only accepts an encoding as the
    # third positional arg with codecs.open/io.open — confirm which `open`
    # is in scope at module level.
    trans_lines = open(quran_file, 'U', charset).read().split('\n')
    current = 1
    for line in trans_lines:
        #line = str(line).strip().replace('\xef\xbb\xbf', '')
        if line == '' or line.startswith('#'):
            description.append(line)
            continue
        # Everything after the last '|' is the verse text.
        parts = line.rpartition('|')
        surah.append(parts[2])
    #dest = ''.join( [ quran_file, ".trans"])
    dest = argv[2];
    open(dest, 'w', charset).writelines(linesep.join(surah))
def normalize_paragraph(text, line_len=80, indent=''):
    """normalize a text to display it with a maximum line size and
    optionally arbitrary indentation. Line jumps are normalized. The
    indentation string may be used top insert a comment mark for instance.

    :type text: str or unicode
    :param text: the input text to normalize

    :type line_len: int
    :param line_len: expected maximum line's length, default to 80

    :type indent: str or unicode
    :param indent: optional string to use as indentation

    :rtype: str or unicode
    :return:
      the input text normalized to fit on lines with a maximized size
      inferior to `line_len`, and optionally prefixed by an
      indentation string
    """
    # Collapse runs of whitespace, then repeatedly split off one line's
    # worth of text, leaving room for the indent prefix.
    remaining = _NORM_SPACES_RGX.sub(' ', text)
    usable_len = line_len - len(indent)
    wrapped = []
    while remaining:
        aline, remaining = splittext(remaining.strip(), usable_len)
        wrapped.append(indent + aline)
    return linesep.join(wrapped)
def custom_result_summary_renderer(res):
    # Summarize `get` results: counts of attempted/succeeded/failed files,
    # plus a per-item listing when the result set is small (< 10).
    from datalad.ui import ui
    from os import linesep
    if not len(res):
        ui.message("Got nothing new")
        return
    nfiles = count_results(res, type='file')
    nsuccess_file = count_results(res, type='file', status='ok')
    nfailure = nfiles - nsuccess_file
    msg = "Tried to get %d %s that had no content yet." % (
        nfiles, single_or_plural("file", "files", nfiles))
    if nsuccess_file:
        msg += " Successfully obtained %d. " % nsuccess_file
    if nfailure:
        msg += " %d (failed)." % (nfailure,)
    ui.message(msg)
    # if just a few or less than initially explicitly requested
    if len(res) < 10:
        msg = linesep.join([
            "{path}{type} ... {suc}".format(
                suc=item.get('status'),
                path=item.get('path'),
                type=' [{}]'.format(item['type']) if 'type' in item else '')
            for item in res])
        ui.message(msg)
def analyse_file(self):
    """Parse self.file_name into a TextFileDOM, validate it, and log a
    ready-to-use 'mktbl' command describing the table it would create.

    Raises IOError when the file cannot be read and ValueError when the
    file has no data or is internally inconsistent.
    """
    # reading file
    try:
        dom = read_ccdb_text_file(self.file_name)
    except IOError as error:
        log.warning(LogFmt("Unable to read file '{0}'. The error message is: '{1}'", self.file_name, error))
        raise
    # Is there data at all?
    if not dom.has_data:
        message = "Seems like file has no appropriate data"
        log.warning(message)
        # BUG FIX: ValueError(message=message) raised
        # "TypeError: ValueError does not take keyword arguments";
        # the message must be passed positionally.
        raise ValueError(message)
    # check what we've got
    assert isinstance(dom, TextFileDOM)
    if not dom.data_is_consistent:
        message = "Inconsistency error. " + dom.inconsistent_reason
        log.warning(message)
        # BUG FIX: same positional-argument fix as above.
        raise ValueError(message)
    # Column spec: explicit names when present, otherwise just a count.
    if dom.column_names:
        columns_str = " ".join([col_name for col_name in dom.column_names])
    else:
        columns_str = str(len(dom.rows[0])) + "col"
    name = self.table_path if self.table_path else "<name>"
    comment = self.comment if self.comment else "<comments>"
    log.info("Command to create a table: " + linesep)
    log.info(LogFmt("mktbl {0} -r {1} {2} #{3}", name, len(dom.rows), columns_str, comment))
    log.info(linesep)
    if dom.comment_lines:
        log.info(LogFmt("{0}Comments in file: {0}{1}", linesep, linesep.join(ln for ln in dom.comment_lines)))
def raise_error(value):
    """Reject an unparseable period string with a descriptive ValueError."""
    message_lines = [
        "Expected a period (eg. '2017', '2017-01', '2017-01-01', ...); got: '{}'.".format(value),
        "Learn more about legal period formats in OpenFisca:",
        "<https://openfisca.org/doc/coding-the-legislation/35_periods.html#periods-in-simulations>.",
    ]
    raise ValueError(linesep.join(message_lines))
def raw_info(self):
    """returns a Unicode string of low-level MetaData information

    whereas __unicode__ is meant to contain complete information
    at a very high level
    raw_info() should be more developer-specific and with
    very little adjustment or reordering to the data itself
    """
    from os import linesep
    from audiotools import output_table
    # align text strings on the "=" sign, if any
    table = output_table()
    for comment in self.comment_strings:
        row = table.row()
        if u"=" in comment:
            # Vorbis-style "TAG=value" comment: three aligned columns.
            (tag, value) = comment.split(u"=", 1)
            row.add_column(tag, "right")
            row.add_column(u"=")
            row.add_column(value)
        else:
            # Free-form comment: fill remaining columns with blanks so the
            # table stays rectangular.
            row.add_column(comment)
            row.add_column(u"")
            row.add_column(u"")
    return (u"{}:  {}".format(self.__comment_name__(), self.vendor_string) +
            linesep + linesep.join(table.format()))
def build_tax_benefit_system(country_package_name, extensions, reforms):
    """Build a country TaxBenefitSystem, then apply extensions and reforms.

    :param country_package_name: importable module name, or None to autodetect.
    :param extensions: iterable of extension names to load, or falsy.
    :param reforms: iterable of reform paths to apply in order, or falsy.
    :raises ImportError: when the package is missing or not a country package.
    """
    if country_package_name is None:
        country_package_name = detect_country_package()
    try:
        country_package = importlib.import_module(country_package_name)
    except ImportError:
        message = linesep.join([traceback.format_exc(),
                                'Could not import module `{}`.'.format(country_package_name),
                                'Are you sure it is installed in your environment? If so, look at the stack trace above to determine the origin of this error.',
                                'See more at <https://github.com/openfisca/country-template#installing>.'])
        raise ImportError(message)
    if not hasattr(country_package, 'CountryTaxBenefitSystem'):
        raise ImportError('`{}` does not seem to be a valid Openfisca country package.'.format(country_package_name))
    # Redundant re-import removed: the module was already imported above and
    # importlib.import_module is idempotent.
    tax_benefit_system = country_package.CountryTaxBenefitSystem()
    if extensions:
        for extension in extensions:
            tax_benefit_system.load_extension(extension)
    if reforms:
        # Each reform wraps the previous system, so reapply in order.
        for reform_path in reforms:
            tax_benefit_system = tax_benefit_system.apply_reform(reform_path)
    return tax_benefit_system
def generate(name, txt, dic, x0=0, y0=0, w=None, h=None):
    '''
    Creates xpm file with given name, given draw as string,
    colors as dict. Extra args are for generate parts of xpm.
    '''
    all_rows = txt.split("\n")
    if w is None:
        w = len(all_rows[0])
    if h is None:
        h = len(all_rows)
    # Crop the requested window out of the character grid.
    window = [row[x0:x0 + w] for row in all_rows[y0:y0 + h]]
    # Palette keeps first-appearance order of characters in the window.
    palette = {}
    for row in window:
        for ch in row:
            if ch not in palette:
                palette[ch] = dic[ch]
    header = [
        "/* XPM */",
        "static char * %s = {" % name.replace("-", "_"),
        "\"%d %d %d 1\", " % (w, h, len(palette)),
    ]
    body = ["\"%s\tc %s\", " % item for item in palette.items()]
    body += ["\"%s\", " % row for row in window]
    body.append("};")
    with open("%s.xpm" % name, "w") as f:
        f.write(linesep.join(header + body))
def unified_diff(self):
    """Return a unified diff"""
    # Normalize both field versions to utf-8 before diffing.
    old_lines = [safe_utf8(line)
                 for line in self._parseField(self.oldValue,
                                              filename=self.oldFilename)]
    new_lines = [safe_utf8(line)
                 for line in self._parseField(self.newValue,
                                              filename=self.newFilename)]
    diff = difflib.unified_diff(old_lines, new_lines, self.id1, self.id2)
    return linesep.join(diff)
def compareAgainstGoldStandard(self, inFilename):
    """
    Read and process the input file and compare its output against the
    gold standard.
    """
    inFilenameBase = splitext(basename(inFilename))[0]
    fullPathNamespace = inFilenameBase.replace(sep, '.')
    # Each trial pairs a gold-file suffix with an __Options configuration.
    trials = (
        ('.out', (True, True, False, fullPathNamespace, inFilenameBase, 4)),
        ('.outnc', (True, False, False, fullPathNamespace, inFilenameBase, 4)),
        ('.outnn', (True, True, False, fullPathNamespace, None, 4)),
        ('.outbare', (False, False, False, fullPathNamespace, None, 4))
    )
    for options in trials:
        output = self.readAndParseFile(inFilename, TestDoxypypy.__Options(*options[1]))
        goldFilename = splitext(inFilename)[0] + options[0] + '.py'
        goldFile = open(goldFilename)
        goldContentLines = goldFile.readlines()
        goldFile.close()
        # We have to go through some extra processing to ensure line endings
        # match across platforms.
        goldContent = linesep.join(line.rstrip() for line in goldContentLines)
        self.assertEqual(output.rstrip(linesep), goldContent.rstrip(linesep))
def go(url):
    # Generator: scrape a page and yield its title, then (key, value)
    # metadata pairs, then (webservice_type, url) pairs for linked services.
    # Python 2 print statements; `html` is lxml.html, `scraperwiki` scrapes.
    page = html.document_fromstring(scraperwiki.scrape(url))
    headings = page.cssselect("div.Text2 strong")
    if len(headings) > 1:
        # NOTE(review): this yields a bare string while later yields are
        # 2-tuples — confirm consumers handle both shapes.
        yield headings[1].text
    key = ""
    vals = []
    for metadata in page.cssselect("div#metadata div.Text"):
        if metadata[0].tag == "strong":
            # A <strong> starts a new key; flush the previous key's values.
            if key and vals:
                yield key, linesep.join(vals)
                vals = []
            key = metadata[0].text_content().strip()
            try:
                vals.append(metadata[1].text_content().strip())
            except IndexError:
                pass
        else:
            # Continuation line for the current key.
            print metadata.text.strip(), metadata[0].text_content()
            vals.append(metadata.text + metadata[0].text_content().strip())
    for img in page.cssselect("a img"):
        try:
            webservice = img.getparent()
            url = webservice.attrib["href"]
            if url != "rss/preview.xml":
                webservice_type = img.attrib["alt"]
                yield webservice_type, url
        except IndexError:
            pass
        except KeyError:
            # Anchor without href/alt; only report non-onclick anchors.
            msg = html.tostring(webservice)
            if not "onclick" in msg:
                print "ERR", msg, url
def header(self):
    # Print a banner on MPI rank 0 only, naming the calling test (via the
    # stack) and echoing specifier/create/convert argument dictionaries.
    # Python 2 print statement; `eol` is a module-level line separator.
    if self.rank == 0:
        mf = len(makeTestData.slices)
        mt = len(makeTestData.tsvars)
        nf = len(self.spec_args['infiles'])
        # All variables are converted when no explicit timeseries subset is given.
        nt = mt if self.spec_args['timeseries'] is None else len(
            self.spec_args['timeseries'])
        hline = '-' * 100
        hdrstr = ['', hline,
                  '{}.{}:'.format(self.__class__.__name__, inspect.stack()[1][3]),
                  '',
                  '   specifier({}/{} infile(s), {}/{} TSV(s), ncfmt={ncfmt}, compression={compression}, meta1d={meta1d}, backend={backend}, metafile={metafile})'.format(
                      nf, mf, nt, mt, **self.spec_args),
                  '   create(serial={serial}, verbosity={verbosity}, wmode={wmode}, once={once}, simplecomm={simplecomm})'.format(
                      **self.create_args),
                  '   convert(output_limit={output_limit}, rchunks={rchunks}, wchunks={wchunks})'.format(**self.convert_args),
                  hline, '']
        print eol.join(hdrstr)
def show_all(self):
    ''' Show changelog for all versions '''
    # Python 2 cmp-style sorted(iterable, cmp, reverse) — strvercmp orders
    # version strings; newest versions are shown first.
    for ver in sorted(self, strvercmp, reverse=True):
        out(u'-- #purple#version:#reset# %s' % ver)
        # self[ver] is the list of changelog lines for that version.
        out(linesep.join(self[ver]))
def _check_outfile(self, tsvar, **args):
    # Run all output-file assertions for one time-series variable and fail
    # the test with a readable list of whichever checks returned False.
    # Python 2: dict.iteritems().
    assertions_dict = mkTestData.check_outfile(tsvar=tsvar, **args)
    failed_assertions = [
        key for key, value in assertions_dict.iteritems() if value is False
    ]
    assert_msgs = ['Output file check for variable {0!r}:'.format(tsvar)]
    assert_msgs.extend(
        ['   {0}'.format(assrt) for assrt in failed_assertions])
    self.assertEqual(len(failed_assertions), 0, eol.join(assert_msgs))
def file_write_txtlist(f, d, **ka):
    """Write txt file as list"""
    from os import linesep
    # Join once so the written content and the return value are identical.
    joined = linesep.join(d)
    logging.debug('Start writing file ' + f)
    with open(f, 'w', **ka) as out_fh:
        out_fh.write(joined)
    logging.debug('Finish writing file ' + f)
    return joined
def _indent(code, num_levels=1):
    """
    Indent each line of code with num_levels * 4 spaces

    code -- some python code as a (multi-line) string
    """
    prefix = " " * (4 * num_levels)
    # Lines are delimited by os.linesep, matching the caller's convention.
    return linesep.join(prefix + line for line in code.split(linesep))
def __repr__(self):
    """Render the tree as 'key: value' lines, recursing into subtrees."""
    def render(key, value):
        # Nested trees get a header line plus an indented recursive dump.
        if isinstance(value, InfoTree):
            return '%s:' % key + linesep + indent(repr(value), '  ')
        return '%s: %s' % (key, value)
    return linesep.join(render(k, v) for k, v in self._data.items())
def pretty_inputs_string(self):
    """
    :return: printable version of input in rows and columns
    """
    inputs = self.get_inputs()
    width = self.sight_x
    height = self.sight_y_up + self.sight_y_down
    # One printed row per sight row, each a width-sized slice of inputs.
    rows = [str(inputs[width * row:(row + 1) * width])
            for row in range(height)]
    return linesep.join(rows)
def evaluate(self, result_file, seed_words, n_bags):
    # For each bag size, build word bags from the seed words and log the
    # 100 most-similar words per bag; optionally mirror the output to a
    # binary result file.
    f = None
    if result_file is not None:
        # NOTE(review): the file is not closed if an exception occurs below —
        # consider a with-statement or try/finally.
        f = open(result_file, 'wb')
    for n_bag in n_bags:
        bags = self.build_bags(seed_words, n_bag)
        for bag in bags:
            most_similar_words = self.model.most_similar(positive=bag, topn=100)
            info("Most similar words to: " + str(bag) + linesep)
            info(linesep.join([x[0] + " - distance: " + str(x[1]) for x in most_similar_words]))
            if f is not None:
                # File is opened in binary mode, so everything is encoded.
                f.write(b"Most similar words to: " + str(bag).encode("utf-8") + linesep.encode("utf-8"))
                f.write(linesep.join([x[0] + " - distance: " + str(x[1]) for x in most_similar_words])
                        .encode("utf-8"))
                f.write(linesep.encode("utf-8"))
    if f is not None:
        f.flush()
        f.close()
def main(argv):
    # CLI entry point: validate args, print tab-separated version triples,
    # and exit with a runtime error code on any failure.
    try:
        config = _validate_input(argv)
        print(
            linesep.join('\t'.join(triple) for triple in cross_versions(config)))
    except Exception as e:
        # NOTE(review): broad catch discards the traceback — only the
        # message is shown to the user.
        print(str(e))
        exit(_ERROR_RUNTIME)
def parse_errors(json):
    """Extracts the error messages returned from a failed API call

    Args:
        json (dict): JSON dictionary of the response body

    Returns:
        Line-delimited string of error messages returned by NGP VAN
    """
    # NOTE: despite the docstring, `json` is iterated directly, so it must
    # be a sequence of {"text": ...} error objects.
    messages = [error["text"] for error in json]
    return linesep.join(messages)
def write_ignores(ignore_fn, ignores):
    """
    Writes the lines to ignore when writing to orgs_analysis.csv.

    :param ignore_fn: Name of file containing the lines to ignore.
    :param ignores: A set of the lines to ignore.
    :return: None
    """
    # Python 2 print-to-file syntax; lines are written sorted and
    # linesep-joined, with print appending the final newline.
    with open(ignore_fn, 'wb') as ignore_fp:
        print >> ignore_fp, linesep.join(list(sorted(ignores)))
def check_variable_defined_for_entity(self, variable_name):
    # Verify that `variable_name` is defined for this entity type; raise a
    # descriptive ValueError otherwise. get_variable(check_existence=True)
    # already raises if the variable does not exist at all.
    variable_entity = self.simulation.tax_benefit_system.get_variable(variable_name, check_existence = True).entity
    if not isinstance(self, variable_entity):
        message = linesep.join([
            u"You tried to compute the variable '{0}' for the entity '{1}';".format(variable_name, self.plural),
            u"however the variable '{0}' is defined for '{1}'.".format(variable_name, variable_entity.plural),
            u"Learn more about entities in our documentation:",
            u"<http://openfisca.org/doc/coding-the-legislation/50_entities.html>."])
        raise ValueError(message)
def collect(cls, pl, st, en):
    """Aggregate per-process results into the module-level res/ec globals:
    res gets a joined summary, ec the maximum exit code seen."""
    global res, ec
    summary = [f"time: {descTime(en - st)}"]
    for proc in pl:
        # Track the worst (highest) exit code across all processes.
        if proc.ec > ec:
            ec = proc.ec
        summary.append(f"process{proc.order} id: {proc.pid}")
        summary.append(proc.descEC())
    summary.append("")
    res = linesep.join(summary)
def add_streams(self, event_stream_infos) -> None:
    """Append one notebook code cell per stream, numbering streams
    consecutively across all events."""
    index = 0
    for event_name, stream_infos in event_stream_infos.items():  # per event
        for stream_name, stream_info in stream_infos.items():
            source_lines = self._get_stream_code(event_name, stream_name,
                                                 index, stream_info)
            self.cells.append(
                new_code_cell(source=linesep.join(source_lines)))
            index += 1
def churn_code(meta, global_vars, cg_out_list, include_files, tsk_groups, f):
    """
    generate code of module
    tasks_cg_out = [ (task_name, cg_out), ... ]
    f - writeble filelike object
    """
    tsk_cg_out = list(cg_out_list)
    # Emit #include lines first.
    f.write("".join('#include "{0}"{1}'.format(incl, linesep) for incl in include_files))
    periodic_sched = "periodic_sched" in meta and meta["periodic_sched"]
    if periodic_sched:
        # Periodic scheduler is generated as an extra task named "loop".
        ps_cg_out = __churn_periodic_sched(tsk_groups, "millis", meta, tmr_data_type=core.VM_TYPE_WORD)
        tsk_cg_out.append(("loop", ps_cg_out))
        # print here(), churn_task_code("loop", ps_cg_out)
    decls = []
    functions = []
    variables = []
    # for name, cg_out in sorted(tsk_cg_out.items(), key=lambda x: x[0]) :
    for name, cg_out in tsk_cg_out:
        # print here(), name
        decl, func, lifted_vars = churn_task_code(name, cg_out)
        variables.extend(lifted_vars)
        decls.append(decl)
        decls.append(linesep)
        functions.append(func)
    # Forward declarations, then grouped global variable definitions,
    # lifted variables, and finally the function bodies.
    f.write("".join(decls))
    g_vars_grouped = groupby(sorted(global_vars, key=lambda x: x[1]), key=lambda x: x[1])
    g_vars_code = tuple((pipe_type + " " + ",".join((i + " = " + str(pipe_default) for (i, _, pipe_default) in sorted(vlist))) + ";" + linesep) for pipe_type, vlist in g_vars_grouped)
    # pprint(g_vars_code)
    # print here(), g_vars_code
    f.write(linesep.join(g_vars_code))
    f.write(linesep.join(variables))
    f.write(linesep.join(functions))
    f.write(linesep)
def print_full_documentation(self):
    """output a full documentation in ReST format"""
    # Python 2 print statements. For the 'master' checker only options are
    # printed; other checkers get a titled section with doc, options,
    # messages and reports.
    for checker in sort_checkers(self._checkers.values()):
        if checker.name == 'master':
            prefix = 'Main '
            if checker.options:
                for section, options in checker.options_by_section():
                    if section is None:
                        title = 'General options'
                    else:
                        title = '%s options' % section.capitalize()
                    print title
                    print '~' * len(title)
                    rest_format_section(sys.stdout, None, options)
                    print
        else:
            prefix = ''
            title = '%s checker' % checker.name.capitalize()
            print title
            print '-' * len(title)
            if checker.__doc__:  # __doc__ is None with -OO
                print linesep.join([l.strip() for l in checker.__doc__.splitlines()])
            if checker.options:
                title = 'Options'
                print title
                print '~' * len(title)
                for section, options in checker.options_by_section():
                    rest_format_section(sys.stdout, section, options)
                    print
            if checker.msgs:
                title = ('%smessages' % prefix).capitalize()
                print title
                print '~' * len(title)
                self.list_checkers_messages(checker)
                print
            if getattr(checker, 'reports', None):
                title = ('%sreports' % prefix).capitalize()
                print title
                print '~' * len(title)
                for report in checker.reports:
                    print ':%s: %s' % report[:2]
                print
            print
def decode_map(cls, map_name, map_obj, map_type, sizeinfo=False):
    """Build a human-readable layout description of a BPF map.

    map_name - display name of the map
    map_obj  - map object exposing map_fd, map_id and the Key/Leaf ctypes
    map_type - display name of the map type
    sizeinfo - forwarded to print_map_ctype to include size details
    Returns the description as a single linesep-joined string.
    """
    header = 'Layout of BPF map %s (type %s, FD %d, ID %d):' % (
        map_name, map_type, map_obj.map_fd, map_obj.map_id)
    lines = [header]
    # Describe the key and value ctypes with the same helper.
    for ctype, role in ((map_obj.Key, 'key'), (map_obj.Leaf, 'value')):
        lines.extend(cls.print_map_ctype(ctype, role, sizeinfo=sizeinfo))
    return linesep.join(lines)
def main(num):
    """Return *num* freshly generated passwords, one per line.

    Useful for piping into a file, e.g.
    `python pw_generator.py 100 > 100_passwords`
    (only possible on POSIX compatible systems).
    """
    passwords = (generate_hash() for _ in xrange(num))
    return linesep.join(passwords)
def printable_highscores(scores: list) -> str:
    """Render high scores as aligned ``name--- : value`` lines.

    scores: list of records where item 0 is the name and item 1 the score;
    rendered in reverse list order (last entry first).
    Returns the lines joined with the platform line separator.
    """
    # Pad names to the longest name plus a 3-char gap so the ':' column lines up.
    # (longest_item_in_list_list presumably returns the longest length at
    # index 0 — confirm against the helper.)
    pad_to = longest_item_in_list_list(scores, 0) + 3
    # Fix: removed leftover debug statement `print(pad_to)` that polluted stdout.
    printable = []
    for score in scores[::-1]:
        printable.append('{} : {}'.format(
            score[0] + ('-' * (pad_to - len(score[0]))), score[1]))
    return linesep.join(printable)
def print_result(result_dict):
    """Print overall comparison results.

    result_dict maps reference-file names to a comparison outcome
    ('OK', 'missing', or discrepancy details).
    """
    separator = '------------------------'
    matched = sorted(name for (name, outcome) in result_dict.items() if outcome == 'OK')
    missing = sorted(name for (name, outcome) in result_dict.items() if outcome == 'missing')
    # Everything that is neither matched nor missing is discrepant.
    discrepant = [name for name in result_dict if name not in matched and name not in missing]
    print("Total reference files: {}".format(len(result_dict)))
    print(separator)
    print("Matched: {} {}".format(linesep, linesep.join(matched)))
    print(separator)
    print("Missing test files: {} {}".format(linesep, linesep.join(missing)))
    print(separator)
    print("Discrepant")
    for name in discrepant:
        print(linesep, name, linesep, linesep.join(result_dict[name]))
    # Summary totals.
    print("Total reference files: {}".format(len(result_dict)))
    print("Total matched: {}".format(len(matched)))
    print("Total missing: {}".format(len(missing)))
    print("Total discrepant: {}".format(len(discrepant)))
def __str__(self): s = [ "Project: %s" % self.name, "Version: %s" % self.version, "Path: %s" % self.path, "Parents: %s" % self.parents, "Children: %s" % self.children ] from os import linesep return linesep.join(s)
def check(self, tsvar):
    """Check the output file for *tsvar*; fail with a summary of any broken assertions."""
    # Merge spec, create and convert arguments into one keyword set.
    merged = dict(self.spec_args)
    merged.update(self.create_args)
    merged.update(self.convert_args)
    results = check_outfile(tsvar=tsvar, **merged)
    # Only entries that are literally False count as failures.
    failures = [name for name, ok in results.items() if ok is False]
    report = ['Output file check for variable {0!r}:'.format(tsvar)]
    report += ['    {0}'.format(assrt) for assrt in failures]
    assert len(failures) == 0, eol.join(report)
def festi(_, __='mhi'):
    """Return a textual estimate for opening value *_* on instrument *__*.

    Uses the 'Session' estimate with an 'afternoon ' greeting after noon,
    otherwise the 'Daily' estimate.
    """
    instrument = PI(mtf(__))
    if datetime.now().hour > 12:
        period, greeting = 'Session', 'afternoon '
    else:
        period, greeting = 'Daily', ''
    bounds = instrument.estimate(_, programmatic=True)[period]
    lines = [
        f'{mtf(__)} estimate base on {greeting}opening @ {_},',
        instrument.estimate(_),
        f"range between {max(bounds['upper'])} and {min(bounds['lower'])}",
        '',
    ]
    return linesep.join(lines)
def _remove_blurb(input_text): string_list = input_text.splitlines() empty_line_found=False while not empty_line_found: if string_list[0].strip() == "": empty_line_found = True string_list.pop(0) return linesep.join(string_list)
def fake_data(name):
    """Return the canned racadm response stored under *name*, or None.

    Looks the file up inside env.racadm_data; returns None when that
    directory is unset or the file does not exist. Line endings are
    normalised to the platform separator.
    """
    if not env.racadm_data:
        return None
    path = os.path.join(env.racadm_data, name)
    if not os.path.exists(path):
        return None
    with open(path) as handle:
        # splitlines + join normalises whatever endings the file uses.
        return linesep.join(handle.read().splitlines())
def paint(self, position):
    """Render the board as a single string.

    position: dict mapping occupied positions to the list of pawns on
    each position (forwarded to _place_pawns).
    """
    # Start from a fresh copy of the template so previous paints don't leak.
    self.board_tmpl_curr = deepcopy(BOARD_TMPL)
    self._place_pawns(position)
    rows = (''.join(cells) for cells in self.board_tmpl_curr)
    return linesep.join(rows)
def GenPage(self, gameId, pageId=1, START=True, typ='Images'):
    """Fetch one browse page for a game and append it as HTML to a temp file.

    gameId - numeric game id (url templates are bgg_* — presumably BoardGameGeek)
    pageId - 1-based page number of the listing
    START  - when True, open the accumulated HTML file at the end
    typ    - which listing to fetch: 'Images', 'Files' or 'Links'
    """
    wx.BeginBusyCursor()
    self.stb.SetStatusText('Fetching %s imgs page %s....' % (gameId, pageId))
    # Map listing type to its browse-URL template.
    urls = {
        'Images': bgg_img_browse_url,
        'Files': bgg_file_browse_url,
        'Links': bgg_link_browse_url,
    }
    url = urls[typ]
    # urlretrieve returns (local_filename, headers); keep only the filename.
    src = urlretrieve(url % (int(gameId), int(pageId)))
    src = src[0]
    texts = [
        "<h1>%s of %s (#%s) - Page %d</h1>" % (typ, self.results.GetItemText(
            self.results.Selection), gameId, pageId),
        '<ol>'
    ]
    if typ == "Images":
        # Scrape class-less <img> tags from the downloaded page.
        bs = BS(file(src).read())
        imgs = [x.get('src') for x in bs.findAll('img', {"class": None})]
        for index, img in enumerate(imgs):
            # '_mt.jpg' thumbnails: link to the full-size '.jpg', display the
            # '_t.jpg' variant.
            href = img.replace('_mt.jpg', '.jpg')
            img = img.replace('_mt.jpg', '_t.jpg')
            texts.append('%d:<a href="%s"><img src="%s"></a></li>' % (index, href, img))
        texts.append('</ol>')
    else:
        # Files/Links pages are embedded verbatim (decoded as cp1252).
        texts.append(file(src).read().decode('cp1252'))
        texts.append('</ol>')
    #texts.append('<div align="center"><input type="button" value"Download Selected"></div></form>')
    from tempfile import mktemp
    from os import linesep, startfile
    if not self.currentfile:
        # First page: create a fresh temp file and remember it.
        dst = mktemp(suffix=".html")
        file(dst, 'wb').write(linesep.join(texts).encode('cp1252'))
        self.currentfile = dst
    else:
        # Subsequent pages are appended to the existing file.
        dst = self.currentfile
        file(dst, 'ab').write(linesep.join(texts).encode('cp1252'))
    wx.EndBusyCursor()
    if START:
        startfile(self.currentfile)
def unified_diff(self):
    """Return a unified diff"""
    # Parse both field values and force every line to UTF-8 before diffing.
    old_lines = [
        safe_utf8(line)
        for line in self._parseField(self.oldValue, filename=self.oldFilename)
    ]
    new_lines = [
        safe_utf8(line)
        for line in self._parseField(self.newValue, filename=self.newFilename)
    ]
    diff = difflib.unified_diff(old_lines, new_lines, self.id1, self.id2)
    return linesep.join(diff)
def _replace_missing_entries_with_x(input_text): string_list = input_text.splitlines() penultimate_string = string_list[-2] ultimate_string_as_list = list(string_list[-1]) for index, character in enumerate(ultimate_string_as_list): if character == " " and penultimate_string[index] == ".": ultimate_string_as_list[index] = "X" string_list[-1] = "".join(ultimate_string_as_list) return linesep.join(string_list)
def main(argv):
    """CLI entry point: print the matching versions of a dependency.

    On any failure the exception message is printed and the process exits
    with _ERROR_RUNTIME.
    """
    try:
        dependency, config = _validate_input(argv)
        if dependency == 'go':
            _try_set_min_go_version()
        spec = DEPS[dependency]
        found = get_versions_from(spec['url'], spec['re'])
        found = filter_versions(found, spec['min'], **config)
        print(linesep.join(map(str, found)))
    except Exception as e:
        print(str(e))
        exit(_ERROR_RUNTIME)
def init_tracker(url, idsite, tracker_token):
    """Build a PiwikTracker sending data to *url* for site *idsite*.

    Returns the tracker instance, or None (after logging a warning) when the
    optional `openfisca_tracker` package is not installed.
    """
    try:
        from openfisca_tracker.piwik import PiwikTracker
        tracker = PiwikTracker(url, idsite, tracker_token)
        info = linesep.join([
            u'You chose to activate the `tracker` module. ',
            u'Tracking data will be sent to: ' + url,
            u'For more information, see <https://github.com/openfisca/openfisca-core#tracker-configuration>.'])
        log.info(info)
        return tracker
    except ImportError:
        message = linesep.join([
            traceback.format_exc(),
            u'You chose to activate the `tracker` module, but it is not installed.',
            u'For more information, see <https://github.com/openfisca/openfisca-core#tracker-installation>.'])
        # Fix: Logger.warn is a deprecated alias of Logger.warning.
        log.warning(message)
def __repr__(self):
    """Multi-line representation of the schema object: oid, flags, name,
    description, extensions, experimental attributes and oid info."""
    parts = [': ' + self.oid]
    if self.obsolete:
        parts.append(' [OBSOLETE]')
    if self.name:
        parts.append(linesep + '  Short name: ' + list_to_string(self.name))
    if self.description:
        parts.append(linesep + '  Description: ' + self.description)
    # '<__desc__>' looks like a placeholder token substituted by subclasses —
    # preserved verbatim; confirm against the subclass __repr__ implementations.
    parts.append('<__desc__>')
    for label, entries in (('Extensions', self.extensions),
                           ('Experimental', self.experimental)):
        if entries:
            rendered = linesep.join('    ' + s[0] + ': ' + list_to_string(s[1])
                                    for s in entries)
            parts.append(linesep + '  ' + label + ':' + linesep + rendered)
    if self.oid_info:
        parts.append(linesep + '  OidInfo: ' + str(self.oid_info))
    parts.append(linesep)
    return ''.join(parts)