def parse_route_path(path):
    fmt = Formatter()
    rule = turn_noncapturing(path)
    arguments = OrderedDict()
    pattern = ''
    for literal_text, field_name, format_spec, conversion in fmt.parse(rule):
        pattern += literal_text
        if field_name is None:
            continue
        format_spec = format_spec.lower()
        subpattern = _route_rule_types.get(format_spec, None)
        if subpattern is None:
            subpattern = _route_rule_types.get('str')
        if field_name in arguments:
            err = "The argument {%s:%s} is already defined in %s"
            err %= (field_name, format_spec, path)
            raise SyntaxError(err)
        arguments[field_name] = format_spec
        pattern += '(?P<' + field_name + '>' + subpattern + ')'
    return pattern, arguments
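# Hedged usage sketch for parse_route_path above. The function relies on a module-level
# _route_rule_types mapping and a turn_noncapturing() helper that are not shown; the
# stand-ins below are assumptions, added only to illustrate how a rule such as
# "/users/{user_id:int}" could become a named-group regex plus an argument mapping.
from collections import OrderedDict
from string import Formatter

_route_rule_types = {'str': r'[^/]+', 'int': r'\d+'}  # assumed mapping


def turn_noncapturing(path):
    # Assumed no-op stand-in; the real helper presumably rewrites optional segments.
    return path


pattern, arguments = parse_route_path('/users/{user_id:int}/posts/{slug}')
print(pattern)    # /users/(?P<user_id>\d+)/posts/(?P<slug>[^/]+)
print(arguments)  # maps 'user_id' -> 'int' and 'slug' -> ''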
def get_value(self, key, args, kwds):
    # Let default get_value() handle ints
    if not isinstance(key, str):
        return Formatter.get_value(self, key, args, kwds)

    # HACK: we allow defining strings via fields to allow conversions
    if key[:2] == 's|':
        return key[2:]

    if key[:2] == 't|':
        # title mode ("TITLE ATTR")
        include_title = True
    elif key[:2] == 'a|':
        # plain attribute mode ("ATTR")
        include_title = False
    else:
        # No special field, have default get_value() get it
        return Formatter.get_value(self, key, args, kwds)

    key = key[2:]
    (title, value) = self._nlattr(key)
    if include_title:
        if not title:
            title = key  # fall back to key as title
        value = kw(title) + ' ' + value
    return value
def parse_pattern(format_string, env, wrapper=lambda x, y: y):
    """
    Parse the format_string and return prepared data according to the env.

    Pick each field found in the format_string from the env(ironment), apply
    the wrapper on each data and return a mapping between field-to-replace and
    values for each.
    """
    formatter = Formatter()
    fields = [x[1] for x in formatter.parse(format_string) if x[1] is not None]
    prepared_env = {}
    # Create a prepared environment with only used fields, all as list:
    for field in fields:
        # Search for a movie attribute for each alternative field separated
        # by a pipe sign:
        for field_alt in (x.strip() for x in field.split('|')):
            # Handle default values (enclosed by quotes):
            if field_alt[0] in '\'"' and field_alt[-1] in '\'"':
                field_values = field_alt[1:-1]
            else:
                field_values = env.get(field_alt)
            if field_values is not None:
                break
        else:
            field_values = []
        if not isinstance(field_values, list):
            field_values = [field_values]
        prepared_env[field] = wrapper(field_alt, field_values)
    return prepared_env
def get_format_args(fstr):
    # TODO: memoize
    formatter = Formatter()
    fargs, fkwargs, _dedup = [], [], set()

    def _add_arg(argname, type_char='s'):
        if argname not in _dedup:
            _dedup.add(argname)
            argtype = _TYPE_MAP.get(type_char, str)  # TODO: unicode
            try:
                fargs.append((int(argname), argtype))
            except ValueError:
                fkwargs.append((argname, argtype))

    for lit, fname, fspec, conv in formatter.parse(fstr):
        if fname is not None:
            type_char = fspec[-1:]
            fname_list = re.split('[.[]', fname)
            if len(fname_list) > 1:
                raise ValueError('encountered compound format arg: %r' % fname)
            try:
                base_fname = fname_list[0]
                assert base_fname
            except (IndexError, AssertionError):
                raise ValueError('encountered anonymous positional argument')
            _add_arg(fname, type_char)
            for sublit, subfname, _, _ in formatter.parse(fspec):
                # TODO: positional and anon args not allowed here.
                if subfname is not None:
                    _add_arg(subfname)
    return fargs, fkwargs
def select(self,type='VAV',subtype = ['RM STPT DIAL','ROOM TEMP'], floor = 0,nexp='', pattern ='{i}',cond = 'cmax(x,7)',maxn = 10, dtfilter = ''): env = self._genv() env.init() l = env.getSensorsByType(type) lname = [] for it in l: k = it.split(':') if floor != 0: fl = int(k[0].split('.')[1]) if fl != floor: continue if nexp != '': sname = k[0].split('.')[3] if not fnmatch.fnmatch(sname,nexp): continue if k[1] in subtype: lname.append((it,env.getSensorId(it))) ltemp = sorted(lname,key=itemgetter(0))[:maxn] filt = '' i = 0 f = Formatter() l = "{{%s}}" f.format(l) ns = '' exp = [] i,m =0,len(ltemp) loopi=0 while i < m: h = f.parse(pattern) for a,b,c,d in h : ns += a if b is not None : ns += '{' + str(eval(b)) + '}' i = eval(b) loopi = max(i,loopi) if cond != '': ns += '.apply(lambda x:' + cond + ')' if loopi < m-1: ns += '; ' i = loopi + 1 cs = self.getSeries(ltemp,dtfilter) re = self.getExpression(ltemp,ns, cs) series = [] for name ,c in re: dataserie = defaultdict() json_s = c[['stime','svalue']].to_json(orient='values') dataserie['name']=name dataserie['data']=json_s series.append(dataserie) return series
def get_value(self, key, args, kwargs):
    if '|' in key:
        for kk in key.split('|'):
            out = Formatter.get_value(self, kk, args, kwargs)
            if out is not None and len(out) > 0:
                return out
    out = Formatter.get_value(self, key, args, kwargs)
    if failOnEmpty and (out is None or len(out) == 0):
        raise ValueError("Cannot resolve %s" % key)
    return out
def pformat(temp, **fmt):
    """Format a template string partially.

    Examples
    --------
    >>> pformat("{a}_{b}", a='x')
    'x_{b}'
    """
    formatter = Formatter()
    mapping = _FormatDict(fmt)
    return formatter.vformat(temp, (), mapping)
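# _FormatDict is not shown above. A minimal sketch of what pformat assumes is a dict
# whose __missing__ hook hands unknown fields back as "{name}" so they survive the
# formatting pass untouched; this is an assumption, not the original implementation.
from string import Formatter


class _FormatDict(dict):
    def __missing__(self, key):
        return '{' + key + '}'


print(Formatter().vformat("{a}_{b}", (), _FormatDict(a='x')))  # -> 'x_{b}'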
def strfdelta(tdelta, fmt):
    """ Get a string from a timedelta. """
    f, d = Formatter(), {}
    l = {'D': 86400, 'H': 3600, 'M': 60, 'S': 1}
    k = list(map(lambda x: x[1], list(f.parse(fmt))))
    rem = int(tdelta.total_seconds())
    for i in ('D', 'H', 'M', 'S'):
        if i in k and i in l.keys():
            d[i], rem = divmod(rem, l[i])
    return f.format(fmt, **d)
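# Example call for the strfdelta above (illustrative only): the field names in the
# format string select which units are computed, so units that do not appear are skipped.
from datetime import timedelta

print(strfdelta(timedelta(days=1, hours=2, minutes=3, seconds=4),
                '{D}d {H:02}h {M:02}m {S:02}s'))
# -> '1d 02h 03m 04s'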
def tokenize_format_str(fstr, resolve_pos=True):
    ret = []
    if resolve_pos:
        fstr = infer_positional_format_args(fstr)
    formatter = Formatter()
    for lit, fname, fspec, conv in formatter.parse(fstr):
        if lit:
            ret.append(lit)
        if fname is None:
            continue
        ret.append(BaseFormatField(fname, fspec, conv))
    return ret
def make_tag():
    today = datetime.datetime.today()
    date = today.strftime(date_format)
    print(date)
    seq = 1
    "git tag --list=release/%s/*" % date
    values = {"date": date, "sequence": seq}
    formatter = Formatter()
    tag = formatter.vformat(tag_format, [], values)
    print(tag)
def get_field(self, field_name, args, kwargs):
    if self.expand == StringFormatType.error:
        return Formatter.get_field(self, field_name, args, kwargs)
    try:
        return Formatter.get_field(self, field_name, args, kwargs)
    except (AttributeError, KeyError, TypeError):
        reg = re.compile(r"[^\.\[]+")
        try:
            key = reg.match(field_name).group()
        except Exception:
            key = field_name
        if self.expand == StringFormatType.empty:
            return ('', key)
        else:  # StringFormatType.unchanged
            return ("{%s}" % field_name, key)
def save_params(self):
    f = Formatter()
    tokens = f.parse(self.command_string)
    params = []
    for (_, param_name, _, _) in tokens:
        if param_name is not None:
            if param_name in self.inputdict:
                param = self.inputdict[param_name]
            else:
                param = iobjs.Input(name=param_name)
            if param_name in self._defaults:
                param.default = self._defaults[param_name]
            params += [param]
    self.inputs = params
def format(self, format_string, *args, **kwargs):
    def escape_envvar(matchobj):
        value = next(x for x in matchobj.groups() if x is not None)
        return "${{%s}}" % value

    format_string_ = re.sub(self.ENV_VAR_REGEX, escape_envvar, format_string)
    return Formatter.format(self, format_string_, *args, **kwargs)
def SafeStringParse(formatter, s, keys):
    """
    A "safe" version of :func:`string.Formatter.parse` that will only parse
    the input keys specified in ``keys``.

    Parameters
    ----------
    formatter : string.Formatter
        the string formatter class instance
    s : str
        the string we are formatting
    keys : list of str
        list of the keys to accept as valid
    """
    # the default list of keys
    l = list(Formatter.parse(formatter, s))

    toret = []
    for x in l:
        if x[1] in keys:
            toret.append(x)
        else:
            val = x[0]
            if x[1] is not None:
                fmt = "" if not x[2] else ":%s" % x[2]
                val += "{%s%s}" % (x[1], fmt)
            toret.append((val, None, None, None))
    return iter(toret)
def vformat(self, format_string, args, kwargs):
    matcher = re.compile(self._expr, re.VERBOSE)

    # special case of returning the object if the entire string
    # matches a single parameter
    try:
        result = re.match('^%s$' % self._expr, format_string, re.VERBOSE)
    except TypeError:
        return format_string.format(**kwargs)
    if result is not None:
        try:
            return kwargs[result.group("key")]
        except KeyError:
            pass

    # handle multiple fields within string via a callback to re.sub()
    def re_replace(match):
        key = match.group("key")
        default = match.group("default")
        if default is not None:
            if key not in kwargs:
                return default
            else:
                return "{%s}" % key
        return match.group(0)

    format_string = matcher.sub(re_replace, format_string)
    return Formatter.vformat(self, format_string, args, kwargs)
def vformat(self, format_string, args, kwargs):
    self.unused_args = {}
    ret = Formatter.vformat(self, format_string, args, kwargs)
    if not self.unused_args:
        return ret
    extra_data = ', '.join('{0}={1}'.format(*kv) for kv in self.unused_args.items())
    return '{0} ({1})'.format(ret, extra_data)
def strfdelta(tdelta, fmt=None):
    from string import Formatter
    if not fmt:
        # The standard, most human readable format.
        fmt = "{D} days {H:02} hours {M:02} minutes {S:02} seconds"
    if tdelta == timedelta():
        return "0 minutes"
    formatter = Formatter()
    return_map = {}
    div_by_map = {'D': 86400, 'H': 3600, 'M': 60, 'S': 1}
    # Materialize the field names so the membership tests below can run more than once.
    keys = [field[1] for field in formatter.parse(fmt)]
    remainder = int(tdelta.total_seconds())
    for unit in ('D', 'H', 'M', 'S'):
        if unit in keys and unit in div_by_map.keys():
            return_map[unit], remainder = divmod(remainder, div_by_map[unit])
    return formatter.format(fmt, **return_map)
def _wrapped(pseudo_type, string, **kwargs):
    text = []
    formatter = Formatter()
    for (literal_text, field_name, format_spec, conversion) in formatter.parse(string):
        if literal_text:
            literal_text = next_splitter_or_func(
                literal_text, self.splitters, func, pseudo_type)
            literal_text = literal_text.replace('{', '{{').replace('}', '}}')
            text.append(literal_text)
        if field_name is not None:
            fmt = field_name
            if conversion is not None:
                fmt += '!' + conversion
            if format_spec:
                fmt += ':' + format_spec
            text.append('{%s}' % (fmt, ))
    return "".join(text)
def get_value(self, key, args, kwargs):
    try:
        return Formatter.get_value(self, key, args, kwargs)
    except KeyError:
        if self.allow_empty:
            logger.debug("Found uninitialized key %s, replaced with empty string", key)
            return ""
        raise
def get_value(self, key, args, kwds):
    if isinstance(key, str):
        try:
            return kwds[key]
        except KeyError:
            return self.context[key]
    else:
        return Formatter.get_value(self, key, args, kwds)
def strfdelta(tdelta, fmt='{D:02}d {H:02}h {M:02}m {S:02}s', inputtype='timedelta'):
    """Convert a datetime.timedelta object or a regular number to a custom-
    formatted string, just like the strftime() method does for
    datetime.datetime objects.

    The fmt argument allows custom formatting to be specified. Fields can
    include seconds, minutes, hours, days, and weeks. Each field is optional.

    Some examples:
        '{D:02}d {H:02}h {M:02}m {S:02}s' --> '05d 08h 04m 02s' (default)
        '{W}w {D}d {H}:{M:02}:{S:02}'     --> '4w 5d 8:04:02'
        '{D:2}d {H:2}:{M:02}:{S:02}'      --> ' 5d 8:04:02'
        '{H}h {S}s'                       --> '72h 800s'

    The inputtype argument allows tdelta to be a regular number instead of the
    default, which is a datetime.timedelta object. Valid inputtype strings:
    's', 'seconds', 'm', 'minutes', 'h', 'hours', 'd', 'days', 'w', 'weeks'
    """

    # Convert tdelta to integer seconds.
    if inputtype == 'timedelta':
        remainder = int(tdelta.total_seconds())
    elif inputtype in ['s', 'seconds']:
        remainder = int(tdelta)
    elif inputtype in ['m', 'minutes']:
        remainder = int(tdelta) * 60
    elif inputtype in ['h', 'hours']:
        remainder = int(tdelta) * 3600
    elif inputtype in ['d', 'days']:
        remainder = int(tdelta) * 86400
    elif inputtype in ['w', 'weeks']:
        remainder = int(tdelta) * 604800

    f = Formatter()
    desired_fields = [field_tuple[1] for field_tuple in f.parse(fmt)]
    possible_fields = ('W', 'D', 'H', 'M', 'S')
    constants = {'W': 604800, 'D': 86400, 'H': 3600, 'M': 60, 'S': 1}
    values = {}
    for field in possible_fields:
        if field in desired_fields and field in constants:
            values[field], remainder = divmod(remainder, constants[field])
    return f.format(fmt, **values)
def get_format_args(fstr): """ Turn a format string into two lists of arguments referenced by the format string. One is positional arguments, and the other is named arguments. Each element of the list includes the name and the nominal type of the field. # >>> get_format_args("{noun} is {1:d} years old{punct}") # ([(1, <type 'int'>)], [('noun', <type 'str'>), ('punct', <type 'str'>)]) # XXX: Py3k >>> get_format_args("{noun} is {1:d} years old{punct}") == \ ([(1, int)], [('noun', str), ('punct', str)]) True """ # TODO: memoize formatter = Formatter() fargs, fkwargs, _dedup = [], [], set() def _add_arg(argname, type_char="s"): if argname not in _dedup: _dedup.add(argname) argtype = _TYPE_MAP.get(type_char, str) # TODO: unicode try: fargs.append((int(argname), argtype)) except ValueError: fkwargs.append((argname, argtype)) for lit, fname, fspec, conv in formatter.parse(fstr): if fname is not None: type_char = fspec[-1:] fname_list = re.split("[.[]", fname) if len(fname_list) > 1: raise ValueError("encountered compound format arg: %r" % fname) try: base_fname = fname_list[0] assert base_fname except (IndexError, AssertionError): raise ValueError("encountered anonymous positional argument") _add_arg(fname, type_char) for sublit, subfname, _, _ in formatter.parse(fspec): # TODO: positional and anon args not allowed here. if subfname is not None: _add_arg(subfname) return fargs, fkwargs
def tokenize_format_str(fstr, resolve_pos=True):
    """Takes a format string, turns it into a list of alternating string
    literals and :class:`BaseFormatField` tokens. By default, also infers
    anonymous positional references into explicit, numbered positional
    references. To disable this behavior set *resolve_pos* to ``False``.
    """
    ret = []
    if resolve_pos:
        fstr = infer_positional_format_args(fstr)
    formatter = Formatter()
    for lit, fname, fspec, conv in formatter.parse(fstr):
        if lit:
            ret.append(lit)
        if fname is None:
            continue
        ret.append(BaseFormatField(fname, fspec, conv))
    return ret
def get_params(self):
    formatter = Formatter()
    format_iterator = formatter.parse(self.get_cleaned_content())
    params = {
        'args': list(),
        'kwargs': set()
    }
    for _tuple in format_iterator:
        field_name = _tuple[1]
        if field_name is not None:
            if field_name == '':
                params['args'].append(field_name)
            elif field_name.isdigit():
                if field_name not in params['args']:
                    params['args'].append(field_name)
            else:
                params['kwargs'].add(field_name)
    return params
def _write_to_file(table_a_dest, genome_ids_a, genome_ids_b, common_prefix_a, common_prefix_b, calculations):
    '''
    :param table_a_dest:
    :type table_a_dest: filename
    :param genome_ids_a:
    :type genome_ids_a: list of genome ids
    :param common_prefix_a:
    :type common_prefix_a: string
    :param common_prefix_b:
    :type common_prefix_b: string
    :param calculations:
    :type calculations: list of clade_calcs instances
    '''
    with open(table_a_dest, 'a') as write_handle:
        # Print introduction about the strain comparison
        write_handle.write('#{} {} strains compared with {} {} strains\n'.format(
            len(genome_ids_a), common_prefix_a, len(genome_ids_b), common_prefix_b))

        # Print the genome IDs involved in each of the strains
        write_handle.write('#IDs {}: {}\n'.format(common_prefix_a, ', '.join(genome_ids_a)))
        write_handle.write('#IDs {}: {}\n'.format(common_prefix_b, ', '.join(genome_ids_b)))

        # Print column headers for the data to come
        max_nton = len(genome_ids_a) // 2
        headers = _get_column_headers(max_nton)
        write_handle.write('#' + '\t'.join(headers))
        write_handle.write('\n')

        # Print data rows
        format_str = '\t'.join('{{{}}}'.format(key) for key in headers)
        from string import Formatter
        formatter = Formatter()
        for clade_calcs in calculations:
            write_handle.write(formatter.vformat(format_str, None, clade_calcs.values))
            write_handle.write('\n')
def strfdelta(tsec, format_str="P", format_no_day="PT{H}H{M}M{S}S", format_zero="PT0S"): """Formatting the time duration. Duration ISO8601 format (PnYnMnDTnHnMnS): http://en.wikipedia.org/wiki/ISO_8601 Choosing the format P[nD]TnHnMnS where days is the total number of days (if not 0), 0 values may be omitted, 0 duration is PT0S :param tsec: float, number of seconds :param format_str: Format string, ISO 8601 is "P{D}DT{H}H{M}M{S}S". Default is a format string "P": will use ISO 8601 but skip elements that have 0 value, e.g. P1H7S instead of P1H0M7S :param format_no_day: Format string, ISO 8601, default "PT{H}H{M}M{S}S" :param format_zero: format for when tsec is 0 or None, default "PT0S" :return: Formatted time duration """ if not tsec: # 0 or None return format_zero f = Formatter() d = {} l = {'D': 86400, 'H': 3600, 'M': 60, 'S': 1} rem = long(tsec) if format_str == "P": # variable format if 0 < tsec < 86400: format_str = "PT" for i in ('D', 'H', 'M', 'S'): if i in l.keys(): d[i], rem = divmod(rem, l[i]) if d[i] != 0: format_str = "%s{%s}%s" % (format_str, i, i) else: if 0 < tsec < 86400: format_str = format_no_day k = map(lambda x: x[1], list(f.parse(format_str))) for i in ('D', 'H', 'M', 'S'): if i in k and i in l.keys(): d[i], rem = divmod(rem, l[i]) return f.format(format_str, **d)
class TableCell():
    def __init__(self, content=None):
        self.content = content
        self.fmtr = Formatter()

    def __repr__(self):
        return self.display()

    def display(self):
        """ type dependent string formatting """
        if isinstance(self.content, UFloat):
            return "{}".format(self.fmtr.format("{0:.1uS}", self.content))
        elif isinstance(self.content, int):
            return "{}".format(self.fmtr.format("{0:.0f}", self.content))
        elif isinstance(self.content, float):
            return "{}".format(self.fmtr.format("{0:.3f}", self.content))
        elif isinstance(self.content, basestring):
            return self.content
        elif self.content is None:
            return "None"
        else:
            return str(self.content)
def get_value(self, key, args, kwds):
    """
    When values are being swapped in a formatted string, if an exact match,
    the key is replaced by the value provided. If no values exist for a given
    key, simply return the variable name as its value.

    Args:
        key (str or other): The expected name for the key in the formatted string
        args (any): The args to put in place of key
        kwds (iterable): key/value pairs

    Returns:
        str: The format-replaced string with all of the keys replaced
    """
    if isinstance(key, str):
        try:
            return kwds[key]
        except KeyError:
            return key
    else:
        return Formatter.get_value(self, key, args, kwds)
def get_value(self, key, args, kwargs):
    # try regular formatting first:
    try:
        return Formatter.get_value(self, key, args, kwargs)
    except Exception:
        pass
    # next, look in user_ns and builtins:
    for container in (self.shell.user_ns, __builtins__):
        if key in container:
            return container[key]
    # nothing found, put error message in its place
    return "<ERROR: '%s' not found>" % key
def get_value(self, key, args, kwds):
    if isinstance(key, str):
        if key:
            try:
                # Check explicitly passed arguments first
                return kwds[key]
            except KeyError:
                return self.namespace[key]
        else:
            raise ValueError("zero length field name in format")
    else:
        return Formatter.get_value(self, key, args, kwds)
def sendMessages(columns, items, input_message, image_path, selected_option, connected): with connected.get_lock(): connected.value = 0 driver = openDriver() driver.implicitly_wait(5) visitPage(driver, website) setStatus('0,0') setClear('0') setMessageError('') used_columns = [ fn for _, fn, _, _ in Formatter().parse(input_message) if fn is not None ] while (True): if connected.value == 1: not_sent_row = 2 sent_row = 2 try: wb = xl.load_workbook(xl_filename) ws = wb.active not_sent_row = len(list(ws.rows)) + 1 print 'Opening file...of errors', not_sent_row except: print "File not found, creating new file...for errors" wb = Workbook() try: s_wb = xl.load_workbook(success_filename) s_ws = s_wb.active sent_row = len(list(s_ws.rows)) + 1 print 'Opening file...of success', sent_row except: print "File not found, creating new file...of success" s_wb = Workbook() s_ws = s_wb.active s_ws.title = "sent" ws = wb.active ws.title = "failed" sent_phone_no = list() not_sent = list() for item in items: try: message = input_message.format(**item) key_flag = False for key in used_columns: # print "key : ", key, item[key] if item[key] is None: not_sent.append(item['phone']) write_details(xl_filename, wb, ws, not_sent_row, [ item['name'], item['phone'], message, "column " + key + " is null", str(datetime.datetime.now()) ]) not_sent_row = not_sent_row + 1 key_flag = True break if key_flag: continue except Exception as err: err_msg = str(err) setMessageError(err_msg) driver.quit() return try: if item['phone'] in sent_phone_no: print "Repeated user: "******" with number: ", item['phone'] # search_query = str(item['name']) + " " + str(item['phone']) search_query = str(item['phone']) searchReceiver(driver, str(search_query)) except: not_sent.append(item['phone']) print "Receiver not found: ", item['name'] write_details(xl_filename, wb, ws, not_sent_row, [ item['name'], item['phone'], message, "Receiver not found: " + item['name'], str(datetime.datetime.now()) ]) not_sent_row = not_sent_row + 1 clear_field(driver) # time.sleep(2) continue try: # verify_query = [str(item['name']), str(item['phone'])] phone = str(item['phone']).strip() verify_query = [phone[:5], phone[5:]] s_flag = findInSearchResults(driver, verify_query) if not s_flag: print "Wrong element selected for " + item['name'] not_sent.append(item['phone']) write_details(xl_filename, wb, ws, not_sent_row, [ item['name'], item['phone'], message, "Wrong element selected for " + item['name'], str(datetime.datetime.now()) ]) not_sent_row = not_sent_row + 1 clear_field(driver) # time.sleep(2) continue #################################################### if image_path: if selected_option == 0: sendMessage(driver, message) time.sleep(1) sendImage(driver, image_path) elif selected_option == 1: sendImage(driver, image_path) time.sleep(1) sendMessage(driver, message) elif selected_option == 2: sendCaption(driver, message, image_path) else: # print ("no option") pass else: sendMessage(driver, message) #################################################### # print "Number: ", item['phone'], " message sent: ", message sent_phone_no.append(item['phone']) write_details(success_filename, s_wb, s_ws, sent_row, [ item['name'], item['phone'], message, "Message sent", str(datetime.datetime.now()) ]) sent_row += 1 # print (float(len(sent_phone_no)) / float(len(items))) status_value = str( int((float(len(sent_phone_no)) / float(len(items))) * 100.0)) status_value += "," status_value += str( int((float(len(not_sent)) / float(len(items))) * 100.0)) # print "status completed : ", 
status_value, len(sent_phone_no) setStatus(status_value) except TimeoutException: print "Timeout for receiver ", item['name'] not_sent.append(item['phone']) write_details(xl_filename, wb, ws, not_sent_row, [ item['name'], item['phone'], message, "Timeout for receiver " + item['name'], str(datetime.datetime.now()) ]) not_sent_row += 1 # clear_field(driver) # time.sleep(1) while (True): try: not_sent_message = driver.find_element_by_xpath( "//div[@class='_1VfKB']/span[@data-icon='status-time']" ) # print "messeages not sent till now " except NoSuchElementException as err: print "messeages sent" status_value = str( int((float(len(sent_phone_no)) / float(len(items))) * 100.0)) status_value += "," status_value += str( int((float(len(not_sent)) / float(len(items))) * 100.0)) setStatus(status_value) break break else: time.sleep(3) print("process finished") driver.quit() return
def _labels_for_format(self, serialize_format):
    return (label for _, label, _, _ in Formatter().parse(serialize_format) if label)
def parseBase(self):
    # Keep only the non-empty field names found in the base template.
    self.keys = [i[1] for i in Formatter().parse(self.base) if i[1]]
class AST(object): formatter = Formatter() def __init__(self, atoms, exp): self.atoms = atoms self.exp = exp self.params = [] self.args = [] self.kwargs = {} def eval(self, args=None, kwargs=None, params=None): self.params = params if params is not None else self.params self.args = args if args else self.args self.kwargs = kwargs or self.kwargs # Eval ast wrt to env res = self._eval(self.atoms, self.exp.env) return res def _eval(self, atom, env): if isinstance(atom, ExpressionSymbol): return atom.eval() elif isinstance(atom, ExpressionParam): value = atom.eval(self, env) return self.emit_literal(value) elif isinstance(atom, AST): return atom.eval(self.args, self.kwargs, self.params) elif not isinstance(atom, list): return self.emit_literal(atom) else: head = atom.pop(0) head = self._eval(head, env) params = [] for x in atom: val = self._eval(x, env) params.append(val) if callable(head): head = head(*params) return head def emit_literal(self, x): # Collect literal and return placeholder if isinstance(x, (tuple, list, set)): self.params.extend(x) return ", ".join("%s" for _ in x) self.params.append(x) return "%s" def __repr__(self): return "<AST [%s]>" % " ".join(map(str, self.atoms)) def is_aggregate(self): for atom in self.atoms: if isinstance(atom, AST): if atom.is_aggregate(): return True if getattr(atom, "token", None) in Expression.aggregates: return True return False
def reverse_format(format_string, resolved_string): """ Reverse the string method format. Given format_string and resolved_string, find arguments that would give ``format_string.format(**arguments) == resolved_string`` Parameters ---------- format_string : str Format template string as used with str.format method resolved_string : str String with same pattern as format_string but with fields filled out. Returns ------- args : dict Dict of the form {field_name: value} such that ``format_string.(**args) == resolved_string`` Examples -------- >>> reverse_format('data_{year}_{month}_{day}.csv', 'data_2014_01_03.csv') {'year': '2014', 'month': '01', 'day': '03'} >>> reverse_format('data_{year:d}_{month:d}_{day:d}.csv', 'data_2014_01_03.csv') {'year': 2014, 'month': 1, 'day': 3} >>> reverse_format('data_{date:%Y_%m_%d}.csv', 'data_2016_10_01.csv') {'date': datetime.datetime(2016, 10, 1, 0, 0)} >>> reverse_format('{state:2}{zip:5}', 'PA19104') {'state': 'PA', 'zip': '19104'} See also -------- str.format : method that this reverses reverse_formats : method for reversing a list of strings using one pattern """ from string import Formatter from datetime import datetime fmt = Formatter() args = {} # ensure that format_string is in posix format format_string = make_path_posix(format_string) # split the string into bits literal_texts, field_names, format_specs, conversions = zip(*fmt.parse(format_string)) if not any(field_names): return {} for i, conversion in enumerate(conversions): if conversion: raise ValueError(('Conversion not allowed. Found on {}.' .format(field_names[i]))) # ensure that resolved string is in posix format resolved_string = make_path_posix(resolved_string) # get a list of the parts that matter bits = _get_parts_of_format_string(resolved_string, literal_texts, format_specs) for i, (field_name, format_spec) in enumerate(zip(field_names, format_specs)): if field_name: try: if format_spec.startswith('%'): args[field_name] = datetime.strptime(bits[i], format_spec) elif format_spec[-1] in list('bcdoxX'): args[field_name] = int(bits[i]) elif format_spec[-1] in list('eEfFgGn'): args[field_name] = float(bits[i]) elif format_spec[-1] == '%': args[field_name] = float(bits[i][:-1])/100 else: args[field_name] = fmt.format_field(bits[i], format_spec) except: args[field_name] = bits[i] return args
def get_value(self, *args, **kwargs):
    try:
        return Formatter.get_value(self, *args, **kwargs)
    except KeyError:
        return ''
a = parser.parse_args() aa = vars(a) aa["TEMPLATE"] = a.TEMPLATE.strip() _, meta = readCsv(a.INPUT_FILE) _, samples = readCsv(a.SAMPLE_FILE) if len(a.SAMPLE_FILTER) > 0: samples = filterByQueryString(samples, a.SAMPLE_FILTER) print("%s samples after filtering" % len(samples)) # filter out meta that isn't in sampledata ufilenames = set([s[a.SAMPLE_KEY] for s in samples]) meta = [d for d in meta if d[a.META_KEY] in ufilenames] keys = [ele[1] for ele in Formatter().parse(a.TEMPLATE) if ele[1]] tmpl = Template(a.TEMPLATE) lines = [] for d in meta: fvalues = dict([(key, d[key]) for key in keys]) text = tmpl.substitute(fvalues) if len(text) > 0: lastWord = text.split()[-1] ntext = normalizeText(text) line = {"text": text, "ntext": ntext, "lastWord": lastWord} if a.SORT_BY not in line and a.SORT_BY in fvalues: line[a.SORT_BY] = fvalues[a.SORT_BY] lines.append(line) # make unique based on text, then sort lines = list({line["text"]: line for line in lines}.values())
def get_value(self, key, args, kwds):
    if isinstance(key, basestring) and key not in kwds:
        return self.default
    return Formatter.get_value(self, key, args, kwds)
def get_account_charges(tenancy_name, username, password, domain, idcs_guid, start_time, end_time): global csv_writer if debug: print(f'User:Pass = {username}/{"*" * len(password)}') print(f'Domain, IDCSID = {domain} {idcs_guid}') print(f'Start/End Time = {start_time} to {end_time}') # Oracle API needs the milliseconds explicitly # UsageType can be TOTAL, HOURLY or DAILY. url_params = { 'startTime': start_time.isoformat() + '.000', 'endTime': end_time.isoformat() + '.000', 'usageType': 'MONTHLY', 'dcAggEnabled': 'N', 'computeTypeEnabled': 'Y' } resp = requests.get( 'https://itra.oraclecloud.com/metering/api/v1/usagecost/' + domain, auth=(username, password), headers={'X-ID-TENANT-NAME': idcs_guid}, params=url_params) if resp.status_code != 200: # This means something went wrong. msg = json.loads(resp.text)['errorMessage'] print( f'Error in GET: {resp.status_code} ({resp.reason}) on tenancy {tenancy_name}', file=sys.stderr) print(f' {msg}', file=sys.stderr) return -1 else: # Add the cost of all items returned bill_total_cost = 0 # Ignores 'Do Not Bill' costs calc_total_cost = 0 # Uses all quantities, but uses 'Usage' costs where available if detail: # Print Headings # Headings if output_format == "CSV": csv_writer = csv_init() else: vformat = Formatter().vformat print(vformat(header_format, field_names, '')) items = resp.json() for item in resp.json()['items']: # Each service could have multiple costs (e.g. in overage) # Because of an anomoly in billing, overage amounts use the wrong unitPrice # so take the unit price from the non-overage entry costs = item['costs'] calc_unit_price = 0 std_unit_price = 0 # TESTING # Find the pricing record for the non-overage amount # This only works if there are records for overage and non-overage in the same report range!! # This code is pretty ugly, but it's a quick (temporary!) test for cost in costs: if cost['overagesFlag'] == "N": std_unit_price = cost['unitPrice'] for cost in costs: if std_unit_price == 0: # Std price not found for non-overage, so just use the (probabl) overages one calc_unit_price = cost['unitPrice'] else: calc_unit_price = std_unit_price calc_line_item_cost = calc_unit_price * cost['computedQuantity'] calc_total_cost += calc_line_item_cost if cost['computeType'] == 'Usage': bill_total_cost += cost['computedAmount'] if detail: output_dict = { 'Tenancy': tenancy_name, 'ServiceName': item['serviceName'], 'ResourceName': item['resourceName'], 'SKU': item['gsiProductId'], 'Qty': cost['computedQuantity'], 'UnitPrc': cost['unitPrice'], 'Total': cost['computedAmount'], 'Cur': item['currency'], 'OvrFlg': cost['overagesFlag'], 'ComputeType': cost['computeType'], 'BillTotalCost': bill_total_cost, 'CalcUPrc': calc_unit_price, 'LineCost': calc_line_item_cost, 'CalcTotalCost': calc_total_cost } format_output(output_dict, output_format) return bill_total_cost, calc_total_cost
def parse_number_format(format_s: str) -> NumberFormatter: """ Parse `format_s` to create a formatter function. Usage: format_number = parse_number_format("${:,.2f}") # raises ValueError format_number(1234.56) # => "$1,234.56" This is similar to Python `format()` but different: * It allows formatting float as int: `NumberFormatter('{:d}').format(0.1)` * It disallows "conversions" (e.g., `{!r:s}`) * It disallows variable name/numbers (e.g., `{1:d}`, `{value:d}`) * It raises ValueError on construction if format is imperfect * The function it returns will never raise an exception """ if not isinstance(format_s, str): raise TypeError("Format must be str") # parts: a list of (literal_text, field_name, format_spec, conversion) # # The "literal_text" always comes _before_ the field. So we end up # with three possibilities: # # "prefix{}suffix": [(prefix, "", "", ""), (suffix, None...)] # "prefix{}": [(prefix, "", "", '")] # "{}suffix": [("", "", "", ""), (suffix, None...)] parts = list(Formatter().parse(format_s)) if len(parts) > 2 or len(parts) == 2 and parts[1][1] is not None: raise ValueError("Can only format one number") if not parts or parts[0][1] is None: raise ValueError('Format must look like "{:...}"') if parts[0][1] != "": raise ValueError("Field names or numbers are not allowed") if parts[0][3] is not None: raise ValueError("Field converters are not allowed") prefix = parts[0][0] format_spec = parts[0][2] if len(parts) == 2: suffix = parts[1][0] else: suffix = "" need_int = format_spec and format_spec[-1] in _IntTypeSpecifiers # Test it! # # A reading of cpython 3.7 Python/formatter_unicode.c # parse_internal_render_format_spec() suggests the following unobvious # details: # # * Python won't parse a format spec unless you're formatting a number # * _PyLong_FormatAdvancedWriter() accepts a superset of the formats # _PyFloat_FormatAdvancedWriter() accepts. (Workbench accepts that # superset.) # # Therefore, if we can format an int, the format is valid. format(1, format_spec) # raise ValueError on invalid format def fn(value: Union[int, float]) -> str: if need_int: value = int(value) else: # Format float64 _integers_ as int. For instance, '3.0' should be # formatted as though it were the int, '3'. # # Python would normally format '3.0' as '3.0' by default; that's # not acceptable to us because we can't write a JavaScript # formatter that would do the same thing. (Javascript doesn't # distinguish between float and int.) int_value = int(value) if int_value == value: value = int_value return prefix + format(value, format_spec) + suffix return fn
def __init__(self):
    """Initialize the MemorizeFormatter."""
    Formatter.__init__(self)
    self._used_kwargs = {}
    self._unused_kwargs = {}
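# The __init__ above only sets up bookkeeping for a MemorizeFormatter. A minimal sketch
# of how such a formatter might fill those dicts is shown below; everything beyond the
# constructor is an assumption, not the original implementation.
from string import Formatter


class MemorizeFormatterSketch(Formatter):
    def __init__(self):
        Formatter.__init__(self)
        self._used_kwargs = {}
        self._unused_kwargs = {}

    def check_unused_args(self, used_args, args, kwargs):
        # Called by vformat() after substitution; split kwargs into used and unused.
        for key, value in kwargs.items():
            if key in used_args:
                self._used_kwargs[key] = value
            else:
                self._unused_kwargs[key] = value


fmt = MemorizeFormatterSketch()
fmt.format("{a} and {b}", a=1, b=2, c=3)
print(fmt._unused_kwargs)  # -> {'c': 3}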
def run(self, edit): # get view and location of first selection, which we expect to be just the cursor position view = self.view point = view.sel()[0].b print (point) # Only trigger within LaTeX # Note using score_selector rather than match_selector if not view.score_selector(point, "text.tex.latex"): return try: completions, prefix, post_brace, new_point_a, new_point_b = get_cite_completions(view, point) except UnrecognizedCiteFormatError: sublime.error_message("Not a recognized format for citation completion") return except NoBibFilesError: sublime.error_message("No bib files found!") return except BibParsingError as e: sublime.error_message("Bibliography " + e.filename + " is broken!") return except BibParsingError as e: sublime.error_message(e.message) return # filter against keyword, title, or author if prefix: completions = [comp for comp in completions if prefix.lower() in "%s %s %s" % ( comp['keyword'].lower(), comp['title'].lower(), comp['author'].lower())] # Note we now generate citation on the fly. Less copying of vectors! Win! def on_done(i): print ("latex_cite_completion called with index %d" % (i,) ) # Allow user to cancel if i < 0: return keyword = completions[i]['keyword'] # notify any plugins notification_thread = threading.Thread( target=run_plugin_command, args=( 'on_insert_citation', keyword ), kwargs={ 'stop_on_first': False, 'expect_result': False } ) notification_thread.daemon = True notification_thread.start() cite = completions[i]['keyword'] + post_brace #print("DEBUG: types of new_point_a and new_point_b are " + repr(type(new_point_a)) + " and " + repr(type(new_point_b))) # print "selected %s:%s by %s" % completions[i][0:3] # Replace cite expression with citation # the "latex_tools_replace" command is defined in latex_ref_cite_completions.py view.run_command("latex_tools_replace", {"a": new_point_a, "b": new_point_b, "replacement": cite}) # Unselect the replaced region and leave the caret at the end caret = view.sel()[0].b view.sel().subtract(view.sel()[0]) view.sel().add(sublime.Region(caret, caret)) # get preferences for formating of quick panel cite_panel_format = get_setting('cite_panel_format', ["{title} ({keyword})", "{author}"]) completions_length = len(completions) if completions_length == 0: return elif completions_length == 1: # only one entry, so insert entry view.run_command("latex_tools_replace", { "a": new_point_a, "b": new_point_b, "replacement": completions[0]['keyword'] + post_brace } ) # Unselect the replaced region and leave the caret at the end caret = view.sel()[0].b view.sel().subtract(view.sel()[0]) view.sel().add(sublime.Region(caret, caret)) else: # show quick formatter = Formatter() view.window().show_quick_panel([[formatter.vformat(s, (), completion) for s in cite_panel_format] \ for completion in completions], on_done)
def template_keys(self):
    return [x[1] for x in Formatter().parse(self.template) if x[1]]
def migrate(): ''' Migrate zones from old to new ids in datasets. Should only be run once with the new version of geozones w/ geohisto. ''' counter = Counter() drom_zone = GeoZone.objects(id='country-subset:fr:drom').first() dromcom_zone = GeoZone.objects(id='country-subset:fr:dromcom').first() # Iter over datasets with zones for dataset in Dataset.objects(spatial__zones__gt=[]): counter['datasets'] += 1 new_zones = [] for zone in dataset.spatial.zones: if zone.id.startswith('fr/'): counter['zones'] += 1 country, kind, zone_id = zone.id.split('/') zone_id = zone_id.upper() # Corsica 2a/b case. if kind == 'town': counter['towns'] += 1 new_zones.append( GeoZone .objects(code=zone_id, level='fr:commune') .valid_at(date.today()) .first()) elif kind == 'county': counter['counties'] += 1 new_zones.append( GeoZone .objects(code=zone_id, level='fr:departement') .valid_at(date.today()) .first()) elif kind == 'region': counter['regions'] += 1 # Only link to pre-2016 regions which kept the same id. new_zones.append( GeoZone .objects(code=zone_id, level='fr:region') .first()) elif kind == 'epci': counter['epcis'] += 1 new_zones.append( GeoZone .objects(code=zone_id, level='fr:epci') .valid_at(dataset.created_at.date()) .first()) else: new_zones.append(zone) elif zone.id.startswith('country-subset/fr'): counter['zones'] += 1 subset, country, kind = zone.id.split('/') if kind == 'dom': counter['drom'] += 1 new_zones.append(drom_zone) elif kind == 'domtom': counter['dromcom'] += 1 new_zones.append(dromcom_zone) elif zone.id.startswith('country/'): counter['zones'] += 1 counter['countries'] += 1 new_zones.append(zone.id.replace('/', ':')) elif zone.id.startswith('country-group/'): counter['zones'] += 1 counter['countrygroups'] += 1 new_zones.append(zone.id.replace('/', ':')) else: new_zones.append(zone) dataset.update( spatial=SpatialCoverage( granularity=dataset.spatial.granularity, zones=[getattr(z, 'id', z) for z in new_zones if z] ) ) log.info(Formatter().vformat('''Summary Processed {zones} zones in {datasets} datasets: - {countrygroups} country groups (World/UE) - {countries} countries - France: - {regions} regions - {counties} counties - {epcis} EPCIs - {towns} towns - {drom} DROM - {dromcom} DROM-COM ''', (), counter)) log.info('Done')
def get(self, count=21): return_list = [] current_count = 1 while current_count <= count: for template in self.templates: current_count += 1 with open(template, 'r') as file: data = file.read() names = [ fn for _, fn, _, _ in Formatter().parse(data) if fn is not None ] computer = Computer() f = File() org = Organization() emp = Employee() net = Network() properties = {} for item in names: if item == 'guid': properties['guid'] = str(uuid.uuid4()) elif item == 'timestamp': properties['timestamp'] = pendulum.now().add( days=random.randint(1, 15), hours=random.randint(1, 24), minutes=random.randint(1, 60), seconds=random.randint(1, 60)).to_iso8601_string() elif item == 'creation_time': properties['creation_time'] = pendulum.now().subtract( days=random.randint(1, 15), hours=random.randint(1, 24), minutes=random.randint(1, 60), seconds=random.randint(1, 60)).to_iso8601_string() elif item == 'previous_creation_time': properties['previous_creation_time'] = properties[ 'timestamp'] elif item == 'process_id': properties['process_id'] = random.randint(2000, 4000) elif item == 'thread_id': properties['thread_id'] = random.randint(2000, 4000) elif item == 'image_path': f = File() properties['image_path'] = '{}\\{}'.format( f.full_path, f.filename) elif item == 'current_directory': properties['current_directory'] = '{}'.format( f.full_path) elif item == 'parent_path': properties['parent_path'] = '{}'.format(f.full_path) elif item == 'process_state': properties['process_state'] = 'Loaded' elif item == 'computer_name': properties['computer_name'] = computer.name elif item == 'domain': properties['domain'] = org.domain elif item == 'user': properties['user'] = emp.username elif item == 'protocol': properties['protocol'] = net.protocol elif item == 'source_ip': properties['source_ip'] = Network(private=True).ipv4 elif item == 'source_port': properties['source_port'] = random.randint(100, 5000) elif item == 'destination_ip': properties['destination_ip'] = net.ipv4 elif item == 'destination_port': properties['destination_port'] = random.randint( 100, 5000) elif item == 'target_filename': properties['target_filename'] = f.filename elif item == 'registry_object': properties['registry_object'] = '{}'.format( Registry().path) elif item == 'registry_value': properties['registry_value'] = properties[ 'registry_object'].split('\\', 1)[1] elif item == 'exe': properties['exe'] = f.filename elif item == 'sha1': properties['sha1'] = f.sha1 elif item == 'sha256': properties['sha256'] = f.sha256 elif item == 'signed': properties['signed'] = f.signed elif item == 'signature': properties['signature'] = f.signature elif item == 'signature_status': properties['signature_status'] = f.signature_status elif item == 'integrity_level': properties['integrity_level'] = random.choice( ['Low', 'Medium', 'High']) return_list.append(data.format(**properties)) return return_list
def __init__(self, context=None):
    Formatter.__init__(self)
    self.context = context
def reverse_formats(format_string, resolved_strings): """ Reverse the string method format for a list of strings. Given format_string and resolved_strings, for each resolved string find arguments that would give ``format_string.format(**arguments) == resolved_string``. Each item in the output corresponds to a new column with the key setting the name and the values representing a mapping from list of resolved_strings to the related value. Parameters ---------- format_strings : str Format template string as used with str.format method resolved_strings : list List of strings with same pattern as format_string but with fields filled out. Returns ------- args : dict Dict of the form ``{field: [value_0, ..., value_n], ...}`` where values are in the same order as resolved_strings, so: ``format_sting.format(**{f: v[0] for f, v in args.items()}) == resolved_strings[0]`` Examples -------- >>> paths = ['data_2014_01_03.csv', 'data_2014_02_03.csv', 'data_2015_12_03.csv'] >>> reverse_formats('data_{year}_{month}_{day}.csv', paths) {'year': ['2014', '2014', '2015'], 'month': ['01', '02', '12'], 'day': ['03', '03', '03']} >>> reverse_formats('data_{year:d}_{month:d}_{day:d}.csv', paths) {'year': [2014, 2014, 2015], 'month': [1, 2, 12], 'day': [3, 3, 3]} >>> reverse_formats('data_{date:%Y_%m_%d}.csv', paths) {'date': [datetime.datetime(2014, 1, 3, 0, 0), datetime.datetime(2014, 2, 3, 0, 0), datetime.datetime(2015, 12, 3, 0, 0)]} >>> reverse_formats('{state:2}{zip:5}', ['PA19104', 'PA19143', 'MA02534']) {'state': ['PA', 'PA', 'MA'], 'zip': ['19104', '19143', '02534']} See also -------- str.format : method that this reverses reverse_format : method for reversing just one string using a pattern """ from string import Formatter fmt = Formatter() # get the fields from the format_string field_names = [i[1] for i in fmt.parse(format_string) if i[1]] # itialize the args dict with an empty dict for each field args = {field_name: [] for field_name in field_names} for resolved_string in resolved_strings: for field, value in reverse_format(format_string, resolved_string).items(): args[field].append(value) return args
def __init__(self, namespace):
    Formatter.__init__(self)
    self.initial_namespace = namespace
    self.namespace = self.initial_namespace
def generator(ir, parameters): """Generate UFC code for a finite element.""" logger.info("Generating code for finite element:") logger.info("--- family: {}".format(ir.family)) logger.info("--- degree: {}".format(ir.degree)) logger.info("--- value shape: {}".format(ir.value_shape)) logger.info("--- name: {}".format(ir.name)) d = {} d["factory_name"] = ir.name d["signature"] = "\"{}\"".format(ir.signature) d["geometric_dimension"] = ir.geometric_dimension d["topological_dimension"] = ir.topological_dimension d["cell_shape"] = ir.cell_shape d["space_dimension"] = ir.space_dimension d["value_rank"] = len(ir.value_shape) d["value_size"] = ufl.product(ir.value_shape) d["reference_value_rank"] = len(ir.reference_value_shape) d["reference_value_size"] = ufl.product(ir.reference_value_shape) d["degree"] = ir.degree d["family"] = "\"{}\"".format(ir.family) d["num_sub_elements"] = ir.num_sub_elements import ffcx.codegeneration.C.cnodes as L d["value_dimension"] = value_dimension(L, ir.value_shape) d["reference_value_dimension"] = reference_value_dimension(L, ir.reference_value_shape) statements = evaluate_reference_basis(L, ir, parameters) d["evaluate_reference_basis"] = L.StatementList(statements) statements = evaluate_reference_basis_derivatives(L, ir, parameters) d["evaluate_reference_basis_derivatives"] = L.StatementList(statements) statements = transform_reference_basis_derivatives(L, ir, parameters) d["transform_reference_basis_derivatives"] = L.StatementList(statements) statements = transform_values(L, ir, parameters) d["transform_values"] = L.StatementList(statements) statements = tabulate_reference_dof_coordinates(L, ir, parameters) d["tabulate_reference_dof_coordinates"] = L.StatementList(statements) statements = create_sub_element(L, ir) d["sub_element_declaration"] = sub_element_declaration(L, ir) d["create_sub_element"] = statements # Check that no keys are redundant or have been missed from string import Formatter fieldnames = [ fname for _, fname, _, _ in Formatter().parse(ufc_finite_element.factory) if fname ] assert set(fieldnames) == set( d.keys()), "Mismatch between keys in template and in formattting dict" # Format implementation code implementation = ufc_finite_element.factory.format_map(d) # Format declaration declaration = ufc_finite_element.declaration.format(factory_name=ir.name) return declaration, implementation
import string

# string module constants
print(string.ascii_letters)
print(string.ascii_lowercase)
print(string.ascii_uppercase)
print(string.digits)
print(string.hexdigits)
print(string.whitespace)  # ' \t\n\r\x0b\x0c'
print(string.punctuation)

s = ' Welcome TO \n\n JournalDev '
print(string.capwords(s))

from string import Formatter
formatter = Formatter()
print(formatter.format('{website}', website='JournalDev'))
print(formatter.format('{} {website}', 'Welcome to', website='JournalDev'))
print('{} {website}'.format('Welcome to', website='JournalDev'))

from string import Template
t = Template('$name is the $title of $company')
s = t.substitute(name='Pankaj', title='Founder', company='JournalDev.')
print(s)
import string

assert string.ascii_letters == 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
assert string.ascii_lowercase == 'abcdefghijklmnopqrstuvwxyz'
assert string.ascii_uppercase == 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
assert string.digits == '0123456789'
assert string.hexdigits == '0123456789abcdefABCDEF'
assert string.octdigits == '01234567'
assert string.punctuation == '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'

# FIXME
#assert string.whitespace == ' \t\n\r\x0b\x0c', string.whitespace
#assert string.printable == '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ \t\n\r\x0b\x0c'

assert string.capwords('bla bla', ' ') == 'Bla Bla'

from string import Template
s = Template('$who likes $what')
# TODO:
# r = s.substitute(who='tim', what='kung pow')
# print(r)

from string import Formatter
f = Formatter()
def __init__(self, **kwargs):
    super().__init__(**kwargs)
    self.formatter = Formatter()
    self.translator = Translator()
def get_multi_params(tmpl_path: str):
    """ Get multiparameters from template file. """
    with open(tmpl_path) as handle:
        template = handle.read()
    return [fname for _, fname, _, _ in Formatter().parse(template) if fname]
def format_needs_cache(cls, format):
    format_keys = {f[1] for f in Formatter().parse(format)}
    return any(key in cls.KEYS_REQUIRING_CACHE for key in format_keys)
import matplotlib.pyplot as plt import numpy as np import tensorflow as tf import time from IPython import display as ipythondisplay ##################################### def custom_progress_text(message): import progressbar from string import Formatter message_ = message.replace('(', '{') message_ = message_.replace(')', '}') keys = [key[1] for key in Formatter().parse(message_)] ids = {} for key in keys: if key is not None: ids[key] = float('nan') msg = progressbar.FormatCustomText(message, ids) return msg def create_progress_bar(text=None): import progressbar if text is None: text = progressbar.FormatCustomText('') bar = progressbar.ProgressBar(widgets=[ progressbar.Percentage(),
# -*- test-case-name: twisted.logger.test.test_flatten -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Code related to "flattening" events; that is, extracting a description of all
relevant fields from the format string and persisting them for later
examination.
"""

from string import Formatter
from collections import defaultdict

aFormatter = Formatter()


class KeyFlattener:
    """
    A L{KeyFlattener} computes keys for the things within curly braces in
    PEP-3101-style format strings as parsed by L{string.Formatter.parse}.
    """

    def __init__(self):
        """
        Initialize a L{KeyFlattener}.
        """
        self.keys = defaultdict(lambda: 0)

    def flatKey(self, fieldName, formatSpec, conversion):
        """
def __init__(self, env):
    SandboxedFormatterMixin.__init__(self, env)
    Formatter.__init__(self)
def get_action_params(action):
    return [fn for _, fn, _, _ in Formatter().parse(action) if fn is not None]
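# Example call for get_action_params above (illustrative only): the field names are
# pulled straight from the braces in the action template.
print(get_action_params("copy {src} to {dst}"))  # -> ['src', 'dst']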
def on_preview(self, command: Command, inputs: CommandInputs, args, input_values): max_x = self.stacked_dict['machine_width'] / 10 max_y = self.stacked_dict['machine_depth'] / 10 max_z = self.stacked_dict['machine_height'] / 10 center_is_zero = self.stacked_dict['machine_center_is_zero'] display_machine(self.graphics, max_x, max_y, max_z, center_is_zero) stacked_dict = { **self.global_settings_defaults, **self.computed_values, **self.changed_machine_settings, **self.changed_settings } interpolated_end_gcode = Formatter().vformat( stacked_dict['machine_end_gcode'], [], kwargs=stacked_dict) f = GCodeFormatter() interpolated_start_gcode = f.vformat( stacked_dict['machine_start_gcode'], [], kwargs=stacked_dict) last_minute_swaps = { 'machine_start_gcode': interpolated_start_gcode, 'machine_end_gcode': interpolated_end_gcode, **f.prepend_dict } settings = deepcopy({ **self.computed_values, **self.changed_machine_settings, **self.changed_settings, **last_minute_swaps }) bodies = input_values['selection'] slider = self.layer_slider if settings == self.running_settings and self.running_models == bodies: if self.engine_endpoint and self.engine_endpoint['done']: layer_keys = self.engine_endpoint['layers'].keys() slider.minimumValue = min(layer_keys) slider.maximumValue = max(layer_keys) linework_group = self.graphics.addGroup() if not center_is_zero: transform = linework_group.transform transform.translation = Vector3D.create( -max_x / 2, -max_y / 2, 0) linework_group.transform = transform for body in bodies: body.isVisible = False for mesh in self.engine_endpoint['mesh']: self.graphics.addMesh( CustomGraphicsCoordinates.create( mesh.nodeCoordinatesAsDouble), mesh.nodeIndices, [], []).setOpacity(0.2, True) cached_layers = self.engine_endpoint['precomputed_layers'] layer_range = set(range(slider.valueOne, slider.valueTwo)) line_types = { v.value for v in LineType if v in self.layer_type_inputs and self.layer_type_inputs[v].value } for id in layer_range.intersection( self.engine_endpoint['layers'].keys()): cached_layer = cached_layers[id] original_layer = self.engine_endpoint['layers'][id] for type in line_types.intersection( original_layer['by_type'].keys()): compute_layer_type_preview(original_layer, id, type, cached_layers) for body in cached_layer[type]: new_line = linework_group.addBRepBody(body) new_line.depthPriority = 2 AppObjects().app.activeViewport.refresh() self.info_box.text = 'preview visible' estimates = self.engine_endpoint['estimates'] time_elements = list([(k, estimates[k]) for k in TIME_KEYS if k in estimates]) total_time = sum([v for k, v in time_elements]) time_elements = [('total_time', total_time)] + time_elements time_messages = '\n'.join([ '%s: %s' % (k, str(datetime.timedelta(seconds=round(v)))) for k, v in time_elements ]) self.time_box.text = time_messages return self.running_settings = settings print('setting', settings) self.running_models = bodies for body in bodies: body.isVisible = False extruder_count = self.stacked_dict['machine_extruder_count'] (slice_msg, meshes) = get_message_and_mesh_for_engine( bodies, dict_to_setting_list(settings), 15, extruder_count) for mesh in meshes: self.graphics.addMesh( CustomGraphicsCoordinates.create(mesh.nodeCoordinatesAsDouble), mesh.nodeIndices, [], []) def on_engine(args: CustomEventArgs): layer_keys = self.engine_endpoint['layers'].keys() if len(layer_keys): slider.minimumValue = min(layer_keys) slider.maximumValue = max(layer_keys) if args.additionalInfo == 'done': self.cancel_engine() command.doExecutePreview() if 
args.additionalInfo == 'exception': AppObjects().ui.messageBox(repr(endpoint['exception'])) handler = event(CustomEventHandler, on_engine) endpoint = dict(handler=handler, canceled=False, done=False, estimates={}, layers={}, gcode_file=None, exception=None, mesh=meshes, precomputed_layers=defaultdict(dict)) self.cancel_engine() self.engine_event.add(handler) self.engine_endpoint = endpoint self.info_box.text = 'computing preview ...' self.time_box.text = 'computing preview ...' threading.Thread(target=run_engine_in_other_thread, args=[slice_msg, endpoint]).start()
def resolve_shortcuts(shortcuts): """ Resolves dependencies among shortcuts, (cases where one shortcut uses another one). Shortcuts are processed in topological order. # Arguments shortcuts: A {name: query} dictionary, where some queries contain placeholders, i.e. depend on other shortcuts. # Returns A new {name: query} dictionary with all placeholders properly filled in. # Raises ValueError: If there is an unmet or cyclic dependency. # Examples >>> rs = resolve_shortcuts(dict(x="a", y="{x}b", z="{y}c")) >>> rs == {'x': 'a', 'y': 'ab', 'z': 'abc'} True >>> rs = resolve_shortcuts(dict(x="a{x}")) Traceback (most recent call last): ... ValueError: Shortcuts contain a cyclic dependency. >>> rs = resolve_shortcuts(dict(x="a{y}")) Traceback (most recent call last): ... ValueError: Unmet dependency `y`. """ # Uses Kahn's algorithm for topological sorting # --- # Set of outgoing edges for each shortcut. # An edge A ---> B means that the shortcut B # depends on the shortcut A. edges = {name: set() for name in shortcuts.keys()} # Number of incoming edges for each shortcut. name2nb_deps = {name: 0 for name in shortcuts.keys()} # Fill in `edges` and `name2nb_deps` for name, query in shortcuts.items(): dependencies = [ field_name for _, field_name, __, ___ in Formatter().parse(query) if field_name is not None ] name2nb_deps[name] = len(dependencies) for dependency in dependencies: if dependency not in edges: raise ValueError("Unmet dependency `%s`." % dependency) edges[dependency].add(name) topological_order = [] candidates = { name for name, nb_deps in name2nb_deps.items() if nb_deps == 0 } while len(candidates) > 0: candidate = candidates.pop() topological_order.append(candidate) for name in edges[candidate]: name2nb_deps[name] -= 1 if name2nb_deps[name] == 0: candidates.add(name) if len(topological_order) != len(shortcuts): raise ValueError("Shortcuts contain a cyclic dependency.") new_shortcuts = dict() for name in topological_order: new_shortcuts[name] = shortcuts[name].format(**new_shortcuts) assert len(shortcuts) == len(new_shortcuts) return new_shortcuts