Example #1
File: __main__.py  Project: gwk/muck
def muck_deps(ctx, args):
  '''
  `muck deps` command: print dependency information.
  '''
  args = frozenset(args) # deduplicate arguments.
  targets = args or frozenset(ctx.db.all_target_names())

  for target in sorted(targets):
    update_dependency(ctx, target, dependent=None)

  roots = set(args) or { t for t in targets if t not in ctx.dependents }
  roots.update(t for t, s in ctx.dependents.items() if len(s) > 1)

  def visit(depth, target):
    deps = all_deps_for_target(ctx, target)
    dependents = ctx.dependents[target]
    if depth == 0 and len(dependents) > 0:
      suffix = ' (dependents: {}):'.format(' '.join(sorted(dependents)))
    elif len(dependents) > 1: suffix = '*'
    elif len(deps) == 0:      suffix = ''
    else:                     suffix = ':'
    outL('  ' * depth, target, suffix)
    if depth > 0 and len(dependents) > 1: return
    for dep in deps:
      visit(depth + 1, dep)

  for root in sorted(roots):
    outL()
    visit(0, root)
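`outL` from pithy.io appears in every example here; judging from the calls above (`outL('  ' * depth, target, suffix)` for an indented line, bare `outL()` for a blank line), it seems to write its arguments to stdout concatenated with no separator and followed by a newline. A minimal stand-in under that assumption, so the snippets can be run without pithy installed:

# Stand-in for pithy.io.outL, based on the assumed behavior described above:
# concatenate the arguments and append a newline.
def outL(*items) -> None:
  print(*items, sep='', end='\n')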
Example #2
File: legs.py  Project: gwk/legs
def match_string(nfa:NFA, fat_dfa:DFA, min_dfa:DFA, string: str) -> None:
  '''
  Test `nfa`, `fat_dfa`, and `min_dfa` against each other by attempting to match `string`.
  This is tricky because each is subtly different:
  * NFA does not have any transitions to `invalid`.
  * fat DFA does not disambiguate between multiple match states.
  Therefore the minimized DFA is most correct,
  but for now it seems worthwhile to keep the ability to check them against each other.
  '''
  nfa_matches = nfa.match(string)
  fat_dfa_matches = fat_dfa.match(string)
  if nfa_matches != fat_dfa_matches:
    if not (nfa_matches == frozenset() and fat_dfa_matches == frozenset({'invalid'})): # allow this special case.
      exit(f'match: {string!r}; inconsistent matches: NFA: {nfa_matches}; fat DFA: {fat_dfa_matches}.')
  min_dfa_matches = min_dfa.match(string)
  if not (fat_dfa_matches >= min_dfa_matches):
    exit(f'match: {string!r}; inconsistent matches: fat DFA: {fat_dfa_matches}; min DFA: {min_dfa_matches}.')
  assert len(min_dfa_matches) <= 1, min_dfa_matches
  if min_dfa_matches:
    outL(f'match: {string!r} -> {first_el(min_dfa_matches)}')
  else:
    outL(f'match: {string!r} -- <none>')
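The consistency checks in `match_string` come down to two set relations: the min DFA's matches must be a subset of the fat DFA's, and the NFA may disagree with the fat DFA only in the single allowed case where the NFA matches nothing while the fat DFA reports `invalid`. A small illustration with made-up match sets:

# Hypothetical match sets (not from a real lexer) showing both rules passing.
nfa_matches     = frozenset()
fat_dfa_matches = frozenset({'invalid'})
min_dfa_matches = frozenset({'invalid'})
assert nfa_matches == fat_dfa_matches or (
  nfa_matches == frozenset() and fat_dfa_matches == frozenset({'invalid'}))
assert fat_dfa_matches >= min_dfa_matches # min DFA matches are a subset of fat DFA matches.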
Example #3
    continue

  for m in ws_re.finditer(line):
    ws = m.group(0)
    assert ws == ' ' or ws == '\n'

  line = line[:-1] # remove newline.

  if line.startswith('<p>'):
    checkF(not in_para, 'nested <p>')
    in_para = True
    line = line[3:] # strip opening p tag; line might now be empty again.
  else:
    checkF(in_para, 'missing previous <p>')

  is_closed = line.endswith('</p>')
  if is_closed:
    line = line[:-4] # strip closing p tag.

  # both leading and trailing space cases seem legitimate.
  #checkF(not line[0].isspace() or is_leading_space_ok(line), 'leading space')
  #checkF(not line[-1].isspace(), 'trailing space')
  lines.append(line.strip())

  if is_closed:
    joined = ' '.join(l for l in lines if l)
    if joined:
      outL(joined)
    del lines[:]
    in_para = False
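This example is only a fragment: the enclosing loop and the names `ws_re`, `lines`, `in_para`, and `checkF` are defined earlier in the script and not shown. One plausible sketch of that missing context, offered purely as an assumption so the fragment can be read on its own:

import re, sys

ws_re = re.compile(r'\s')  # assumed: one match per whitespace character, so the assert
                           # above verifies that only plain spaces and newlines occur.
lines = []                 # accumulates the stripped text of the current paragraph.
in_para = False            # True while a <p> element is open.

def checkF(condition, msg):
  'Assumed helper: exit with a message when a structural check fails.'
  if not condition:
    sys.exit(msg)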
Example #4
# Find all '&…;' entity sequences in the text and output a list.
# The micra text used SGML/ISO-8879 encodings, plus many nonstandard ones.

# However the webfont.txt document also states:
# | Note that the symbols used here are in some cases abbreviations
# | (for compactness) of the ISO 8879 recommended symbols.

# webfont.txt also states that an escape syntax '<xx/' is used.
# While these appear in the PG 29765.utf8 text,
# this syntax is not present in the 0.50 texts, as verified by the check below;
# presumably they were converted to entity syntax.

import re

from pithy.io import outL
from pithy.loader import load


text = load('wb/raw-lines.txt')

entities = set()

for i, line in enumerate(text):
  if re.search(r'<[^/>]+/', line): exit(f'weird escape syntax: {line!r}')
  for m in re.finditer(r'&([^;\s]*);', line):
    e = m.group(1)
    entities.add(e)

for e in sorted(entities):
  outL(e)
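To make the two regular expressions concrete, here is what they report on a hypothetical input line (not taken from the corpus), reusing the `re` import above:

sample = 'A circumflexed e is written &ecirc; and the ae ligature &aelig;.'
assert re.search(r'<[^/>]+/', sample) is None                    # no '<xx/' escape syntax present.
assert re.findall(r'&([^;\s]*);', sample) == ['ecirc', 'aelig']  # the entity names that would be collected.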
Example #5
      outF('= {:4}m', sub_minutes)
      if m != sub_minutes:
        outF(' *** found: {}; calculated: {}', m, sub_minutes)
        valid = False

    money_match = money_re.match(line)
    if money_match:
      s = ''.join(money_match.groups())
      i = float(s)
      if i < 0:
        total_payment += i
      else:
        total_expense += i
      outF('               {: 10,.2f}', i)

    outL()


hours = total_minutes // 60
rem_minutes = int(total_minutes) % 60
time_expense = hourly_rate * total_minutes / 60

if hourly_rate:
  hourly_string = ' @ {:0.2f}/hr = ${:,.2f}'.format(hourly_rate, time_expense)
else:
  hourly_string = ''

outL()
outFL('TOTAL HOURS:   {:2}:{:02}{}', hours, rem_minutes, hourly_string)
outFL('TOTAL EXPENSE: ${:,.2f}', total_expense)
outFL('TOTAL PAYMENT: ${:,.2f}', total_payment)
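The format specifications carry most of the weight in this fragment; a quick check with plain `str.format` (no pithy required) shows what each one produces:

assert '{: 10,.2f}'.format(1234.5) == '  1,234.50'  # space sign, width 10, thousands separator, 2 decimals.
assert '{:2}:{:02}'.format(7, 5) == ' 7:05'         # hours right-aligned in width 2, minutes zero-padded.
assert '${:,.2f}'.format(1234.5) == '$1,234.50'     # dollar amounts with a thousands separator.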
Example #6
#!/usr/bin/env python3

from pithy.io import outL, errL
import locale

errL("locale: ", locale.getpreferredencoding(do_setlocale=True))
errL('…')
outL('…')
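`errL` looks like the stderr counterpart of `outL`. Assuming it mirrors `outL` (arguments concatenated, newline appended) but writes to stderr, a minimal stand-in is:

import sys

# Stand-in for pithy.io.errL under the assumption stated above:
# like outL, but writing to stderr.
def errL(*items) -> None:
  print(*items, sep='', end='\n', file=sys.stderr)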