Example #1
    def get_pre_and_post_amble(self, fname=None):
        """Split FNAME (defaults to self.fname) into a preamble (the lines
        before the first dated entry) and a postamble (the lines after the
        last dated entry)."""
        if not fname:
            fname = self.fname
        lines = u.slurp(fname)
        self.preamble = ""
        self.postamble = []
        # Sentinel: dateutil's default date for an empty string (today, midnight)
        now = dateutil.parser.parse("")
        for line in lines:
            try:
                stub = line.split(" ", 1)[0]
                if '=' in stub:
                    stub = stub.split("=", 1)[0]
                if dateutil.parser.parse(stub) != now:
                    break
            except (TypeError, ValueError):
                # First token isn't a date, so the line is still preamble.
                pass
            self.preamble += line + "\n"

        for line in reversed(lines):
            try:
                if dateutil.parser.parse(line.split(" ", 1)[0]) != now:
                    break
            except (TypeError, ValueError):
                pass
            self.postamble.insert(0, line)

        # Drop leading blank or whitespace-indented lines from the postamble.
        while self.postamble and self.postamble[0] and self.postamble[0][0] in " \t\n":
            self.postamble = self.postamble[1:]
        self.postamble = "\n".join(self.postamble)
        return (self.preamble, self.postamble)
Example #2
    def get_nightly_records(self, fname):
        """Load the FNAME datafile, and split the nightly data into individual
        records. Return a list of those records."""
        #pylint: disable=no-self-use

        # Make a list of the records in the nightly data file.  We
        # used to do this with a regex, but knowing the starting tag
        # lets us find the ending tag with more certainty.  These
        # files are so wonky that relying on a regex that depends on
        # a minimum amount of blank-line separation between records
        # fails once in a while.
        records = []
        record = []
        curr_record_type = None
        nightly = u.slurp(fname, "latin-1")
        for line in nightly.split("\n"):
            if not curr_record_type:
                if not line.strip():
                    continue
                curr_record_type, _ = model.FBOTableEntry().parse_line(line)
                record.append(line)
                continue
            record.append(line)
            if line == "</%s>" % curr_record_type:
                records.append(record)
                record = []
                curr_record_type = None

        return records
Example #3
 def __init__(self, pdfname, custom):
     """PDFNAME is the filename of the pdf"""
     if not "<</Creator(Bank of America)/Author(Bank of America)" in u.slurp(
             pdfname):
         self.bank = ""
         return
     self.bank = "Bank of America"
     statement.Statement.__init__(self, pdfname, custom)
Example #4
def edit_multiline(what, args):
    with util.TemporaryDirectory(basedir=args.tempdir) as tempdir:
        filepath = tempdir + '/' + what
        util.create_file(filepath)
        edit_command = args.editor + ' ' + filepath
        if os.system(edit_command) == 0:
            res = util.slurp(filepath)
            return res
    return ''
Example #5
def part2():
    """
    stack the layers: first layer in front, last layer in back
    0 = black, 1 = white, 2 = transparent
    therefore black/white overlap everything, and transparent overlaps nothing
    """
    image = slurp('inputs/q08')
    width, height = 25, 6
    combined = combineLayers(image, width, height)
    print(layerToImage(combined, width))
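The combineLayers helper is not shown in this example. A minimal sketch of what it might look like under the stacking rule in the docstring (the name and signature come from the call above; the body is an assumption, not the source project's code):

def combineLayers(image, width, height):
    # Split the raw pixel string into layers of width*height characters.
    size = width * height
    layers = [image[i:i + size] for i in range(0, len(image), size)]
    # For each position, the first non-transparent ('2') pixel wins,
    # scanning from the front layer to the back layer.
    return ''.join(
        next((layer[pos] for layer in layers if layer[pos] != '2'), '2')
        for pos in range(size)
    )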
Example #6
def part1():
    """
    image is split into layers of width*height
    find layer with smallest number of zeros
    return number of 1s * number of 2s
    """
    image = slurp('inputs/q08')
    width, height = 25, 6
    layers = split_every(width * height, image)
    fewestZeroLayer = min(layers, key=lambda l: l.count('0'))
    print(fewestZeroLayer.count('1') * fewestZeroLayer.count('2'))
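split_every is likewise defined elsewhere in the source project; a plausible helper with the signature used above (an assumption, shown only to make the chunking step concrete):

def split_every(n, seq):
    # Break SEQ into consecutive chunks of length N (the last chunk may be shorter).
    return [seq[i:i + n] for i in range(0, len(seq), n)]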
Example #7
    def fetch_cals(self, cals=None):
        """
        Download calendars to disk.
    
        cals is a list of the calendars we should process.  Just ignore the others.
        """
        # Turn cals into a dict mapping calendar names to URLs.  If no list
        # was given, process all of the configured calendars.
        if cals is None:
            cals = list(self.calendars)
        cals = {k: v for k, v in self.calendars.items() if k in cals}

        http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
        for name, url in cals.items():
            fname = os.path.join(self.config['ics dir'], self.config['prefix']+name+".ics")
            org_fname = os.path.join(self.config['org dir'], self.config['prefix']+name+".org")

            # Be polite, don't fetch more than once every 10 minutes
            if os.path.exists(fname):
                mtime = os.stat(fname).st_mtime
                t = datetime.datetime.strptime(time.ctime(mtime), "%a %b %d %H:%M:%S %Y")
                now = datetime.datetime.now()
                delta = now - t
                logger.debug("{0}: {1}".format(fname, delta))
                if delta.total_seconds() < 60*10:
                    if logger.getEffectiveLevel() > logging.DEBUG:
                        # be polite, unless we're debugging, in which case, be ruthless
                        continue

            # Fetch calendar
            logger.info("Fetching " + name)
            r = http.request('GET', url)
            if r.status != 200:
                # TODO: send me an email telling me the calendar system is broken if we have net connectivity right now
                pass

            if r.data:
                if os.path.exists(fname) and self.remove_dtstamp(u.slurp(fname))==self.remove_dtstamp(r.data):
                    ## Mark the data files with current time so we know they're up to date
                    with hide("running", "output"):
                        logger.debug("Updating timestamp for " + fname)
                        local("touch " + fname)
                        logger.debug("Updating timestamp for " + fname)
                        local("touch " + org_fname)
                    continue

                # Back up the ical file before we blow it away.
                if logger.getEffectiveLevel() <= logging.INFO:
                    logger.debug("Backing up {0} to {0}".format(fname))
                    with hide("running", "output"):
                        local('cp {0} {0}.bak'.format(fname))

                with open(fname, 'wb') as OUTF:  # r.data is bytes
                    logger.info('Writing ' + fname)
                    OUTF.write(r.data)
            else:
                logger.error("ICS calendar data fetch came back blank for {0}?\n".format(name))
Example #8
def fetch_json(fname):
    """Read a json document from a file, minify to remove comments (json
    parser pukes b/c comments aren't spec), return data structure
    matching document.

    """
    import simplejson as json
    from json_minify import json_minify as minify # https://github.com/getify/JSON.minify/tree/python
    from collections import OrderedDict
    return json.loads(minify(u.slurp(os.path.expanduser(fname))),
                      object_pairs_hook=OrderedDict)
Example #9
 def load_file(fname, files_to_load):
     log.debug("Loading " + fname)
     lines = u.slurp(fname)
     ledger = ""
     
     for line in lines:
         if line.startswith("include "):
             new_fname = line.split("include ",1)[1]
             if not new_fname.startswith("/"):
                 dirs = os.path.split(fname)[:-1]
                 new_fname = os.path.join(os.path.join(*dirs), new_fname)
             files_to_load.append(new_fname)
         else:
             ledger += line + "\n"
     return call_ledger("-f - {0} xml {1}".format(ledger_opts, search), fname, ledger)
Example #10
def readConfigs(file=None):
    if not file:
        # possibly should run this through makeJsonconfFile to default the dir
        file = "conf/extract.json"
    # print("read from file %s" % file, file=sys.stderr)
    try:
        r = util.slurp(open(file))
    except IOError:
        print("JSON file %s not found" % file, file=sys.stderr)
        raise
    # print("have read a string of len %s" % len(r), file=sys.stderr)
    try:
        # print("trying to decode", file=sys.stderr)
        d = ObjectDecoder().decode(r)
        # print("decoded to object of type %s" % type(d), file=sys.stderr)
        return d
    except ValueError as e:
        print("Bad JSON syntax in %s [%s]" % (file, e), file=sys.stderr)
        raise
Example #12
def part2(filename: str, target_output: int) -> Optional[Tuple[int, int]]:
    """
    Determine the noun and verb (memory positions 1 and 2) which will result
    in the target output at position 0.
    """
    code = util.slurp(filename)

    # The solution requires us to return 100 * noun + verb. Therefore it's a
    # good bet that both noun and verb are positive and have a maximum of two
    # digits.
    for noun in range(100):
        for verb in range(100):
            i = intcode.IntcodeSim(code)
            i.arr[1] = noun
            i.arr[2] = verb
            i.run()
            if i.arr[0] == target_output:
                return noun, verb

    return None
Example #13
    def goose_write(self, dirname=None):
        """Writes any needed migration files to the migrations directory
        specified by DIRNAME.  Leave DIRNAME as None to use the db
        directory next to this module as the migrations directory.

        Returns a list of paths to created files.
        """
        if not dirname:
            dirname = os.path.join(os.path.dirname(__file__), "db")
        dirname = os.path.join(dirname, self.db_conf['driver'])
        os.makedirs(dirname, exist_ok=True)
        created = []
        for fname, migration in self.goose().items():
            fname = os.path.join(dirname, fname)
            if os.path.exists(fname):
                if u.slurp(fname) == migration:
                    continue
                debug("Migration %s already exists. Overwriting.", fname)
            created.append(fname)
            info("Writing migration to %s", fname)
            with open(fname, 'w') as fh:
                fh.write(migration)
        return created
Example #14
    def __init__(self, fname, custom):
        """FNAME is the filename of the pdf or csv

        CUSTOM is a data structure that lets us customize the data
        import.  For example, it could let us identify the people
        associated with credit card numbers.  Each bank's parser can
        use it differently.

        This init sets some defaults.

        When inheriting:

         * call the parent (this func) to get this setup done

         * check that the statement matches the bank type you're
           implementing.  If it doesn't, set self.bank to "" and return.

         * set self.bank to the bank name

        """

        self.pdfname_full = fname
        self.pdfname = os.path.split(fname)[1]
        self.custom = custom
        self.beancount_fname = os.path.splitext(fname)[0] + ".beancount"
        self.set_year_month()

        # Init some vars
        self.daily_bal = {}
        self.transactions = []
        self.paid_checks_total = 0
        self.deposits = 0

        if fname.endswith("pdf"):
            self.text = u.pdf2txt(fname)
        else:
            self.text = u.slurp(fname, decode=None)
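Example #3 above follows this contract for Bank of America. For reference, a minimal generic sketch of the same pattern, using a hypothetical bank name and marker string (not taken from the source project):

class ExampleBankStatement(statement.Statement):
    def __init__(self, fname, custom):
        """Hypothetical subclass illustrating the inheritance contract above."""
        # Check that the statement matches this bank; bail out if it doesn't.
        if "Example Bank" not in u.slurp(fname):
            self.bank = ""
            return
        self.bank = "Example Bank"
        # Call the parent to get the shared setup done.
        statement.Statement.__init__(self, fname, custom)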
Example #15
def get_status():
    """Return the current status"""
    return {'hostname': get_hostname(),
            'time_zone': util.slurp('/etc/timezone').rstrip()}
Example #16
class Direction(Enum):
    "each direction is 90' right of the previous"
    up = 0
    right = 1
    down = 2
    left = 3


MOVEMENT = {
    Direction.up: (0, -1),
    Direction.right: (1, 0),
    Direction.left: (-1, 0),
    Direction.down: (0, 1),
}

code = slurp('inputs/q11')
i = IntcodeSim(code)

# Robot position
pos = (0, 0)
# Direction the robot is currently facing
facing = Direction.up
# Dict mapping co-ords to the color painted
painted = {}

# For part2, robot is starting on a white panel instead
# Comment out for part1.
painted[pos] = Color.white


def handleInput():
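    # Body not shown in the example.  A plausible version, assuming Color is an
    # Enum with black = 0 and white = 1 (Color isn't defined in this snippet):
    # the robot reads the color of the panel it is currently standing on,
    # defaulting to black for unpainted panels.
    return painted.get(pos, Color.black).value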
Example #17
 def test_part2_puzzle(self):
     answer = find_message(util.slurp("inputs/q16"))
     self.assertEqual(answer, "27683551")
Example #18
 def test_part1_puzzle(self):
     answer = apply_fft(util.slurp("inputs/q16"), times=100)[0:8]
     self.assertEqual(answer, "30369587")
Example #19
import util
import parse
import byteconv
import bytedump
import backend.interp as interp

ast = parse.parse(util.slurp('main.tach'))
opcodes = byteconv.byteconv(ast)
with open('cache/out.txt', 'w') as fil:
    fil.write(bytedump.strdump(opcodes))
interp.byterun(opcodes)
Example #20
 def load_from_text(self):
     self.text = u.slurp(self.fname_text)