Example #1
def _build_custom_structs():
    from collections import namedtuple as _nt
    rest = 'diff_lines emit_edited'.split()
    ur = _nt('UpdateResult', ('before_entity', 'after_entity', *rest))
    cr = _nt('CreateResult', ('created_entity', *rest))
    dr = _nt('DeleteResult', ('deleted_entity', *rest))
    attrs = 'for_update for_create for_delete'.split()
    return _nt('These', attrs)(for_update=ur, for_create=cr, for_delete=dr)
def _define_higher_level_functions():
    """We are adamant (for now) that the result of indexing a single node tree
    be a *stream* (iterator) of name-value pairs that might go in to making an
    index; it's up to the client to consume this stream and cherry-pick (or
    otherwise process) the elements in a manner appropriate to the use case.
    Having said that, here's an example all-purpose ting
    """

    def export():
        yield 'public', 'tree_index_via', tree_index_via
        yield 'public', 'pretend_big_index', pretend_big_index
        yield 'protected', '_named_tuple_for_big_index', _BigIndex

    def to_node_tree_index_items(self):
        return ((k, tree_index_via(k, tup))
                for k, tup in self.built.items())

    def pretend_big_index(root_EID, items):
        ti = tree_index_via(root_EID, items)
        return _NOT_SURE(root_EID, ti)

    def tree_index_via(root_EID, items):
        slots, cx_of = {k: None for k in simple_fields}, {}
        for k, val in items:
            if 'expanded_children' == k:
                parent_eid, cx_eids = val
                assert parent_eid not in cx_of
                cx_of[parent_eid] = cx_eids
                continue
            if slots[k] is not None:
                xx(f"wasn't expecting there to already be a {k!r}")
            slots[k] = val
        return _TreeIndex(root_EID, children_of=cx_of, **slots)

    from collections import namedtuple as _nt

    _BigIndex = _nt('_BigIndex', ('built', 'cache'))
    _BigIndex.to_node_tree_index_items = to_node_tree_index_items

    simple_fields = """
        document_depth_minmax overall_depth business_entity_cache
    """.split()

    pub_dct, prot_dct = {}, []
    for visi, k, v in export():
        if 'public' == visi:
            pub_dct[k] = v
            continue
        assert 'protected' == visi
        prot_dct.append((k, v))

    cls = _nt('_FX', pub_dct.keys())
    for k, v in prot_dct:
        setattr(cls, k, v)  # BE CAREFUL
    return cls(** pub_dct)
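As a hedged illustration of the stream the docstring insists on (the field values below are invented, and `_TreeIndex` is assumed to be a namedtuple defined elsewhere in the project over the root EID, `children_of`, and the simple fields):

fx = _define_higher_level_functions()

# a hypothetical stream of name-value pairs for one node tree
items = [
    ('overall_depth', 3),
    ('document_depth_minmax', (1, 3)),
    ('business_entity_cache', {}),
    ('expanded_children', ('root-EID', ('child-1', 'child-2'))),
]
ti = fx.tree_index_via('root-EID', items)  # folds the stream into a _TreeIndex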
Example #3
def _build_soeifjslefj():
    from collections import namedtuple as _nt
    return _nt(
        'selfkse', """
                component_path
                emacs_field_height
                emacs_field_width
                emacs_field_y
                emacs_field_x""".split())
Example #4
class BM(_nt('_Bm', 'tag, status, payload, seeds')):
    __slots__ = ()

    def identify(self, uid):
        """
        Returns a new message with updated seeds.
        Previous seeds (if any) are removed and the provided unique id is
        stored in the appropriate structure (i.e. seeds dict).
        """
        seeds = pm(**{self.tag: se(uid)})
        return self._replace(seeds=seeds)
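The `pm` and `se` helpers here are project-specific (presumably persistent-map and set constructors). A stdlib-only sketch of the same idiom, a namedtuple subclass with `__slots__ = ()` whose methods return modified copies via `_replace`:

from collections import namedtuple as _nt

class Msg(_nt('_Msg', 'tag status payload seeds')):
    __slots__ = ()  # no per-instance __dict__; stays a plain tuple underneath

    def identify(self, uid):
        # drop any previous seeds; store the uid keyed by this message's tag
        return self._replace(seeds={self.tag: {uid}})

m = Msg('alpha', 'new', b'...', seeds={})
m2 = m.identify('u-123')
assert m2.seeds == {'alpha': {'u-123'}} and m.seeds == {}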
Example #5
def _read_manifest_plist(manifestplist):
    ''' read Manifest.plist into a simple object.

    Here, the toplevel fields of the plist are converted into namedtuple
    fields. Subfields are left as dictionaries. '''

    m = _plistlib.readPlist(manifestplist)
    # here too, only convert toplevel fields into namedtuple fields
    keys = sorted(m.keys())
    vals = [m[k] for k in keys]
    manifest = _nt('manifest', ' '.join(keys))(*vals)
    return manifest
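Note that `plistlib.readPlist` was removed in Python 3.9 (`plistlib.load` on an open file object is the modern spelling). The underlying trick, building a namedtuple type on the fly from a dict's top-level keys, looks roughly like this (the helper name is made up):

from collections import namedtuple as _nt

def _nt_from_dict(typename, d):
    # only top-level keys become fields; nested values stay as plain dicts
    keys = sorted(d.keys())
    return _nt(typename, keys)(*(d[k] for k in keys))

m = _nt_from_dict('manifest', {'Version': '10.2', 'IsEncrypted': False})
assert m.Version == '10.2' and m.IsEncrypted is False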
Example #6
class API(object):
    Var = _nt("Var", ("name", "type"))
    Arg = _nt("Arg", ("name", "type", "kind"))
    Func = _nt("Func", ("name", "args", "returns"))
    Class = _nt("Class", ("name", "path", "body"))
    Module = _nt("Module", ("name", "path", "body"))
    Unknown = _nt("Unknown", ("name", "type", "info"))
Example #7
def _read_status_plist(statusfile):
    ''' read Status.plist into a simple object.

    Here, the toplevel fields of the plist are converted into namedtuple
    fields. Subfields are left as dictionaries. '''

    s = _plistlib.readPlist(statusfile)

    # only convert toplevel fields into namedtuple fields, since we have
    # encountered names that are not proper Python identifiers in some of the
    # subfields (i.e. start with number or underscore) and I do not want to
    # mangle those names.
    keys = sorted(s.keys())
    vals = [s[k] for k in keys]
    status = _nt('status', ' '.join(keys))(*vals)
    return status
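For reference, `namedtuple` has a `rename=True` option that replaces invalid field names with positional ones (`_0`, `_1`, ...); the comment above is avoiding exactly that mangling. A small illustration:

from collections import namedtuple as _nt

T = _nt('T', ['valid', 'class', '1bad'], rename=True)
assert T._fields == ('valid', '_1', '_2')  # keyword and digit-leading names get replaced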
Example #8
def _db_file_records(db):
    ''' yield all records from the Files table as namedtuples '''

    # perform the query
    q = '''SELECT * FROM Files'''
    c = db.cursor().execute(q)

    # check if columns match what we expect
    colnames = list(zip(*c.description))[0]
    # we expect the following columns in the Files table
    _expected_filerec = ('fileID', 'domain', 'relativePath', 'flags', 'file')
    if colnames != _expected_filerec:
        raise ValueError('Unexpected table layout for Files table')

    # create a namedtuple for the unprocessed file records for convenience
    record = _nt('file', ' '.join(colnames))

    for r in c:
        r = record(*r)
        # the file column contains a plist that needs additional parsing
        p = _db_parse_file_column(r.file)
        # relativePath should match
        if p.relpath != r.relativePath:
            raise ValueError('relativePath mismatch!')

        # the value in the flags field always seems to correspond to the
        # filetype when derived from the mode field (1 = RegularFile,
        # 2 = Directory, 4 = Symlink). Test this here and abort if this
        # assumption is broken.
        if (r.flags == 1 and p.filetype != FileType.RegularFile):
            raise ValueError('assumption broken on flags field')
        elif (r.flags == 2 and p.filetype != FileType.Directory):
            raise ValueError('assumption broken on flags field')
        elif (r.flags == 4 and p.filetype != FileType.Symlink):
            raise ValueError('assumption broken on flags field')

        yield _file_entry(r.fileID, r.domain, r.relativePath, p.uid, p.gid,
                          p.mtime, p.ctime, p.btime, p.inode, p.mode,
                          p.filetype, p.permissions, p.size, p.protection,
                          p.extended_attributes, p.linktarget, p.digest)
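The column-name check plus record construction above is the standard sqlite3 idiom of deriving namedtuple fields from `cursor.description`; a self-contained sketch of just that part:

import sqlite3
from collections import namedtuple as _nt

con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE Files (fileID TEXT, domain TEXT, relativePath TEXT, flags INT, file BLOB)')
con.execute("INSERT INTO Files VALUES ('abc', 'HomeDomain', 'Library/x', 1, x'00')")
cur = con.execute('SELECT * FROM Files')
Row = _nt('Row', [d[0] for d in cur.description])  # column names become namedtuple fields
rows = [Row(*r) for r in cur]
assert rows[0].domain == 'HomeDomain'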
Example #9
File: _ioimpl.py  Project: sstcam/SSDAQ
    def load_all_data(self, tm, calib=None, mapping=None):
        """ Loads all rows of data for a particular target moduel into memory (in the future a selection of modules)

            Args:
                tm (int):   The slot number of the target module

            Kwargs:
                calib (arraylike): an array with calibration coefficient that should be applied to the data
                mapping (str or arraylike): a string to select a mapping  or an array with the mapping
                                            ['ssl2colrow','ssl2asic_ch','raw']
        """
        if calib is None:
            calib = 1.0
        if mapping is None:
            mapping = self.map
        elif isinstance(mapping, str):
            if mapping == "raw":
                mapping = np.arange(N_TM_PIX)
            else:
                try:
                    mapping = ss_mappings.__getattribute__(mapping)
                except AttributeError:
                    raise ValueError("No mapping found with name %s" % mapping)

        amps = np.zeros((self.n_readouts, N_TM_PIX))
        time = np.zeros(self.n_readouts, dtype=np.uint64)
        cpu_t = np.zeros(self.n_readouts)
        iro = np.zeros(self.n_readouts, dtype=np.uint64)

        for i, r in enumerate(self.read()):
            amps[i, :] = self.raw_data[tm, :] * calib
            time[i] = self.time
            cpu_t[i] = self.cpu_t
            iro[i] = self.iro

        amps = amps[:, mapping]

        ssdata = _nt("ssdata", "iro amps time cpu_t tm")
        return ssdata(iro, amps, time, cpu_t, tm)
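Example #10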
def _build_custom_structs():
    from collections import namedtuple as _nt
    these = ('emit_edited', )
    fd = _nt('DeleteResult', ('deleted_entity', *these))
    return _nt('These', ('for_delete', ))(fd)
Example #11
            if '\n' == char:
                scn.advance_by_one()
                assert scn.empty
                break

            if '"' == char:
                xx("have fun parsing escaped quotes")

            xx(f"unexpected character {char!r}")

    return spans_via_line


# == Models

_MinimalSchema = _nt('MinimalSchema', ('field_name_keys', ))
_MinimalEntity = _nt('MinimalEntity', ('core_attributes', ))


def _AST_definitions():
    yield 'line', 'sexps', 'as', 'cells', 's', 'as', 'line_tail'
    yield 'cell', 's', 's', 'as', 'cell_content'


# == Low-level might abstract


def _build_stream_join(sep):
    def stream_join(itr):  # :[#612.4] near [#611] things we do with scanners
        scn = func(itr)
        yield scn.next()  # ..
Example #12
            os.close(fd)
        except:
            pass
    if not cr:
        cr = (env.get('LINES', 25), env.get('COLUMNS', 80))
    return int(cr[1]), int(cr[0])


# =====================================================================


class ShebangNotFoundError(Exception):
    pass


Extension = _nt("Extension", ["name", "interpreters"])
Extension.__doc__ = "The extension that will have all the associated interpreters."


class Interpreter(object):
    """The Interpreter"""
    def __init__(self,
                 name,
                 version='',
                 extension='',
                 default=False,
                 paths=None):
        # type: (str, str, str, bool, List[InterpreterPath]) -> None
        """Constructor.
        
        :param name: the name of the interpreter
Example #13
    yield 'natural_key', None
    yield 'hierarchical_container_type', use_HCT
    yield 'heading', use_heading
    yield 'document_datetime', None
    yield 'body', use_body
    yield 'children', use_children
    yield 'next', use_next
    yield 'annotated_entity_revisions', None


def _index_the_graph(graph):
    kw = {k: v for k, v in _do_index_the_graph(graph)}
    return _GraphIndex(**kw)


_GraphIndex = _nt('_GraphIndex', ('children_of', 'parent_of'))


def _do_index_the_graph(graph):

    children_of, parent_of = {}, {}

    def _add_parent_child(parent_EID, child_EID):
        if (arr := children_of.get(parent_EID)) is None:
            children_of[parent_EID] = (arr := [])
        assert child_EID not in arr
        arr.append(child_EID)

        assert child_EID not in parent_of
        parent_of[child_EID] = parent_EID
Example #14
File: math.py  Project: dolkow/xnbread
#!/usr/bin/env python3
#coding=utf8

from . import *

from collections import namedtuple as _nt

Vector3 = _nt('Vector3', 'x y z')
def vector3(factory):
	return Vector3(single(factory), single(factory), single(factory))
add_reader(vector3, 'Microsoft.Xna.Framework.Content.Vector3Reader', 'Microsoft.Xna.Framework.Vector3', True)

Rectangle = _nt('Rectangle', 'x y w h')
def rectangle(factory):
	return Rectangle(i32(factory), i32(factory), i32(factory), i32(factory))
add_reader(rectangle, 'Microsoft.Xna.Framework.Content.RectangleReader', 'Microsoft.Xna.Framework.Rectangle', True)
Example #15
def statistitican_via_collection_path(coll_path):
    def document_commits_via_title(vendor_document_title):
        """(before #history-B.4 we could get the history in one commit
        with a JOIN. but now (to accommodate rigged documents) we do it in
        two which is fine.)
        """

        c = execute(
            'SELECT notecard_based_document_ID, just_kidding_document_type '
            'FROM notecard_based_document '
            'WHERE document_title_from_vendor=?', (vendor_document_title, ))

        # Maybe we have no record of this document at all (strange)
        first_row = c.fetchone()
        if first_row is None:
            return
        assert c.fetchone() is None

        docu_ID, typ, = first_row

        if 'docu_type_common' == typ:
            return for_notecard_based_document(docu_ID)
        assert 'docu_type_rigged' == typ
        return for_rigged_document(vendor_document_title)  # ick/meh

    def for_rigged_document(vendor_document_title):
        c = execute(
            'SELECT RDC.* '
            'FROM rigged_document_commit AS RDC '
            'JOIN rigged_document AS RD USING (rigged_document_ID) '
            'WHERE RD.document_title_from_vendor=? '
            'ORDER BY datetime(RDC.normal_datetime) ',
            (vendor_document_title, ))

        # (we want to make it be commit-graph order not chrono order,
        #  but not badly enough to do it knowing that it's not covered)

        def mutable_threes():
            while True:
                row = c.fetchone()
                if not row:
                    break
                rec = RD_commit_record(*row)
                dt = datetime_via_record(rec)
                yield [dt, 'edit', rec]

        scn = _scanner_via_iterator(mutable_threes())

        # If there are no commits in the database for this docu, strange
        if scn.empty:
            return

        return docu_CIs_via_threes_scanner(scn, 'docu_type_rigged')

    def for_notecard_based_document(docu_ID):
        c = execute(
            'SELECT NBDC.* FROM notecard_based_document_commit as NBDC '
            'WHERE NBDC.notecard_based_document_ID=? '
            'ORDER BY datetime(NBDC.normal_datetime)', (docu_ID, ))

        def mutable_threes():
            while True:
                row = c.fetchone()
                if not row:
                    break
                rec = NB_commit_rec_via_row(row)
                dt = datetime_via_record(rec)
                yield [dt, 'edit', rec]

        scn = _scanner_via_iterator(mutable_threes())

        # If there are no commits in the database for this docu, strange
        if scn.empty:
            return

        return docu_CIs_via_threes_scanner(scn, 'docu_type_common')

    def docu_CIs_via_threes_scanner(scn, typ):

        scn.peek[1] = 'create'  # meh

        while True:
            three = scn.next()
            yield _DocumentCommit(*three, typ)
            if scn.empty:
                break

    # Datetime via record
    def datetime_via_record(rec):
        rec.tzinfo  # hi
        return strptime(rec.normal_datetime, '%Y-%m-%d %H:%M:%S')

    from datetime import datetime as _
    strptime = _.strptime

    # Connect to database
    from pho.document_history_._model import \
        database_via_collection_path_ as func
    # (it's a sibling file to us but we are an entrypoint file)

    db = func(coll_path)
    assert db

    # Prepare statistics
    sing = db.singleton_text

    k = 'mean_and_std'
    two_as_string = sing.get(k)
    if two_as_string is None:
        xx(f"Did you generate the statistics? Not found: {k!r}")
    mean_s, std_s = two_as_string.split(' ')
    mean, std = float(mean_s), float(std_s)

    k = 'mean_and_std_for_rigged'
    two_as_string = sing.get(k)
    if two_as_string is None:
        xx(f"Did you generate the statistics? Not found: {k!r}")
    mean_s, std_s = two_as_string.split(' ')
    mean_for_rigged, std_for_rigged = float(mean_s), float(std_s)

    from pho.document_history_._model import \
        RiggedDocumentCommitRecord_ as RD_commit_record

    NB_commit_rec_via_row = \
        db.notecard_based_document_commit_table.NBD_CI_via_row_

    execute = db.conn.execute

    # == BEGIN meh
    from dataclasses import dataclass as _dataclass
    from collections import namedtuple as _nt

    @_dataclass
    class _Statistician:
        mean: float
        std: float
        mean_for_rigged: float
        std_for_rigged: float
        document_commits_via_title: callable
        db: object

    _DocumentCommit = _nt('_DocumentCommit',
                          ('datetime', 'verb', 'record', 'document_type'))

    # == END

    return _Statistician(mean=mean,
                         std=std,
                         mean_for_rigged=mean_for_rigged,
                         std_for_rigged=std_for_rigged,
                         document_commits_via_title=document_commits_via_title,
                         db=db)
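A hypothetical way to drive the returned statistician (the collection path and document title below are invented):

stat = statistitican_via_collection_path('/path/to/collection')
cis = stat.document_commits_via_title('Some Document Title')
if cis is not None:
    for ci in cis:  # _DocumentCommit namedtuples
        print(ci.datetime, ci.verb, ci.document_type)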
Example #16
class AbstractSTScan():
    """For testing and verification.  Coordinates are as usual, but timestamps
    are just float values, with 0 being the end time, and e.g. 10 being 10
    units into the past.
    """
    def __init__(self, coords, timestamps):
        self.coords = _np.asarray(coords)
        self.timestamps = _np.asarray(timestamps)
        if len(self.timestamps) != self.coords.shape[1]:
            raise ValueError(
                "Timestamps and Coordinates must be of same length.")
        self._sort_times_increasing()
        self._unique_points = self._make_unique_points()
        self.geographic_radius_limit = 100
        self.geographic_population_limit = 0.5
        self.time_max_interval = 28
        self.time_population_limit = 0.5
        self.only_full_disks = False

    def _sort_times_increasing(self):
        self.arg_sort = _np.argsort(self.timestamps)
        self.timestamps = self.timestamps[self.arg_sort]
        self.coords = self.coords[:, self.arg_sort]

    def allowed_times_into_past(self):
        """Find the times into the past which satisfy the constraints of
        maximum time interval, and maximum time population.
        
        :return: Array of times into the past, in increasing order.
        """
        mask = self.timestamps <= self.time_max_interval
        if not _np.any(mask):
            return []
        times = _np.unique(self.timestamps[mask])
        times.sort()
        index = len(times) - 1
        cutoff = times[index]
        maxsize = int(self.time_population_limit * self.timestamps.shape[0])
        while _np.sum(self.timestamps <= cutoff) > maxsize:
            index -= 1
            if index == -1:
                return []
            cutoff = times[index]
        return times[:index + 1]

    Disc = _nt("Disc", ["centre", "radius_sq", "mask"])

    def _make_unique_points(self):
        """Return an array of the unique coordinates."""
        return _np.array(list(set((x, y) for x, y in self.coords.T))).T

    @staticmethod
    def _product(s):
        if len(s) == 1:
            yield s
        else:
            for i in range(1, len(s) + 1):
                yield from _itertools.combinations(s, i)

    def all_discs_around(self, centre):
        """Find all discs around the centre.  Applies the rules above: no disc
        contains a single point, and the rule about boundary cases.
        
        Is a generator, yields pairs (radius**2, mask)
        """
        centre = _np.asarray(centre)
        limit = self.timestamps.shape[0] * self.geographic_population_limit

        if self.only_full_disks:
            distsqun = _np.sum((self.coords - centre[:, None])**2, axis=0)
            uniques = _np.unique(distsqun)
            uniques = uniques[uniques <= self.geographic_radius_limit**2]
            uniques.sort()
            for d in uniques:
                mask = distsqun <= d
                if _np.sum(mask) > limit:
                    return
                yield (d, mask)
            return

        distsqun = _np.sum((self._unique_points - centre[:, None])**2, axis=0)
        index_array = _np.arange(len(distsqun))
        uniques = _np.unique(distsqun)
        uniques = uniques[uniques <= self.geographic_radius_limit**2]
        uniques.sort()

        # Zero case
        mask = (self.coords[0] == centre[0]) & (self.coords[1] == centre[1])
        count = _np.sum(mask)
        if count > 1:
            if count > limit:
                return
            yield (0, mask)

        current_mask = mask
        for d in uniques[1:]:
            new_indices = index_array[distsqun == d]
            seen_too_large = False
            new_mask = current_mask.copy()
            for to_add in self._product(new_indices):
                mask = current_mask.copy()
                for i in to_add:
                    mask |= ((self.coords[0] == self._unique_points[0][i]) &
                             (self.coords[1] == self._unique_points[1][i]))
                new_mask |= mask
                if _np.sum(mask) > limit:
                    seen_too_large = True
                else:
                    yield (d, mask)
            if seen_too_large:
                return
            current_mask = new_mask

    def all_discs(self):
        """Generate all discs according to the rules.
        
        Is a generator, yielding Disc objects.
        """
        all_masks = set()
        for centre in self._unique_points.T:
            for rr, mask in self.all_discs_around(centre):
                m = tuple(mask)
                if m not in all_masks:
                    yield self.Disc(centre, rr, mask)
                    all_masks.add(m)

    Result = _nt("Result", ["centre", "radius", "mask", "time", "statistic"])

    def build_times_cutoff(self):
        """Returns pair (times, cutoff) where `times` is an array of all valid
        times into the past to test, in increasing order, and `cutoff[i]` is
        the greatest index, plus one, into `self.timestamps` whose value is
        `<= times[i]`.
        """
        times = self.allowed_times_into_past()
        cutoff = []
        i = 0
        for t in times:
            while i < self.timestamps.shape[0] and self.timestamps[i] <= t:
                i += 1
            if i == self.timestamps.shape[0]:
                cutoff.append(self.timestamps.shape[0])
            else:
                cutoff.append(i)
        return times, cutoff

    def score_clusters(self):
        """A generator returning triples `(disc, time, statistic)` describing
        each cluster of relevance.
        """
        N = self.timestamps.shape[0]
        times, cutoff = self.build_times_cutoff()
        for disc in self.all_discs():
            space_count = _np.sum(disc.mask) / N
            for c in cutoff:
                actual = _np.sum(disc.mask[:c])
                expected = space_count * c
                if actual > 1 and actual > expected:
                    yield (disc, self.timestamps[c - 1],
                           self._statistic(actual, expected, N))

    @staticmethod
    def _not_intersecting(all_clusters, cluster):
        return [
            cc for cc in all_clusters
            if _np.sum((cc.centre - cluster.centre)**2) > (cluster.radius +
                                                           cc.radius)**2
        ]

    def find_all_clusters(self):
        """Find all the disjoint clusters from most to least significant."""
        all_clusters = [
            self.Result(centre=c[0].centre,
                        radius=_np.sqrt(c[0].radius_sq),
                        mask=c[0].mask,
                        time=c[1],
                        statistic=c[2]) for c in self.score_clusters()
        ]
        all_clusters.sort(key=lambda r: -r.statistic)
        while len(all_clusters) > 0:
            c = all_clusters[0]
            yield c
            all_clusters = self._not_intersecting(all_clusters, c)

    @staticmethod
    def _statistic(actual, expected, total):
        """Calculate the log likelihood"""
        stat = actual * (_np.log(actual) - _np.log(expected))
        stat += (total - actual) * (_np.log(total - actual) -
                                    _np.log(total - expected))
        return stat
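    # Written out, this is the usual scan-statistic log likelihood ratio, with
    # n = actual, mu = expected, N = total:
    #     log Lambda = n*log(n/mu) + (N - n)*log((N - n)/(N - mu))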

    def to_satscan(self, filename, offset):
        """Writes the training data to two SaTScan compatible files.  Does
        *not* currently write settings, so these will need to be entered
        manually.  The timestamps are rounded down to an integer.
        
        :param filename: Saves files "filename.geo" and "filename.cas"
          containing the geometry and "cases" respectively.
        :param offset: The "end time" in generic units, from which the
          `timestamps` are subtracted.
        """
        self.write_to_satscan(filename, offset, self.coords, self.timestamps)

    @staticmethod
    def write_to_satscan(filename, offset, coords, timestamps):
        unique_coords = list(set((x, y) for x, y in coords.T))
        with open(filename + ".geo", "w") as geofile:
            for i, (x, y) in enumerate(unique_coords):
                print("{}\t{}\t{}".format(i + 1, x, y), file=geofile)

        unique_times = list(set(t for t in timestamps))
        with open(filename + ".cas", "w") as casefile:
            for i, (t) in enumerate(unique_times):
                pts = coords.T[timestamps == t]
                pts = [(x, y) for x, y in pts]
                import collections
                c = collections.Counter(pts)
                for pt in c:
                    index = unique_coords.index(pt)
                    print("{}\t{}\t{}".format(index + 1, c[pt],
                                              int(offset - t)),
                          file=casefile)
Example #17
        return _TempCommitRecord(*row)

    def _create_table_(self):
        self._execute("""CREATE TEMPORARY TABLE commit_queue (
            SHA TEXT PRIMARY KEY,
            parent_SHA TEXT,
            child_SHA TEXT,
            datetime TEXT NOT NULL,
            message_indented TEXT NOT NULL,
            file_paths TEXT NOT NULL)""")
        self._commit()  # ??
        return


_TempCommitRecord = _nt(
    '_TempCommitRecord',
    'SHA parent_SHA child_SHA datetime message_indented file_paths'.split())


class _CommitTable(_Table):
    def accept_commit(self, temp_rec, parent_ID):
        SHA, parent_SHA, child_SHA, datetime, message_indented, file_paths = \
            temp_rec
        return self.insert_commit(SHA,
                                  datetime,
                                  message_indented,
                                  parent_ID=parent_ID)

    def insert_commit(self,
                      SHA,
                      datetime,
Example #18
        _my_assert_sequence_equal(self, act_scn, exp_scn)

    do_debug = False


def _schema_via_sexp(sx):
    stack = list(reversed(sx))
    assert 'schema' == stack.pop()
    kwargs = {}
    while stack:
        k = stack.pop()
        kwargs[k] = stack.pop()
    return _Schema(**kwargs)


_EndStateSchemaAndEntitySTREAM = _nt('ES01', ('schema', 'entity_STREAM'))

_Schema = _nt('MinimalSchema', ('field_name_keys', ))


class _JustEnoughEntity:
    def __init__(self, dct):
        self.core_attributes = dct

    def __eq__(self, otr):
        cad = otr.core_attributes
        return self.core_attributes == cad


# == Low-level and likely to abstract
Example #19
File: json.py  Project: hipe/downtownfun
    # Last item only: smunge that ']' on to there
    line = ''.join((last_line_of_previous, ']\n'))
    yield line


def _build_identifier_builder(_listener, _cstacker=None):
    def iden_via_primitive(x):  # #[#877.4] this might become default
        assert isinstance(x, str)
        assert len(x)
        return x

    return iden_via_primitive


_JustEnoughEntity = _nt('JustEnoughEntity', ('core_attributes', ))
_MinimalSchema = _nt('MinimalSchema', ('field_name_keys', ))


def _scnlib():
    import text_lib.magnetics.scanner_via as module
    return module


def xx(msg=None):
    raise RuntimeError(msg or "wee")


# #history-B.1 initial spike
# #born as nonworking stub
Example #20
    def __sub__(self, a):
        if isinstance(a, IP4Addr):
            raise TypeError("Can't subtract IP4Addr from IP6Addr")
        return IP6Addr(long.__sub__(self, a))

    def __and__(self, a):
        if isinstance(a, IP4Addr):
            raise TypeError("Can't do logical 'and' of IP6Addr and IP4Addr")
        return IP6Addr(long.__and__(self, a))

    def __or__(self, a):
        if isinstance(a, IP4Addr):
            raise TypeError("Can't do logical 'or' of IP6Addr and IP4Addr")
        return IP6Addr(long.__or__(self, a))

IPRangeParentType = _nt("IPRange", ["start", "end"])


class IPRange(IPRangeParentType):
    __slots__ = ()

    def __new__(cls, param1, param2=None):
        """
        Params may be:
            start, end  - first and last address of a range as IPAddr, string or number
            string        - string in "prefix" format, e.g. 1.2.3.0/24
        """
        if param2 is None:
            return cls.fromString(param1)
        else:
            return cls.fromPair(param1, param2)
Example #21
    ''' raised when extraction fails for some reason '''
    pass


class MbdbParseError(PearBackError):
    ''' raised when parsing of older Manifest.mbdb fails '''
    pass


# instead of defining full classes, I use namedtuples for simplicity

# a backup object consists of rootdir, connection to Manifest.db (None when
# dealing with backups from devices with iOS9 or below), parsed
# status.plist and manifest.plist and a partial generator function for
# iterating over parsed file-records from Manifest.db
_backup = _nt('iOSbackup', 'backuptype rootdir db status manifest filerecords')

# a simple object for representing a single entry in the Files table
_file_entry = _nt(
    'file_entry', 'fileID domain relativePath uid gid '
    'mtime ctime btime inode mode filetype '
    'permissions size protection '
    'extended_attributes linktarget digest')

# namedtuple for representing the values in the plist in the file field in iOS10
# Manifest.db databases
_plistvals = _nt(
    'plist_values', 'uid gid mtime ctime btime inode mode '
    'filetype permissions size protection '
    'relpath extended_attributes linktarget '
    'digest')
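As a loose sketch of how these pieces fit together (everything below other than the namedtuples and the `_read_*`/`_db_file_records` helpers from the earlier examples is hypothetical):

from functools import partial

conn = _opened_manifest_db()  # hypothetical: sqlite3 connection to Manifest.db
backup = _backup(
    backuptype='IOS10',                    # hypothetical tag for the backup flavour
    rootdir='/backups/abc123',             # hypothetical backup root directory
    db=conn,
    status=_read_status_plist('/backups/abc123/Status.plist'),
    manifest=_read_manifest_plist('/backups/abc123/Manifest.plist'),
    filerecords=partial(_db_file_records, conn),  # the "partial generator function"
)
for entry in backup.filerecords():  # yields _file_entry namedtuples
    print(entry.domain, entry.relativePath)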
Example #22
            yield value

    def resolve_variable_value_once(var_name):
        return _resolve_variable_value_once(var_name, memo, bcoll, listener)

    def resolve_function_name_and_args():
        return _parse_function_call(expression_string, listener)

    stop = _Stop
    try:
        return main()
    except stop:
        pass


_Funco = _nt('_Funco', ('num_args', 'func', 'locator', 'memo_yikes'))


def _load_function_once(func_name, memo, bcoll, listener):
    dct = _produce_raw_dict(memo, 'value_function', 'funcs_as_unparsed_dict',
                            'VALUE_FUNCTION_RIGHT_HAND_SIDES', bcoll, listener)

    # Get func locator expression right hand side as it appeared in schema.rec
    rhs = dct.get(func_name)
    if rhs is None:
        from pho.magnetics_.text_via import oxford_join as func
        or_list = func(dct.keys(), ' or ')  # does repr
        xx(f"No function {func_name!r}. Did you mean {or_list}?")

    # Split a function locator like this "foo.bar.baz" into "foo.bar", "baz"
    md = _function_locator_rx.match(rhs)
Example #23
File: jaoc.py  Project: ksb2266/evoprog
lexer = lex.lex()
#if sourceCode is not None:
#  lexer.input(sourceCode)
#  if __debug__:
#    for tok in lexer:
#      #print(tok.value, end="  ")
#      print("{0:12} {1}".format(tok.value, tok.type))
#      #print(tok.value)
#    print()

# TODO nested scopes, symbol tables
# TODO simplify resulting structure to eliminate artifacts of handwiring?

from collections import namedtuple as _nt
ASTLiteralInteger = _nt("ASTLiteral", ["value"])
ASTIdentifier = _nt("ASTIdentifier", ["name"])
ASTApplication = _nt("ASTApplication", ["function", "argument"])
ASTAbstraction = _nt("ASTAbstraction", ["parameter", "body"])
ASTLet = _nt("ASTLet", ["variable", "value", "expression"])

def p_literalinteger(p):
  "expression : LITERALINTEGER"
  p[0] = ASTLiteralInteger(int(p[1]))

def p_identifier(p):
  "expression : IDENTIFIER"
  p[0] = ASTIdentifier(p[1])

def p_application(p):
  "expression : APPLICATOR expression expression"
    slot_w = w - _hand_w - _lb_w - _rb_w  # #here1
    assert 0 < slot_w  # assume concretization negotiated fairly
    return ItemContructors()


def _FFSA(fsa_def):  # copy-paste-modify. memoize the FFSA into its own func
    if not hasattr(fsa_def, '_FFSA_'):
        from modality_agnostic.magnetics.formal_state_machine_via_definition \
            import build_formal_FSA_via_definition_function as func
        wti = fsa_def.where_to_insert()
        fsa_def._FFSA_ = func(__name__, fsa_def, where_to_insert=wti)
    return fsa_def._FFSA_


_min_width_and_value = _nt('_min_w_and_val', ('minimum_width', 'value'))
_hand_w = _piece_via_has_focus.width
_this_ratio = 2.75  # keymashing this looked fine: 8 chars : 22 chars


# Imagining:
'  👉  [  chamoochie]:[fachoochie  ]'
'  👉     chamoochie : fachoochie   '
# such that:  [hand][left bracket][name slot][spine][value slot][right bracket]
_lb_w = 1  # left bracket width
_rb_w = _lb_w
_spine_w = _rb_w + len(':') + _lb_w


def xx(msg=None):
    raise RuntimeError(''.join(('cover me', *((': ', msg) if msg else ()))))
Example #25
class STScanNumpy():
    """For testing and verification; numpy accelerated.
    Coordinates are as usual, but timestamps
    are just float values, with 0 being the end time, and e.g. 10 being 10
    units into the past.
    """
    def __init__(self, coords, timestamps):
        self.coords = _np.asarray(coords)
        self.timestamps = _np.asarray(timestamps)
        if len(self.timestamps) != self.coords.shape[1]:
            raise ValueError(
                "Timestamps and Coordinates must be of same length.")
        self._sort_times_increasing()
        self.geographic_radius_limit = 100
        self.geographic_population_limit = 0.5
        self.time_max_interval = 28
        self.time_population_limit = 0.5
        self._cache_N = 0

    def _sort_times_increasing(self):
        arg_sort = _np.argsort(self.timestamps)
        self.timestamps = self.timestamps[arg_sort]
        self.coords = self.coords[:, arg_sort]

    def make_time_ranges(self):
        """Compute the posssible time intervals.
        
        :return: Tuple of masks (of shape (N,k) where N is the number of data
          points), counts (of length k) and the cutoff used for each count (of
          length k).  Hence `masks[:,i]` corresponds to `counts[i]`, which is
          obtained by looking at events `<= cutoff[i]` before the end of time.
        """
        unique_times = _np.unique(self.timestamps)
        unique_times = unique_times[unique_times <= self.time_max_interval]
        unique_times.sort()
        time_masks = self.timestamps[:, None] <= unique_times[None, :]

        limit = self.timestamps.shape[0] * self.time_population_limit
        time_counts = _np.sum(time_masks, axis=0)
        m = time_counts <= limit

        return time_masks[:, m], time_counts[m], unique_times[m]

    def find_discs(self, centre):
        """Compute the possible disks.
        
        :return: Tuple of masks (of shape (N,k) where N is the number of data
          points), counts (of length k) and the distances squared from the
          centre point (of length k).  Hence `masks[:,i]` corresponds to
          `counts[i]`, which is obtained by looking at the events within the
          i-th squared distance of the centre.
        """
        centre = _np.asarray(centre)
        distsq = _np.sum((self.coords - centre[:, None])**2, axis=0)
        unique_dists = _np.unique(distsq)
        unique_dists = unique_dists[
            unique_dists <= self.geographic_radius_limit**2]
        mask = distsq[:, None] <= unique_dists[None, :]

        limit = self.timestamps.shape[0] * self.geographic_population_limit
        space_counts = _np.sum(mask, axis=0)
        m = (space_counts > 1) & (space_counts <= limit)

        return mask[:, m], space_counts[m], unique_dists[m]

    @staticmethod
    def _calc_actual(space_masks, time_masks, time_counts):
        # Does this, but >9 times quicker:
        # uber_mask = space_masks[:,:,None] & time_masks[:,None,:]
        # actual = _np.sum(uber_mask, axis=0)
        x = _np.empty((space_masks.shape[1], time_masks.shape[1]))
        # This is better, but still >20 times slower...
        #for i, c in enumerate(time_counts):
        #    x[:,i] = _np.sum(space_masks[:c,:], axis=0)
        current_sum = _np.zeros(space_masks.shape[1])
        current_column = 0
        for i, c in enumerate(time_counts):
            while current_column < c:
                current_sum += space_masks[current_column, :]
                current_column += 1
            x[:, i] = current_sum
        return x

    def faster_score_all_new(self):
        """As :method:`score_all` but yields tuples (centre, distance_array,
        time_array, statistic_array)."""
        time_masks, time_counts, times = self.make_time_ranges()
        N = self.timestamps.shape[0]
        for centre in self.coords.T:
            space_masks, space_counts, dists = self.find_discs(centre)

            actual = self._calc_actual(space_masks, time_masks, time_counts)
            expected = space_counts[:, None] * time_counts[None, :] / N
            _mask = (actual > 1) & (actual > expected)
            actual = _np.ma.array(actual, mask=~_mask)
            expected = _np.ma.array(expected, mask=~_mask)
            stats = self._ma_statistic(actual, expected, N)
            _mask1 = _np.any(_mask, axis=1)
            if not _np.any(_mask1):
                continue
            m = _np.ma.argmax(stats, axis=1)[_mask1]
            stats = stats[_mask1, :]
            stats = stats[range(stats.shape[0]), m].data
            used_dists = dists[_mask1]
            used_times = times[m]

            yield centre, used_dists, used_times, stats

    @staticmethod
    def _ma_statistic(actual, expected, total):
        """Calculate the log likelihood"""
        stat = actual * (_np.ma.log(actual) - _np.ma.log(expected))
        stat += (total - actual) * (_np.ma.log(total - actual) -
                                    _np.ma.log(total - expected))
        return stat

    @staticmethod
    def _calc_actual1(space_masks, time_masks, time_counts):
        x = _np.empty((space_masks.shape[1], time_masks.shape[1]),
                      dtype=int)  # np.int was removed from numpy; plain int is equivalent
        current_sum = _np.zeros(space_masks.shape[1], dtype=int)
        current_column = 0
        for i, c in enumerate(time_counts):
            while current_column < c:
                current_sum += space_masks[current_column, :]
                current_column += 1
            x[:, i] = current_sum
        return x

    def faster_score_all(self):
        """As :method:`score_all` but yields tuples (centre, distance_array,
        time_array, statistic_array)."""
        time_masks, time_counts, times = self.make_time_ranges()
        N = self.timestamps.shape[0]
        for centre in self.coords.T:
            space_masks, space_counts, dists = self.find_discs(centre)

            actual = self._calc_actual1(space_masks, time_masks, time_counts)

            stcounts = space_counts[:, None] * time_counts[None, :]
            _mask = (actual > 1) & (N * actual > stcounts)
            stats = self._ma_statistics_lookup(space_counts, time_counts,
                                               stcounts, actual, _mask, N)
            _mask1 = _np.any(_mask, axis=1)
            if not _np.any(_mask1):
                continue
            m = _np.ma.argmax(stats, axis=1)[_mask1]
            stats = stats[_mask1, :]
            stats = stats[range(stats.shape[0]), m].data
            used_dists = dists[_mask1]
            used_times = times[m]

            yield centre, used_dists, used_times, stats

    @staticmethod
    def _build_log_lookup(N):
        lookup = _np.empty(N + 1, dtype=_np.float64)
        lookup[0] = 1
        for i in range(1, N + 1):
            lookup[i] = i
        return _np.log(lookup)

    def _ma_statistics_lookup(self, space_counts, time_counts, stcounts,
                              actual, _mask, N):
        # Faster version which uses lookup tables
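        # Algebraically this equals _ma_statistic with expected = s*t/N
        # (s = space count, t = time count, n = actual):
        #   n*log(n/(s*t/N)) + (N-n)*log((N-n)/(N - s*t/N))
        #   = n*(log n - log s - log t) + (N-n)*(log(N-n) - log(N*N - s*t)) + N*log N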
        if self._cache_N != N:
            self._cache_N = N
            self._log_lookup = self._build_log_lookup(N)
            if N > 2000:
                self._log_lookup2 = None
            else:
                self._log_lookup2 = self._build_log_lookup(N * N)
        sl = self._log_lookup[space_counts]
        tl = self._log_lookup[time_counts]
        y = actual * (self._log_lookup[actual] - sl[:, None] - tl[None, :])
        if self._log_lookup2 is None:
            yy = (N - actual) * (self._log_lookup[N - actual] -
                                 _np.log(N * N - stcounts))
        else:
            yy = (N - actual) * (self._log_lookup[N - actual] -
                                 self._log_lookup2[N * N - stcounts])
        return _np.ma.array(y + yy + N * _np.log(N), mask=~_mask)

    def faster_score_all_old(self):
        """As :method:`score_all` but yields tuples (centre, distance_array,
        time_array, statistic_array)."""
        time_masks, time_counts, times = self.make_time_ranges()
        N = self.timestamps.shape[0]
        for centre in self.coords.T:
            space_masks, space_counts, dists = self.find_discs(centre)

            uber_mask = space_masks[:, :, None] & time_masks[:, None, :]

            actual = _np.sum(uber_mask, axis=0)
            expected = space_counts[:, None] * time_counts[None, :] / N
            _mask = (actual > 1) & (actual > expected)

            used_dists = _np.broadcast_to(dists[:, None], _mask.shape)[_mask]
            used_times = _np.broadcast_to(times[None, :], _mask.shape)[_mask]
            actual = actual[_mask]
            expected = expected[_mask]
            stats = AbstractSTScan._statistic(actual, expected, N)

            if len(stats) > 0:
                yield centre, used_dists, used_times, stats

    def score_all(self):
        """Consider all possible space and time regions (which may include many
        essentially repeated disks) and yield tuples of the centre of disk, the
        radius squared of the disk, the time span of the region, and the 
        statistic.
        """
        for centre, dists, times, stats in self.faster_score_all():
            for d, t, s in zip(dists, times, stats):
                yield centre, d, t, s

    @staticmethod
    def _not_intersecting(scores, centre, radius):
        return [
            cc for cc in scores if (cc[0] - centre[0])**2 +
            (cc[1] - centre[1])**2 > (radius + cc[2])**2
        ]

    Result = _nt("Result", ["centre", "radius", "time", "statistic"])

    def find_all_clusters(self):
        scores = []
        count = 0
        for centre, dists, times, stats in self.faster_score_all():
            dists = _np.sqrt(dists)
            scores.extend(
                zip(_itertools.repeat(centre[0]), _itertools.repeat(centre[1]),
                    dists, times, stats))
            count += 1
        if len(scores) == 0:
            return
        scores = _np.asarray(scores)
        if len(scores.shape) == 1:
            scores = scores[None, :]
        scores = scores[_np.argsort(-scores[:, 4]), :]

        while scores.shape[0] > 0:
            best = scores[0]
            centre = _np.asarray([best[0], best[1]])
            radius = best[2]
            yield self.Result(centre=centre,
                              radius=radius,
                              time=best[3],
                              statistic=best[4])
            distances = (scores[:, 0] - best[0])**2 + (scores[:, 1] -
                                                       best[1])**2
            mask = distances > (radius + scores[:, 2])**2
            scores = scores[mask, :]
Example #26
        if has:
            intersections = _intersections(use_insides, use_overhangs,
                                           use_kissing)

        left_thing = left_scn.next()
        yield left_thing, intersections


_when_overhang = {  # key, do consume, do stay
    (False, False): None,
    (False, True): ('at_stop', False, False),
    (True, False): ('at_start', True, True),
    (True, True): ('full', False, False),
}

_intersections = _nt('_Intersections',
                     ('inside_or_flush', 'overhangs', 'kissing'))


def _relationship(one, other):
    if one < other:
        return 'overhang'
    if other < one:
        return 'inside'
    assert one == other
    return 'equal'


def _check_ranges(itr):
    tup = next(itr)
    start, prev_stop = tup[:2]
    assert start <= prev_stop
Example #27
#!/usr/bin/env python3 -tt
"""
File: point.h
-------------
Exports a type representing 2- and 3-dimensional points.
These points inherit from tuples, so you can do anything
you could otherwise do with tuples (notably, tuple unpacking)

Usage:

    origin = Point()
    pt = Point(4, 1)
    x = origin.x  # => x = 0
    y = pt.y      # => y = 1

    origin3 = Point3()
    pt = Point3(3, 4, 5)
    x, y, z = pt

"""
from collections import namedtuple as _nt

Point = _nt('Point', ['x', 'y'])
Point.__new__.__defaults__ = (0, 0)       # so Point() == Point(0, 0), matching the usage above
Point3 = _nt('Point3', ['x', 'y', 'z'])
Point3.__new__.__defaults__ = (0, 0, 0)
Example #28
        def condition_two():
            pass

        def condition_three():
            pass

        def no_see():
            self.fail("no see")

        listener, emis = em.listener_and_emissions_for(self)
        case = subject_fellow()(listener)
        val = case(when_chim_churry_is)
        assert val is None
        emi, = emis
        return emi.to_messages()


EndState = _nt('EndState', ('emissions', 'recordings', 'result_value'))


def subject_fellow():
    from kiss_rdb.magnetics_.collection_via_path import _build_case_function
    return _build_case_function


if __name__ == '__main__':
    unittest.main()

# #born
Example #29
    yield ('--from-arg=<arg>*',
           'EXPERIMENTAL pass thru to producer script (yikes)')

    # (using '-t' is sad, we would rather have again '-f' like ffmpeg
    # but that would require custom arg parsing that is way out of scope.
    # it's basically `[opts] arg` twice. #wish [#459.T])
    yield '-t', f'{_to_monikers.format_flag}=<fmt>', _same_help

    from data_pipes.cli import this_screen_ as this_screen
    yield '-h', '--help', this_screen
    yield _from_monikers.arg, 'the collection the data comes from'
    yield _to_monikers.arg, 'the collection the data goes to'


_monikers = _nt('Monikers', ('arg', 'format_flag', 'STDIN_STDOUT'))
_from_monikers = _monikers('FROM_COLLECTION', '--from-format', 'STDIN')
_to_monikers = _monikers('TO_COLLECTION', '--to-format', 'STDOUT')

_same_help = "(or will try to infer from file extension if present)"


def BUILD_COLLECTION_MAPPER(stderr, vals, foz, rscser):
    """Harness the power of pipes…"""

    # ..
    """EXPERIMENTAL. Created as a cheap-and-easy way to create and populate
    a collection with a producer script or similar.

    With FROM_COLLECTION of "-", lines of the indicated format are read from
    STDIN. With TO_COLLECTION of "-", lines of the specified format are
Example #30
# Type used when the actual type cannot be determined.
# While typing.Any could be used here and would be valid, we need to
# distinguish this sentinel from an explicitly added typing.Any, so that
# typing additions can be treated differently from typing changes.
UNKNOWN = "~unknown"


# fmt: off
class Kind(object):
    POSITIONAL = 0b0001
    KEYWORD = 0b0010
    VARIADIC = 0b0100
    DEFAULT = 0b1000


class API(object):
    Var = _nt("Var", ("name", "type"))
    Arg = _nt("Arg", ("name", "type", "kind"))
    Func = _nt("Func", ("name", "args", "returns"))
    Class = _nt("Class", ("name", "path", "body"))
    Module = _nt("Module", ("name", "path", "body"))
    Unknown = _nt("Unknown", ("name", "type", "info"))


# fmt: on

Change = _nt("Change", ("level", "type", "info"))

TYPE_CHARS = r"\w[\w\.]*(?:\[[\w\.,\[\] ]*\])?"
Example #31
from datetime import datetime
import numpy as np
import struct
from collections import namedtuple as _nt
import sys

N_TM = 32  # Number of target modules in camera
N_TM_PIX = 64  # Number of pixels on a Target Module
N_BYTES_NUM = 8  # Number of bytes to encode numbers (uint and float) in the SSReadout
N_CAM_PIX = N_TM * N_TM_PIX  # Number of pixels in the camera

# fmt: off
_SSMappings = _nt("SSMappings", "ssl2colrow ssl2asic_ch")
ss_mappings = _SSMappings(
    np.array([
        8,
        1,
        5,
        6,
        41,
        43,
        51,
        47,
        11,
        14,
        13,
        7,
        44,
        36,
        42,
        46,
Example #32
"""
Created on 31 Jul 2011

@author: D.W.
"""

import wx
from collections import namedtuple as _nt

_id_text= _nt("id_text", "menu toolbar status menu_kind")

#===============================================================================
#  File ID
#===============================================================================
ID_NEW= wx.NewId()
id_text_new= _id_text("New\tCtrl+N", "New",
                      "Create a new file", wx.ITEM_NORMAL)

ID_OPEN= wx.NewId()
id_text_open= _id_text("Open\tCtrl+O", "Open",
                       "Open an existing file", wx.ITEM_NORMAL)

ID_SAVE= wx.NewId()
id_text_save= _id_text("Save\tCtrl+S", "Save",
                       "Save the current file", wx.ITEM_NORMAL)

ID_SAVEAS= wx.NewId()
id_text_saveas= _id_text("Save as", "Save as",
                         "Save as a new filename", wx.ITEM_NORMAL)

ID_CLOSETAB= wx.NewId()