Example #1
def iterunique(source, key):
    # assume source is already sorted on the key fields
    it = iter(source)

    hdr = next(it)
    yield tuple(hdr)

    # convert field selection into field indices
    if key is None:
        indices = range(len(hdr))
    else:
        indices = asindices(hdr, key)

    # now use field indices to construct a _getkey function
    # N.B., this may raise an exception on short rows, depending on
    # the field selection
    getkey = operator.itemgetter(*indices)

    prev = next(it)
    prev_key = getkey(prev)
    prev_comp_ne = True

    for curr in it:
        curr_key = getkey(curr)
        curr_comp_ne = curr_key != prev_key
        if prev_comp_ne and curr_comp_ne:
            yield tuple(prev)
        prev = curr
        prev_key = curr_key
        prev_comp_ne = curr_comp_ne

    # last one?
    if prev_comp_ne:
        yield tuple(prev)
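A minimal usage sketch (not part of the original example): asindices below is a small stand-in for the module's helper (petl-style) that maps a field selection to column indices, so the snippet is self-contained.

import operator

def asindices(hdr, key):
    # stand-in: accept a single field name or a list of field names
    names = [key] if isinstance(key, str) else list(key)
    return [list(hdr).index(n) for n in names]

table = [('id', 'colour'),
         (1, 'blue'),
         (1, 'red'),
         (2, 'yellow')]   # already sorted on 'id'

print(list(iterunique(table, key='id')))
# [('id', 'colour'), (2, 'yellow')] -- only rows whose key occurs exactly once survive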
def test_connected(grid, start, tolerance=1):
    mark = np.in1d(grid, [
                        color_code_map_inv['w'], 
                        color_code_map_inv['g']
                    ]
            ).reshape(grid.shape)
    frontier = [start]
    mark[tuple(start)] = True
    mark_cnt = 1
    needed_mark_count = math.floor((mark.size - np.count_nonzero(mark)) * tolerance)
    while len(frontier) > 0:
        loc = frontier.pop()
        for i in range(len(grid.shape)):
            l = np.copy(loc)
            l[i] = loc[i] - 1
            if l[i] >= 0 and not mark[tuple(l)]:
                mark[tuple(l)] = True
                mark_cnt += 1
                frontier.append(np.copy(l))
            l[i] = loc[i] + 1
            if l[i] < grid.shape[i] and not mark[tuple(l)]:
                mark[tuple(l)] = True
                mark_cnt += 1
                frontier.append(np.copy(l))
        if mark_cnt >= needed_mark_count:
            return True
    return False
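A hedged usage sketch (assumptions, not from the source): color_code_map_inv is a hypothetical module-level mapping from color letters to the integer codes stored in grid; cells coded 'w' or 'g' are treated as blocked.

import math
import numpy as np

color_code_map_inv = {'w': 1, 'g': 2}    # assumed meaning: wall / goal codes

grid = np.array([[0, 1, 0],
                 [0, 1, 0],
                 [0, 1, 0]])             # a wall column splits the free cells

print(test_connected(grid, np.array([0, 0])))   # False: the flood fill cannot reach the right column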
Example #3
def tokens_ngram_processor(tokens, ngram_len):
    """
    Given a `tokens` sequence or iterable of Tokens, return an iterator of
    tuples of Tokens, each of length `ngram_len`, buffering at most `ngram_len`
    items. A returned tuple contains fewer than `ngram_len` items when the
    number of available tokens is smaller than `ngram_len`:

    - between the beginning of the stream and a first gap
    - between a last gap and the end of the stream
    - between two gaps
    In these cases, shorter ngrams can be returned.
    """
    ngram = deque()
    for token in tokens:
        if len(ngram) == ngram_len:
            yield tuple(ngram)
            ngram.popleft()
        if token.gap:
            ngram.append(token)
            yield tuple(ngram)
            # reset
            ngram.clear()
        else:
            ngram.append(token)
    if ngram:
        # yield last ngram
        yield tuple(ngram)
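A hedged usage sketch (Token here is a hypothetical stand-in; the real tokens only need a truthy/falsy `gap` attribute):

from collections import deque, namedtuple

Token = namedtuple('Token', 'value gap')

tokens = [Token('a', False), Token('b', False), Token('c', True),
          Token('d', False), Token('e', False), Token('f', False)]

for ngram in tokens_ngram_processor(tokens, ngram_len=2):
    print(tuple(t.value for t in ngram))
# ('a', 'b')
# ('b', 'c')   <- ngram ending at the gap token
# ('d', 'e')
# ('e', 'f')   <- last ngram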
Example #4
def check_compatible(client, min_version=None, max_version=None):
    """Checks if a kazoo client is backed by a zookeeper server version.

    This check will verify that the zookeeper server version that the client
    is connected to satisfies a given minimum version (inclusive) and
    maximum (inclusive) version range. If the server is not in the provided
    version range then an exception is raised indicating this.
    """
    server_version = None
    if min_version:
        server_version = tuple((int(a) for a in client.server_version()))
        min_version = tuple((int(a) for a in min_version))
        if server_version < min_version:
            pretty_server_version = ".".join([str(a) for a in server_version])
            min_version = ".".join([str(a) for a in min_version])
            raise exc.IncompatibleVersion("Incompatible zookeeper version"
                                          " %s detected, zookeeper >= %s"
                                          " required" % (pretty_server_version,
                                                         min_version))
    if max_version:
        if server_version is None:
            server_version = tuple((int(a) for a in client.server_version()))
        max_version = tuple((int(a) for a in max_version))
        if server_version > max_version:
            pretty_server_version = ".".join([str(a) for a in server_version])
            max_version = ".".join([str(a) for a in max_version])
            raise exc.IncompatibleVersion("Incompatible zookeeper version"
                                          " %s detected, zookeeper <= %s"
                                          " required" % (pretty_server_version,
                                                         max_version))
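A short aside on why both versions are converted to tuples of ints: Python compares tuples element-wise, which gives the correct version ordering where string comparison does not.

assert (3, 10, 0) > (3, 9, 0)             # numeric, element-wise: correct
assert "3.10.0" < "3.9.0"                 # lexicographic string comparison gets this wrong
min_version = tuple(int(a) for a in "3.4".split("."))   # (3, 4)
assert (3, 10, 0) >= min_version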
Example #5
 def __new__(cls, spec):
     if isinstance(spec, cls):
         return spec
     self = object.__new__(cls)
     self.spec = spec
     if isinstance(spec, tuple):
         self.match = self.all_match_ if spec[0] == 'all' else self.any_match_
     elif '|' in spec:
         return VersionSpec(('any', tuple(VersionSpec(s) for s in spec.split('|'))))
     elif ',' in spec:
         return VersionSpec(('all', tuple(VersionSpec(s) for s in spec.split(','))))
     elif spec.startswith(('=', '<', '>', '!')):
         m = version_relation_re.match(spec)
         if m is None:
             raise RuntimeError('Invalid version spec: %s' % spec)
         op, b = m.groups()
         self.op = opdict[op]
         self.cmp = VersionOrder(b)
         self.match = self.veval_match_
     else:
         self.spec = spec
         rx = spec.replace('.', r'\.')
         rx = rx.replace('+', r'\+')
         rx = rx.replace('*', r'.*')
         rx = r'(%s)$' % rx
         self.regex = re.compile(rx)
         self.match = self.regex_match_
     return self
 def _get_sales_data(self, cr, uid, period_ids, curr_period, sale_id, context=None):
     if not curr_period:
         curr_p = self.pool.get('account.period').find(cr, uid, context=context)[0]
         period_ids = [x for x in period_ids if x != curr_p]
     if not sale_id:
         sql = """
             select ml.partner_id as pid, ml.period_id as period, p.name as pnm, p.is_company as company, sum(ml.credit - ml.debit) as amount
             from account_move_line ml
             left join res_partner p on (ml.partner_id = p.id)
             left join account_account ac on (ml.account_id = ac.id)
             where ml.period_id in %s
             and ac.reports = True
             group by ml.partner_id, ml.period_id, p.name, p.is_company
             order by ml.partner_id, ml.period_id
             """ % (str(tuple(period_ids)))
     else:
         sql = """
             select ml.partner_id as pid, ml.period_id as period, p.name as pnm, p.is_company as company, sum(ml.credit - ml.debit) as amount
             from account_move_line ml
             left join account_invoice inv on (ml.move_id = inv.move_id)
             left join res_partner p on (ml.partner_id = p.id)
             left join account_account ac on (ml.account_id = ac.id)
             where ml.period_id in %s
             and inv.user_id = %s
             and ac.reports = True
             group by ml.partner_id, ml.period_id, p.name, p.is_company
             order by ml.partner_id, ml.period_id
             """ % (str(tuple(period_ids)), sale_id)
     cr.execute(sql)
     return cr.dictfetchall()
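A hedged aside (not from the source): interpolating str(tuple(period_ids)) produces invalid SQL for a single id -- "in (5,)" -- and leaves sale_id open to injection. With a psycopg2-style cursor (as OpenERP's cr typically is), the tuple can instead be bound as a parameter, which psycopg2 adapts for an IN clause:

cr.execute(
    """select ml.partner_id as pid, sum(ml.credit - ml.debit) as amount
       from account_move_line ml
       where ml.period_id in %s
       group by ml.partner_id""",
    (tuple(period_ids),))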
Example #7
    def emit_Pool(self, IR_node):
        dim = len(IR_node.get_attr('strides')) - 2

        if IR_node.get_attr('pooling_type') == "MAX":
            pool_name = "max_pool{}d".format(dim)
            exstr = ", value=float('-Inf')"
        elif IR_node.get_attr('pooling_type') == "AVG":
            pool_name = "avg_pool{}d".format(dim)
            exstr = ""
        else:
            assert False

        if IR_node.layer.attr['global_pooling'].b:
            self.add_body(2, "{:<15} = F.{}(input = {}, kernel_size = {}.size()[2:])".format(
                IR_node.variable_name,
                pool_name,
                self.parent_variable_name(IR_node),
                self.parent_variable_name(IR_node)
            ))

        else:
            for e in IR_node.get_attr('dilations', []):
                assert e == 1

            pool_size = IR_node.get_attr('kernel_shape')[1:-1]
            strides = IR_node.get_attr('strides')[1:-1]

            input_node = self._defuse_padding(IR_node, exstr)
            self.add_body(2, "{:<15} = F.{}({}, kernel_size={}, stride={})".format(
                IR_node.variable_name,
                pool_name,
                input_node,
                tuple(pool_size),
                tuple(strides)
                ))
Example #8
def solve(par):
    C, combine, D, opposite, N, S = par
    comb = {}
    for c in combine:
        x = list(c)[:2]
        comb[tuple(x)] = c[2]
        x.reverse()
        comb[tuple(x)] = c[2]
    oppo = defaultdict(list)
    for o in opposite:
        oppo[o[0]].append(o[1])
        oppo[o[1]].append(o[0])
    
    result = []
    for s in list(S):
        if len(result) > 0 and (result[-1], s) in comb:
            c = result[-1]
            result.pop()
            result.append(comb[(c, s)])
            continue
        
        flag = True
        if s in oppo:
            for x in oppo[s]:
                if x in result:
                    result = []
                    flag = False
                    break
        if flag:
            result.append(s)
            
    return '[' + ', '.join(result) + ']'
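A hedged usage sketch (the input layout is assumed from the Code Jam "Magicka" problem this appears to solve): each combine entry "XYZ" means adjacent X and Y merge into Z, and each opposite entry "XY" means X and Y clear the whole element list.

print(solve((1, ['QRI'], 1, ['QF'], 4, 'RFQR')))   # '[R]'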
Example #9
    def load(cls, dirname=''):
        normdir = os.path.normpath(dirname)
        code, data = _run_command(['svn', 'info', normdir])
        # Must check for some contents, as some use empty directories
        # in testcases
        svn_dir = os.path.join(normdir, '.svn')
        has_svn = (os.path.isfile(os.path.join(svn_dir, 'entries')) or
                   os.path.isfile(os.path.join(svn_dir, 'dir-props')) or
                   os.path.isfile(os.path.join(svn_dir, 'dir-prop-base')))

        svn_version = tuple(cls.get_svn_version().split('.'))

        try:
            base_svn_version = tuple(int(x) for x in svn_version[:2])
        except ValueError:
            base_svn_version = tuple()

        if has_svn and (code or not base_svn_version 
                             or base_svn_version < (1, 3)):
            warnings.warn(("No SVN 1.3+ command found: falling back "
                           "on pre 1.7 .svn parsing"), DeprecationWarning)
            return SvnFileInfo(dirname)
        elif not has_svn:
            return SvnInfo(dirname)
        elif base_svn_version < (1, 5):
            return Svn13Info(dirname)
        else:
            return Svn15Info(dirname)
Example #10
    def emit_Conv(self, IR_node):
        self.used_layers.add(IR_node.type)

        dim = len(IR_node.get_attr('strides')) - 2

        in_channels = IR_node.get_attr('kernel_shape')[-2]
        filter = IR_node.get_attr('kernel_shape')[-1]
        kernel = IR_node.get_attr('kernel_shape')[:-2]
        strides = IR_node.get_attr('strides')[1:-1]

        self.add_init(2, "self.{} = self.__conv({}, name='{}', in_channels={}, out_channels={}, kernel_size={}, stride={}, groups={}, bias={})".format(
            IR_node.variable_name,
            dim,
            IR_node.name,
            in_channels,
            filter,
            tuple(kernel),
            tuple(strides),
            # padding,
            IR_node.get_attr('group', 1),
            IR_node.get_attr('use_bias')))

        input_node = self._defuse_padding(IR_node)
        self.add_body(2, "{:<15} = self.{}({})".format(
            IR_node.variable_name,
            IR_node.variable_name,
            input_node))

        if self.weight_loaded:
            self.weights_dict[IR_node.name]['weights'] = np.transpose(self.weights_dict[IR_node.name]['weights'], [dim + 1, dim] + list(range(0, dim)))
Example #11
 def changeFromProperties(self, props):
     '''
     Returns 1 if changes were specified.
     '''
     if props is None:
         return 0
     res = 0
     s = props.get('guard_permissions', None)
     if s:
         res = 1
         p = [ permission.strip() for permission in s.split(';') ]
         self.permissions = tuple(p)
     s = props.get('guard_roles', None)
     if s:
         res = 1
         r = [ role.strip() for role in s.split(';') ]
         self.roles = tuple(r)
     s = props.get('guard_groups', None)
     if s:
         res = 1
         g = [ group.strip() for group in s.split(';') ]
         self.groups = tuple(g)
     s = props.get('guard_expr', None)
     if s:
         res = 1
         self.expr = Expression(s)
     return res
Example #12
def test_point_slicing_with_full_slice():
    from dask.array.core import _vindex_transpose, _get_axis
    x = np.arange(4*5*6*7).reshape((4, 5, 6, 7))
    d = da.from_array(x, chunks=(2, 3, 3, 4))

    inds = [
            [[1, 2, 3], None, [3, 2, 1], [5, 3, 4]],
            [[1, 2, 3], None, [4, 3, 2], None],
            [[1, 2, 3], [3, 2, 1]],
            [[1, 2, 3], [3, 2, 1], [3, 2, 1], [5, 3, 4]],
            [[], [], [], None],
            [np.array([1, 2, 3]), None, np.array([4, 3, 2]), None],
            [None, None, [1, 2, 3], [4, 3, 2]],
            [None, [0, 2, 3], None, [0, 3, 2]],
            ]

    for ind in inds:
        slc = [i if isinstance(i, (np.ndarray, list)) else slice(None, None)
                for i in ind]
        result = d.vindex[tuple(slc)]

        # Rotate the expected result accordingly
        axis = _get_axis(ind)
        expected = _vindex_transpose(x[tuple(slc)], axis)

        assert eq(result, expected)

        # Always have the first axis be the length of the points
        k = len(next(i for i in ind if isinstance(i, (np.ndarray, list))))
        assert result.shape[0] == k
Example #13
    def __new__(cls, fields, values):
        fields = tuple(fields)
        values = tuple(values)

        if len(fields) != len(values):
            raise ValueError(
                "`fields` and `values` must have matching length: %d != %d" %
                (len(fields), len(values)))

        # Create tuple instance, store fields, and create read-only attributes
        # for each field name. Fields must be stored for pickling/copying (see
        # `__getnewargs__`).
        #
        # Note: setting field names as attributes allows for tab-completion in
        # interactive contexts! Using `__getattr__` does not support this.
        self = super().__new__(cls, values)

        # Must set attributes this way because `__setattr__` prevents
        # setting directly (necessary for immutability).
        object.__setattr__(self, '_fields', fields)

        # Attach field names as instance attributes.
        for field, value in zip(fields, values):
            object.__setattr__(self, field, value)

        return self
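A minimal sketch of how such a __new__ is typically wired into an immutable, tuple-backed record class (the class and field names below are hypothetical):

class Row(tuple):
    def __new__(cls, fields, values):
        self = super().__new__(cls, tuple(values))
        object.__setattr__(self, '_fields', tuple(fields))
        for field, value in zip(fields, values):
            object.__setattr__(self, field, value)
        return self

    def __setattr__(self, name, value):
        raise AttributeError('Row is immutable')

    def __getnewargs__(self):
        # fields must be stored so pickling/copying can rebuild the instance
        return self._fields, tuple(self)

r = Row(('a', 'b'), (1, 2))
print(r.a, r[1])   # 1 2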
Example #14
    def __init__(self, n):
        """ Create a RangeCount with a given size.

        :param n: number of counters
        :type n: int
        """

        if not n > 0:
            raise ValueError("the number of counters must be positive.")

        trees = []
        offsets = []
        self._n = n

        base = 1
        offset = 0
        while base <= self._n:
            if self._n & base:
                trees.append(_FCTree(base))
                offsets.append(offset)
                offset += base
            base <<= 1

        self._trees = tuple(trees)
        self._offsets = tuple(offsets)
        self._lock = threading.Lock()
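A worked example of the binary decomposition above (standalone, no _FCTree needed): for n = 13 = 0b1101 the counters are split into trees of sizes 1, 4 and 8 at offsets 0, 1 and 5.

n, base, offset = 13, 1, 0
sizes, offsets = [], []
while base <= n:
    if n & base:
        sizes.append(base)
        offsets.append(offset)
        offset += base
    base <<= 1
print(tuple(sizes), tuple(offsets))   # (1, 4, 8) (0, 1, 5)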
Example #15
def chunks(tags):   # TODO: option: does O count as a singleton chunk?
    '''
    >>> list(chunks(['O', 'B-evt', 'o', 'b-PER', 'I', 'I', 'B-PER', 'O', 'B-ORG', 'I-ORG'])) \
        #             0    1        2    3        4    5    6        7    8        9
    [(3,), (1, 4, 5), (6,), (8, 9)]
    '''
    ochk = []
    ichk = None
    for i,t in enumerate(tags):
        if isInGap(t):
            if ichk is None:
                assert not isInside(t)
            else:
                if isInside(t):
                    ichk.append(i)
                elif ichk:
                    yield tuple(ichk)
                    ichk = []
            if isBegin(t):
                ichk = [i]
        else:
            if ichk: yield tuple(ichk)  # post-gap
            ichk = None
            if isInside(t):
                ochk.append(i)
            elif ochk:
                yield tuple(ochk)
                ochk = []
            if isBegin(t):
                ochk = [i]
    assert ichk is None
    if ochk: yield tuple(ochk)
def import_dataset(filename):
    store = []
    settings = {}

    with open(filename, 'r') as f:

        line = f.readline().rstrip('\n')

        # Check if the file has a settings header and read out
        if line[:5] == 'START':
            line = f.readline().rstrip('\n')
            while line[:3] != 'END':
                parts = line.split(',')
                settings[parts[0]] = parts[1]
                line = f.readline().rstrip('\n')
            line = f.readline().rstrip('\n')

        # Extract the column names and data formats
        names = tuple(line.split(','))
        line = f.readline().rstrip('\n')
        formats = tuple(line.split(','))

        # Populate the store with data fields
        for line in f.readlines():
            parts = line.rstrip('\n').split(',')
            store.append(tuple(parts))

    # File is now closed
    out = np.array(store, dtype={'names': names, 'formats': formats})
    return out, settings
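A hedged usage sketch of the file layout this expects (an optional START/END settings block, then a names line, a formats line, and the data rows); the file name and values are made up:

import numpy as np

with open('example.csv', 'w') as f:
    f.write('START\n'
            'samplerate,1000\n'
            'END\n'
            'time,volts\n'
            'f8,f8\n'
            '0.0,1.5\n'
            '0.1,1.7\n')

data, settings = import_dataset('example.csv')
print(settings)        # {'samplerate': '1000'}
print(data['volts'])   # [1.5 1.7]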
Example #17
 def pipeOpen(self, cmd, *args, **flags):
     l = tuple(cmd.split(" "))
     for (k, v) in flags.items():
         if v is not None:
             l += len(k) == 1 and ("-%s" % (k,), str(v)) or ("--%s=%s" % (k, v),)
     l += tuple(args)
     return sp.Popen(tuple(a for a in l if a), stdout=sp.PIPE, stderr=sp.PIPE)
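A standalone sketch of the flag expansion used above (the old `and ... or ...` idiom): single-letter keywords become separate "-k value" arguments, longer ones become a single "--key=value" argument.

def expand_flags(**flags):
    l = tuple()
    for (k, v) in flags.items():
        if v is not None:
            l += len(k) == 1 and ("-%s" % (k,), str(v)) or ("--%s=%s" % (k, v),)
    return l

print(expand_flags(n=5, color="auto", q=None))   # ('-n', '5', '--color=auto')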
Example #18
	def writeNetworkConfig(self):
		self.configuredInterfaces = []
		fp = file('/etc/network/interfaces', 'w')
		fp.write("# automatically generated by enigma2\n# do NOT change manually!\n\n")
		fp.write("auto lo\n")
		fp.write("iface lo inet loopback\n\n")
		for ifacename, iface in self.ifaces.items():
			if iface['up'] == True:
				fp.write("auto " + ifacename + "\n")
				self.configuredInterfaces.append(ifacename)
			if iface['dhcp'] == True:
				fp.write("iface "+ ifacename +" inet dhcp\n")
			if iface['dhcp'] == False:
				fp.write("iface "+ ifacename +" inet static\n")
				if iface.has_key('ip'):
					print tuple(iface['ip'])
					fp.write("	address %d.%d.%d.%d\n" % tuple(iface['ip']))
					fp.write("	netmask %d.%d.%d.%d\n" % tuple(iface['netmask']))
					if iface.has_key('gateway'):
						fp.write("	gateway %d.%d.%d.%d\n" % tuple(iface['gateway']))
			if iface.has_key("configStrings"):
				fp.write(iface["configStrings"])
			if iface["preup"] is not False and not iface.has_key("configStrings"):
				fp.write(iface["preup"])
			if iface["predown"] is not False and not iface.has_key("configStrings"):
				fp.write(iface["predown"])
			fp.write("\n")
		fp.close()
		self.configuredNetworkAdapters = self.configuredInterfaces
		self.writeNameserverConfig()
Example #19
    def mergeConcat_branch(self, concatnode):
        nodelist = []
        last_node = None
        for n in concatnode.nextnodes:
            if last_node is None: pass
            elif not isinstance(last_node, DFBranch): pass
            elif not isinstance(n, DFBranch): pass
            elif last_node.condnode == n.condnode:
                truenode_list = (last_node.truenode, n.truenode)
                falsenode_list = (last_node.falsenode, n.falsenode)
                new_truenode_list = []
                new_falsenode_list = []
                pos = 0
                for t in truenode_list:
                    if t is None: new_truenode_list.append(DFUndefined(self.getWidth(falsenode_list[pos])))
                    else: new_truenode_list.append(t)
                    pos += 1

                pos = 0
                for f in falsenode_list:
                    if f is None: new_falsenode_list.append(DFUndefined(self.getWidth(truenode_list[pos])))
                    else: new_falsenode_list.append(f)
                    pos += 1

                new_node = DFBranch(last_node.condnode, DFConcat(tuple(new_truenode_list)), DFConcat(tuple(new_falsenode_list)))
                last_node = new_node
                nodelist.pop()
                nodelist.append(new_node)
                continue
            nodelist.append(n)
            last_node = n
        if len(nodelist) == 1: return nodelist[0]
        return DFConcat(tuple(nodelist))
Example #20
    def _do_fetch(self):
        assert not self.results

        # If we're manually excluding PKs, and we've specified a limit to the results
        # we need to make sure that we grab more than we were asked for otherwise we could filter
        # out too many! These are again limited back to the original request limit
        # while we're processing the results later
        excluded_pk_count = 0
        if self.excluded_pks and self.limits[1]:
            excluded_pk_count = len(self.excluded_pks)
            self.limits = tuple([self.limits[0], self.limits[1] + excluded_pk_count])

        self.results = self._run_query(
            aggregate_type=self.aggregate_type,
            start=self.limits[0],
            limit=None if self.limits[1] is None else (self.limits[1] - (self.limits[0] or 0))
        )

        # Ensure that the results returned is reset
        self.results_returned = 0

        if excluded_pk_count:
            # Reset the upper limit if we adjusted it above
            self.limits = tuple([self.limits[0], self.limits[1] - excluded_pk_count])

        self.query_done = True
Example #21
    def sortEdgesForBiasSeed(part, edges, center):
        '''
        Arguments:
        part: reference to the part object
        edges: a tuple of edges 
        center: a Point instance indicating the center of the fine mesh region

        This function returns the tuples e1 and e2, which correspond to the arguments needed in seedEdgeByBias(),
        where end1Edges = e1 and end2Edges = e2. In seedEdgeByBias(), the smallest elements are
        positioned near the first vertex of each edge passed in end1Edges, and vice versa.
        '''

        # Check that e is a tuple of edges TODO
        # Check that c is a tuple of three floats TODO

        e1 = list()
        e2 = list()

        for e in edges:
            (v1, v2) = e.getVertices()
            v1Distance = Point.distance(Point.fromVertex(part.vertices[v1]), center)
            v2Distance = Point.distance(Point.fromVertex(part.vertices[v2]), center)

            if v1Distance < v2Distance:
                e1.append(e)
            else:
                e2.append(e)

        return (tuple(e1), tuple(e2))
Example #22
def save_log( filename, bg, log_item ):
    """Saves band gaps, valid flags, eigenfrequencies."""

    fd = open( filename, 'w' )
    freq_range = bg.freq_range_margins
    fd.write( log_item )
    fd.write( 'squared: %s\n' % False )
    fd.write( 'n_zeroed: %d\n' % bg.n_zeroed )
    fd.write( 'n_eigs: %d\n' % bg.n_eigs )
    fd.write( 'f0 f1 flag_min f_min v_min flag_max f_max v_max'
              ' kind\ndesc\n' )
    format = "%f %f %d %f %f %d %f %f %s\n%s\n"

    n_row = len( freq_range ) - 1
    fd.write( '%d\n' % n_row )
    for ir in xrange( n_row ):
        f0, f1 = freq_range[[ir, ir+1]]
        gmin, gmax = bg.gaps[ir]
        fd.write( format % ((f0, f1) + tuple( gmin ) + tuple( gmax )
                            + bg.kinds[ir]) )

    fd.write( 'valid resonance\n' )
    freq_range = bg.freq_range_initial
    n_row = len( freq_range )
    fd.write( '%d\n' % n_row )
    valid_in_range = bg.valid[bg.eig_range]
    for ir in xrange( n_row ):
        fd.write( '%d %f\n' % (valid_in_range[ir], freq_range[ir] ) )
    fd.close()
Example #23
 def _all_commands(self):
     path = builtins.__xonsh_env__.get('PATH', [])
     # did PATH change?
     path_hash = hash(tuple(path))
     cache_valid = path_hash == self._path_checksum
     self._path_checksum = path_hash
     # did aliases change?
     al_hash = hash(tuple(sorted(builtins.aliases.keys())))
     # compare against the previous checksum *before* updating it, otherwise
     # a change in aliases would never invalidate the cache
     cache_valid = cache_valid and al_hash == self._alias_checksum
     self._alias_checksum = al_hash
     pm = self._path_mtime
     # did the contents of any directory in PATH change?
     for d in filter(os.path.isdir, path):
         m = os.stat(d).st_mtime
         if m > pm:
             pm = m
             cache_valid = False
     self._path_mtime = pm
     if cache_valid:
         return self._cmds_cache
     allcmds = set()
     for d in filter(os.path.isdir, path):
         allcmds |= set(os.listdir(d))
     allcmds |= set(builtins.aliases.keys())
     self._cmds_cache = frozenset(allcmds)
     return self._cmds_cache
Example #24
    def test_reduce_inner(numpy_reduce_func, nd_reduce_func, multi_axes):
        for i in range(sample_num):
            ndim = np.random.randint(1, 6)
            shape = np.random.randint(1, 11, size=ndim)
            dat = np.random.rand(*shape) - 0.5
            keepdims = np.random.randint(0, 2)
            if multi_axes:
                axis_flags = np.random.randint(0, 2, size=ndim)
                axes = []
                for (axis, flag) in enumerate(axis_flags):
                    if flag:
                        axes.append(axis)
                if 0 == len(axes):
                    axes = tuple(range(ndim))
                else:
                    axes = tuple(axes)
            else:
                axes = np.random.randint(0, ndim)
            numpy_ret = numpy_reduce_func(dat, axis=axes, keepdims=keepdims)

            ndarray_ret = nd_reduce_func(mx.nd.array(dat), axis=axes, keepdims=keepdims)
            if type(ndarray_ret) is mx.ndarray.NDArray:
                ndarray_ret = ndarray_ret.asnumpy()
            assert (ndarray_ret.shape == numpy_ret.shape) or \
                   (ndarray_ret.shape == (1,) and numpy_ret.shape == ()), "nd:%s, numpy:%s" \
                                                         %(ndarray_ret.shape, numpy_ret.shape)
            err = np.square(ndarray_ret - numpy_ret).mean()
            assert err < 1E-4
Example #25
 def get_form_count(root_node, form_id):
     # Check if nodes are already linked to entities before offering to remove a node from form
     node_ids = NodeMapper.get_all_sub_ids(root_node, [])
     if not node_ids:  # There are no sub nodes so skipping test
         return
     g.cursor.execute("SELECT name FROM web.form WHERE id = %(form_id)s;", {'form_id': form_id})
     form_name = g.cursor.fetchone()[0]
     system_type = ''
     class_code = ''
     if form_name == 'Source':
         system_type = 'source content'
     elif form_name == 'Event':
         class_code = app.config['CLASS_CODES']['event']
     elif form_name == 'Person':
         class_code = ['E21']
     elif form_name == 'Group':
         class_code = ['E74']
     elif form_name == 'Legal Body':
         class_code = ['E40']
     else:
         system_type = form_name.lower()
     sql = """
         SELECT count(*) FROM model.link l
         JOIN model.entity e ON l.domain_id = e.id AND l.range_id IN %(node_ids)s
         WHERE l.property_code = 'P2' AND {sql_where} %(params)s;""".format(
            sql_where='e.system_type =' if system_type else 'e.class_code IN')
     g.cursor.execute(sql, {
         'node_ids': tuple(node_ids),
         'params': system_type if system_type else tuple(class_code)})
     debug_model['div sql'] += 1
     return g.cursor.fetchone()[0]
Example #26
    def handle_user(self, data):
        '''
        Insert user informations in data

        Override it to add extra user attributes.
        '''
        # Default to unauthenticated anonymous user
        data['user'] = {
            'username': '',
            'is_authenticated': False,
            'is_staff': False,
            'is_superuser': False,
            'permissions': tuple(),
        }
        if 'django.contrib.sessions.middleware.SessionMiddleware' in settings.MIDDLEWARE_CLASSES:
            user = self.request.user
            data['user']['is_authenticated'] = user.is_authenticated()
            if hasattr(user, 'username'):
                data['user']['username'] = user.username
            elif hasattr(user, 'get_username'):
                data['user']['username'] = user.get_username()
            if hasattr(user, 'is_staff'):
                data['user']['is_staff'] = user.is_staff
            if hasattr(user, 'is_superuser'):
                data['user']['is_superuser'] = user.is_superuser
            if hasattr(user, 'get_all_permissions'):
                data['user']['permissions'] = tuple(user.get_all_permissions())
Example #27
    def __init__(self, provides, *args, **kwargs):
        """Initializer.

        :param provides: Wrapped callable.
        :type provides: callable

        :param args: Tuple of injections.
        :type args: tuple

        :param kwargs: Dictionary of injections.
        :type kwargs: dict
        """
        if not callable(provides):
            raise Error('Provider {0} expected to get callable, '
                        'got {1}'.format('.'.join((self.__class__.__module__,
                                                   self.__class__.__name__)),
                                         provides))

        self.provides = provides

        self.args = tuple()
        self.kwargs = tuple()

        self.add_injections(*args, **kwargs)

        super(Callable, self).__init__()
Example #28
 def __init__(self, data=[], Gap=None, MolType=None):
     """Returns a new JointEnumeration object. See class docstring for info.
     
     Expects a list of Enumeration objects, or objects that can be coerced
     into Enumeration objects (basically, anything that can be a tuple).
     
     Does NOT have an independent concept of a gap -- gets the gaps from the
     constituent subenumerations.
     """
     self.SubEnumerations = self._coerce_enumerations(data)
     sub_enum_lengths = map(len, self.SubEnumerations)
     #build factors for combining symbols.
     curr_factor = 1
     sub_enum_factors = [curr_factor]
     for i in sub_enum_lengths[-1:0:-1]:
         curr_factor *= i
         sub_enum_factors = [curr_factor] + sub_enum_factors
     self._sub_enum_factors = transpose(array([sub_enum_factors]))
     
     try:
         #figure out the gaps correctly
         gaps = [i.Gap for i in self.SubEnumerations]
         self.Gap = tuple(gaps)
         gap_indices = array([i.GapIndex for i in self.SubEnumerations])
         gap_indices *= sub_enum_factors
         self.GapIndex = sum(gap_indices)
     except (TypeError, AttributeError): #index not settable
         self.Gap = None
     
     super(JointEnumeration, self).__init__(self, self.Gap)
     #remember to reset shape after superclass init
     self.Shape = tuple(sub_enum_lengths)
Example #29
    def test_streaming(self):
        input_iter = iter(xrange(int(10000)))
        doubled_stream = vimap.ext.sugar.imap_ordered(
            lambda x: 2 * x,
            input_iter
        )

        # take a few from the doubled output stream
        consumed = tuple(itertools.islice(doubled_stream, 40))

        # exhaust the input
        unspooled_input = tuple(input_iter)

        # now take the rest from the output stream
        rest = tuple(doubled_stream)

        num_processed = len(consumed) + len(rest)

        T.assert_gt(
            len(unspooled_input),
            9000,
            message="Most inputs should not be processed (too much spooling / "
                    "not lazy). Only {0} remained.".format(len(unspooled_input))
        )
        assert num_processed + len(unspooled_input) == 10000, "Something got dropped"

        T.assert_equal(
            consumed + rest,
            tuple(2 * i for i in xrange(num_processed)),
            message="Processed inputs weren't the first in the stream, or are out of order."
        )
Example #30
def _set_element_code_selection_form_field(form, form_field_name, form_field_label, element_id,
                                           elements, element_code_att_name, element_name_att_name):
    element_display_str = "{code_att_name}:{name_att_name}"
    if len(elements) > 0:
        if len(form.initial) > 0:
            element_code_choices = [(getattr(element, element_code_att_name),
                                     element_display_str.format(
                                         code_att_name=str(getattr(element, element_code_att_name)),
                                         name_att_name=getattr(element, element_name_att_name))
                                     ) for element in elements if element.id != element_id]

            element_code_choices = tuple([(form.initial[element_code_att_name],
                                          element_display_str.format(
                                            code_att_name=str(form.initial[element_code_att_name]),
                                            name_att_name=form.initial[element_name_att_name]))] +
                                         element_code_choices + [("----", "----")])

        else:
            element_code_choices = [(getattr(element, element_code_att_name),
                                     element_display_str.format(
                                         code_att_name=str(getattr(element, element_code_att_name)),
                                         name_att_name=getattr(element, element_name_att_name)))
                                    for element in elements]

            element_code_choices = tuple([("----", "----")] + element_code_choices)

        form.fields[form_field_name].widget = forms.Select(
            choices=element_code_choices)
        form.fields[form_field_name].label = form_field_label
Example #31
def vector_add(a, b):
    """Component-wise addition of two vectors.
    >>> vector_add((0, 1), (8, 9))
    (8, 10)
    """
    return tuple(map(operator.add, a, b))
Example #32
 def calculate_banned_tokens(bbsz_idx):
     # before decoding the next token, prevent decoding of ngrams that have already appeared
     ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
     return gen_ngrams[bbsz_idx].get(ngram_index, [])
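A worked example of the lookup above: gen_ngrams[bbsz_idx] maps each (no_repeat_ngram_size - 1)-token prefix to the tokens that already followed it, so those tokens can be banned at the next step (the same structure is built in the full generate() example further below).

no_repeat_ngram_size = 3
gen_tokens = [5, 7, 9, 5, 7]
gen_ngrams = {}
for ngram in zip(*[gen_tokens[i:] for i in range(no_repeat_ngram_size)]):
    gen_ngrams[tuple(ngram[:-1])] = gen_ngrams.get(tuple(ngram[:-1]), []) + [ngram[-1]]

print(gen_ngrams)                   # {(5, 7): [9], (7, 9): [5], (9, 5): [7]}
print(gen_ngrams.get((5, 7), []))   # [9] -- banned continuations after the prefix (5, 7)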
Example #33
 def objects(self):
     return tuple(i for i in self.parameters if i.is_Object)
Example #34
 def input(self):
     ret = [i for i in self._input + list(self.parameters) if i.is_Input]
     return tuple(filter_ordered(ret))
Example #35
 def dimensions(self):
     return tuple(self._dimensions)
Example #36
 def output(self):
     return tuple(self._output)
def group(lst, n):
    for i in range(0, len(lst), n):
        val = lst[i:i+n]
        if len(val) == n:
            yield tuple(val)
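Usage sketch: trailing items that do not fill a complete group are dropped.

print(list(group([1, 2, 3, 4, 5], 2)))   # [(1, 2), (3, 4)]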
Example #38
 def get_rule_ids(self, cr, uid, ids, check_uid, model_name, mode="read"):
     if check_uid == SUPERUSER_ID:
         return []
     res_ids = []
     model_pooler = self.pool[model_name]
     cr.execute(
         """
             SELECT r.id
             FROM ir_rule r
             JOIN ir_model m ON (r.model_id = m.id)
             WHERE m.model = %s
             AND r.active is True
             AND r.perm_""" + mode + """
             AND (r.id IN (SELECT rule_group_id FROM rule_group_rel g_rel
                         JOIN res_groups_users_rel u_rel ON (g_rel.group_id = u_rel.gid)
                         WHERE u_rel.uid = %s) OR r.global)""",
         (model_name, check_uid))
     rule_ids = [x[0] for x in cr.fetchall()]
     if rule_ids:
         # browse user as super-admin root to avoid access errors!
         user = self.pool['res.users'].browse(cr, SUPERUSER_ID, check_uid)
         rule_datas = self.pool['ir.rule'].browse(cr, SUPERUSER_ID,
                                                  rule_ids)
         for rule in rule_datas:
             global_domains = []  # list of domains
             # map: group -> list of domains
             group_domains = {}
             # read 'domain' as UID to have the correct eval context for the
             # rule.
             rule_domain = rule.domain
             #                rule_domain = rule_domain['domain']
             dom = expression.normalize_domain(rule_domain)
             for group in rule.groups:
                 if group in user.groups_id:
                     group_domains.setdefault(group, []).append(dom)
             if not rule.groups:
                 global_domains.append(dom)
             # combine global domains and group domains
             if group_domains:
                 group_domain = expression.OR(
                     map(expression.OR, group_domains.values()))
             else:
                 group_domain = []
             domain = expression.AND(global_domains + [group_domain])
             if domain:
                 # _where_calc is called as superuser. This means that rules can
                  # involve objects on which the real uid has no access rights.
                  # It also means there is no implicit restriction (e.g. an object
                 # references another object the user can't see).
                 query = self.pool.get(model_name)._where_calc(
                     cr, SUPERUSER_ID, domain, active_test=False)
                 where_clause, where_params, tables = query.where_clause, query.where_clause_params, query.tables
                 if where_clause:
                     where_clause = ' and ' + ' and '.join(where_clause)
                     cr.execute(
                         'SELECT ' + model_pooler._table + '.id FROM ' +
                         ','.join(tables) + ' WHERE ' +
                         model_pooler._table + '.id IN %s' + where_clause,
                         ([tuple(ids)] + where_params))
                     returned_ids = [x['id'] for x in cr.dictfetchall()]
                     check_rs = self.profile_check_record_rules_result_count(
                         cr,
                         check_uid,
                         ids,
                         returned_ids,
                         mode,
                         model_pooler,
                         context={})
                     if not check_rs:
                         res_ids.append(rule.id)
     return res_ids
    def get_effective_column_names(
        self,
        batch_ids: Optional[List[str]] = None,
        validator: Optional["Validator"] = None,  # noqa: F821
        variables: Optional[ParameterContainer] = None,
    ) -> List[str]:
        # Obtain include_column_names from "rule state" (i.e., variables and parameters); from instance variable otherwise.
        include_column_names: Optional[
            List[str]
        ] = get_parameter_value_and_validate_return_type(
            domain=None,
            parameter_reference=self.include_column_names,
            expected_return_type=None,
            variables=variables,
            parameters=None,
        )

        # Obtain exclude_column_names from "rule state" (i.e., variables and parameters); from instance variable otherwise.
        exclude_column_names: Optional[
            List[str]
        ] = get_parameter_value_and_validate_return_type(
            domain=None,
            parameter_reference=self.exclude_column_names,
            expected_return_type=None,
            variables=variables,
            parameters=None,
        )

        if batch_ids is None:
            batch_ids: List[str] = self.get_batch_ids(variables=variables)

        if validator is None:
            validator = self.get_validator(variables=variables)

        table_columns: List[str] = validator.get_metric(
            metric=MetricConfiguration(
                metric_name="table.columns",
                metric_domain_kwargs={
                    "batch_id": batch_ids[-1],  # active_batch_id
                },
                metric_value_kwargs=None,
                metric_dependencies=None,
            )
        )

        effective_column_names: List[str] = include_column_names or table_columns

        if exclude_column_names is None:
            exclude_column_names = []

        column_name: str

        effective_column_names = [
            column_name
            for column_name in effective_column_names
            if column_name not in exclude_column_names
        ]

        for column_name in effective_column_names:
            if column_name not in table_columns:
                raise ge_exceptions.ProfilerExecutionError(
                    message=f'Error: The column "{column_name}" in BatchData does not exist.'
                )

        # Obtain include_column_name_suffixes from "rule state" (i.e., variables and parameters); from instance variable otherwise.
        include_column_name_suffixes: Optional[
            Union[str, Iterable, List[str]]
        ] = get_parameter_value_and_validate_return_type(
            domain=None,
            parameter_reference=self.include_column_name_suffixes,
            expected_return_type=None,
            variables=variables,
            parameters=None,
        )

        # Obtain exclude_column_name_suffixes from "rule state" (i.e., variables and parameters); from instance variable otherwise.
        exclude_column_name_suffixes: Optional[
            Union[str, Iterable, List[str]]
        ] = get_parameter_value_and_validate_return_type(
            domain=None,
            parameter_reference=self.exclude_column_name_suffixes,
            expected_return_type=None,
            variables=variables,
            parameters=None,
        )

        if include_column_name_suffixes:
            if isinstance(include_column_name_suffixes, str):
                include_column_name_suffixes = [include_column_name_suffixes]
            else:
                if not isinstance(include_column_name_suffixes, (Iterable, list)):
                    raise ValueError(
                        "Unrecognized include_column_name_suffixes directive -- must be a list or a string."
                    )

            effective_column_names: List[str] = list(
                filter(
                    lambda candidate_column_name: candidate_column_name.endswith(
                        tuple(include_column_name_suffixes)
                    ),
                    effective_column_names,
                )
            )

        if exclude_column_name_suffixes:
            if isinstance(exclude_column_name_suffixes, str):
                exclude_column_name_suffixes = [exclude_column_name_suffixes]
            else:
                if not isinstance(exclude_column_name_suffixes, (Iterable, list)):
                    raise ValueError(
                        "Unrecognized exclude_column_name_suffixes directive -- must be a list or a string."
                    )

            effective_column_names: List[str] = list(
                filter(
                    lambda candidate_column_name: not candidate_column_name.endswith(
                        tuple(exclude_column_name_suffixes)
                    ),
                    effective_column_names,
                )
            )

        # Obtain semantic_type_filter_module_name from "rule state" (i.e., variables and parameters); from instance variable otherwise.
        semantic_type_filter_module_name: Optional[
            str
        ] = get_parameter_value_and_validate_return_type(
            domain=None,
            parameter_reference=self.semantic_type_filter_module_name,
            expected_return_type=None,
            variables=variables,
            parameters=None,
        )
        if semantic_type_filter_module_name is None:
            semantic_type_filter_module_name = "great_expectations.rule_based_profiler.helpers.simple_semantic_type_filter"

        # Obtain semantic_type_filter_class_name from "rule state" (i.e., variables and parameters); from instance variable otherwise.
        semantic_type_filter_class_name: Optional[
            str
        ] = get_parameter_value_and_validate_return_type(
            domain=None,
            parameter_reference=self.semantic_type_filter_class_name,
            expected_return_type=None,
            variables=variables,
            parameters=None,
        )
        if semantic_type_filter_class_name is None:
            semantic_type_filter_class_name = "SimpleSemanticTypeFilter"

        semantic_type_filter: SemanticTypeFilter = instantiate_class_from_config(
            config={
                "module_name": semantic_type_filter_module_name,
                "class_name": semantic_type_filter_class_name,
            },
            runtime_environment={
                "batch_ids": batch_ids,
                "validator": validator,
                "column_names": effective_column_names,
            },
            config_defaults={},
        )
        self._semantic_type_filter = semantic_type_filter

        # Obtain include_semantic_types from "rule state" (i.e., variables and parameters); from instance variable otherwise.
        include_semantic_types: Optional[
            Union[str, SemanticDomainTypes, List[Union[str, SemanticDomainTypes]]]
        ] = get_parameter_value_and_validate_return_type(
            domain=None,
            parameter_reference=self.include_semantic_types,
            expected_return_type=None,
            variables=variables,
            parameters=None,
        )
        include_semantic_types = (
            self.semantic_type_filter.parse_semantic_domain_type_argument(
                semantic_types=include_semantic_types
            )
        )

        # Obtain exclude_semantic_types from "rule state" (i.e., variables and parameters); from instance variable otherwise.
        exclude_semantic_types: Optional[
            Union[str, SemanticDomainTypes, List[Union[str, SemanticDomainTypes]]]
        ] = get_parameter_value_and_validate_return_type(
            domain=None,
            parameter_reference=self.exclude_semantic_types,
            expected_return_type=None,
            variables=variables,
            parameters=None,
        )
        exclude_semantic_types = (
            self.semantic_type_filter.parse_semantic_domain_type_argument(
                semantic_types=exclude_semantic_types
            )
        )

        if include_semantic_types:
            effective_column_names = list(
                filter(
                    lambda candidate_column_name: self.semantic_type_filter.table_column_name_to_inferred_semantic_domain_type_map[
                        candidate_column_name
                    ]
                    in include_semantic_types,
                    effective_column_names,
                )
            )

        if exclude_semantic_types:
            effective_column_names = list(
                filter(
                    lambda candidate_column_name: self.semantic_type_filter.table_column_name_to_inferred_semantic_domain_type_map[
                        candidate_column_name
                    ]
                    not in exclude_semantic_types,
                    effective_column_names,
                )
            )

        return effective_column_names
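A small aside on the suffix filters above: they rely on str.endswith accepting a tuple of suffixes, which is why the suffix lists are wrapped in tuple() before the call.

print("user_id".endswith(("_id", "_pk")))    # True
print("username".endswith(("_id", "_pk")))   # False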
Example #40
    def generate(
        self,
        models,
        sample,
        prefix_tokens=None,
        bos_token=None,
        **kwargs
    ):
        """Generate a batch of translations.

        Args:
            models (List[~fairseq.models.FairseqModel]): ensemble of models
            sample (dict): batch
            prefix_tokens (torch.LongTensor, optional): force decoder to begin
                with these tokens
        """
        model = EnsembleModel(models)
        if not self.retain_dropout:
            model.eval()

        # model.forward normally channels prev_output_tokens into the decoder
        # separately, but SequenceGenerator directly calls model.encoder
        encoder_input = {
            k: v for k, v in sample['net_input'].items()
            if k != 'prev_output_tokens' and k != 'bert_input'
        }

        src_tokens = encoder_input['src_tokens']
        src_lengths = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
        input_size = src_tokens.size()
        # batch dimension goes first followed by source lengths
        bsz = input_size[0]
        src_len = input_size[1]
        beam_size = self.beam_size

        if self.match_source_len:
            max_len = src_lengths.max().item()
        else:
            max_len = min(
                int(self.max_len_a * src_len + self.max_len_b),
                # exclude the EOS marker
                model.max_decoder_positions() - 1,
            )

        # compute the encoder output for each beam
        bertinput = sample['net_input']['bert_input']
        bert_encoder_padding_mask = bertinput.eq(model.models[0].berttokenizer.pad())
        bert_outs, _ = model.models[0].bert_encoder(bertinput, output_all_encoded_layers=True, attention_mask= 1. - bert_encoder_padding_mask)
        bert_outs = bert_outs[self.bert_output_layer]
        if model.models[0].mask_cls_sep:
            bert_encoder_padding_mask += bertinput.eq(model.models[0].berttokenizer.cls())
            bert_encoder_padding_mask += bertinput.eq(model.models[0].berttokenizer.sep())
        bert_outs = bert_outs.permute(1,0,2).contiguous()
        # bert_outs = F.linear(bert_outs, model.models[0].trans_weight, model.models[0].trans_bias)
        bert_outs = [{
            'bert_encoder_out': bert_outs,
            'bert_encoder_padding_mask': bert_encoder_padding_mask,
        }]
        if model.models[0].__class__.__name__ == 'TransformerS2Model':
            encoder_input['bert_encoder_out'] = bert_outs[0]
        encoder_outs = model.forward_encoder(encoder_input)
        new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
        new_order = new_order.to(src_tokens.device).long()
        encoder_outs, bert_outs = model.reorder_encoder_out(encoder_outs, bert_outs, new_order)

        # initialize buffers
        scores = src_tokens.new(bsz * beam_size, max_len + 1).float().fill_(0)
        scores_buf = scores.clone()
        tokens = src_tokens.data.new(bsz * beam_size, max_len + 2).long().fill_(self.pad)
        tokens_buf = tokens.clone()
        tokens[:, 0] = bos_token or self.eos
        attn, attn_buf = None, None
        nonpad_idxs = None
        if prefix_tokens is not None:
            partial_prefix_mask_buf = torch.zeros_like(src_lengths).byte()

        # list of completed sentences
        finalized = [[] for i in range(bsz)]
        finished = [False for i in range(bsz)]
        worst_finalized = [{'idx': None, 'score': -math.inf} for i in range(bsz)]
        num_remaining_sent = bsz

        # number of candidate hypos per step
        cand_size = 2 * beam_size  # 2 x beam size in case half are EOS

        # offset arrays for converting between different indexing schemes
        bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
        cand_offsets = torch.arange(0, cand_size).type_as(tokens)

        # helper function for allocating buffers on the fly
        buffers = {}

        def buffer(name, type_of=tokens):  # noqa
            if name not in buffers:
                buffers[name] = type_of.new()
            return buffers[name]

        def is_finished(sent, step, unfin_idx, unfinalized_scores=None):
            """
            Check whether we've finished generation for a given sentence, by
            comparing the worst score among finalized hypotheses to the best
            possible score among unfinalized hypotheses.
            """
            assert len(finalized[sent]) <= beam_size
            if len(finalized[sent]) == beam_size:
                if self.stop_early or step == max_len or unfinalized_scores is None:
                    return True
                # stop if the best unfinalized score is worse than the worst
                # finalized one
                best_unfinalized_score = unfinalized_scores[unfin_idx].max()
                if self.normalize_scores:
                    best_unfinalized_score /= max_len ** self.len_penalty
                if worst_finalized[sent]['score'] >= best_unfinalized_score:
                    return True
            return False

        def finalize_hypos(step, bbsz_idx, eos_scores, unfinalized_scores=None):
            """
            Finalize the given hypotheses at this step, while keeping the total
            number of finalized hypotheses per sentence <= beam_size.

            Note: the input must be in the desired finalization order, so that
            hypotheses that appear earlier in the input are preferred to those
            that appear later.

            Args:
                step: current time step
                bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
                    indicating which hypotheses to finalize
                eos_scores: A vector of the same size as bbsz_idx containing
                    scores for each hypothesis
                unfinalized_scores: A vector containing scores for all
                    unfinalized hypotheses
            """
            assert bbsz_idx.numel() == eos_scores.numel()

            # clone relevant token and attention tensors
            tokens_clone = tokens.index_select(0, bbsz_idx)
            tokens_clone = tokens_clone[:, 1:step + 2]  # skip the first index, which is EOS
            tokens_clone[:, step] = self.eos
            attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2] if attn is not None else None

            # compute scores per token position
            pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1]
            pos_scores[:, step] = eos_scores
            # convert from cumulative to per-position scores
            pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]

            # normalize sentence-level scores
            if self.normalize_scores:
                eos_scores /= (step + 1) ** self.len_penalty

            cum_unfin = []
            prev = 0
            for f in finished:
                if f:
                    prev += 1
                else:
                    cum_unfin.append(prev)

            sents_seen = set()
            for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
                unfin_idx = idx // beam_size
                sent = unfin_idx + cum_unfin[unfin_idx]

                sents_seen.add((sent, unfin_idx))

                if self.match_source_len and step > src_lengths[unfin_idx]:
                    score = -math.inf

                def get_hypo():

                    if attn_clone is not None:
                        # remove padding tokens from attn scores
                        hypo_attn = attn_clone[i][nonpad_idxs[sent]]
                        _, alignment = hypo_attn.max(dim=0)
                    else:
                        hypo_attn = None
                        alignment = None

                    return {
                        'tokens': tokens_clone[i],
                        'score': score,
                        'attention': hypo_attn,  # src_len x tgt_len
                        'alignment': alignment,
                        'positional_scores': pos_scores[i],
                    }

                if len(finalized[sent]) < beam_size:
                    finalized[sent].append(get_hypo())
                elif not self.stop_early and score > worst_finalized[sent]['score']:
                    # replace worst hypo for this sentence with new/better one
                    worst_idx = worst_finalized[sent]['idx']
                    if worst_idx is not None:
                        finalized[sent][worst_idx] = get_hypo()

                    # find new worst finalized hypo for this sentence
                    idx, s = min(enumerate(finalized[sent]), key=lambda r: r[1]['score'])
                    worst_finalized[sent] = {
                        'score': s['score'],
                        'idx': idx,
                    }

            newly_finished = []
            for sent, unfin_idx in sents_seen:
                # check termination conditions for this sentence
                if not finished[sent] and is_finished(sent, step, unfin_idx, unfinalized_scores):
                    finished[sent] = True
                    newly_finished.append(unfin_idx)
            return newly_finished

        reorder_state = None
        batch_idxs = None
        for step in range(max_len + 1):  # one extra step for EOS marker
            # reorder decoder internal states based on the prev choice of beams
            if reorder_state is not None:
                if batch_idxs is not None:
                    # update beam indices to take into account removed sentences
                    corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
                    reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
                model.reorder_incremental_state(reorder_state)
                encoder_outs, bert_outs = model.reorder_encoder_out(encoder_outs, bert_outs, reorder_state)

            lprobs, avg_attn_scores = model.forward_decoder(
                tokens[:, :step + 1], encoder_outs, bert_outs, temperature=self.temperature,
            )

            lprobs[:, self.pad] = -math.inf  # never select pad
            lprobs[:, self.unk] -= self.unk_penalty  # apply unk penalty

            if self.no_repeat_ngram_size > 0:
                # for each beam and batch sentence, generate a list of previous ngrams
                gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]
                for bbsz_idx in range(bsz * beam_size):
                    gen_tokens = tokens[bbsz_idx].tolist()
                    for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):
                        gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \
                                gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]

            # Record attention scores
            if avg_attn_scores is not None:
                if attn is None:
                    attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2)
                    attn_buf = attn.clone()
                    nonpad_idxs = src_tokens.ne(self.pad)
                attn[:, :, step + 1].copy_(avg_attn_scores)

            scores = scores.type_as(lprobs)
            scores_buf = scores_buf.type_as(lprobs)
            eos_bbsz_idx = buffer('eos_bbsz_idx')
            eos_scores = buffer('eos_scores', type_of=scores)
            if step < max_len:
                self.search.set_src_lengths(src_lengths)

                if self.no_repeat_ngram_size > 0:
                    def calculate_banned_tokens(bbsz_idx):
                        # before decoding the next token, prevent decoding of ngrams that have already appeared
                        ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
                        return gen_ngrams[bbsz_idx].get(ngram_index, [])

                    if step + 2 - self.no_repeat_ngram_size >= 0:
                        banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)]
                    else:
                        # no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
                        banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)]

                    for bbsz_idx in range(bsz * beam_size):
                        lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf

                if prefix_tokens is not None and step < prefix_tokens.size(1):
                    assert isinstance(self.search, search.BeamSearch), \
                            "currently only BeamSearch supports decoding with prefix_tokens"
                    probs_slice = lprobs.view(bsz, -1, lprobs.size(-1))[:, 0, :]
                    cand_scores = torch.gather(
                        probs_slice, dim=1,
                        index=prefix_tokens[:, step].view(-1, 1)
                    ).view(-1, 1).repeat(1, cand_size)
                    if step > 0:
                        # save cumulative scores for each hypothesis
                        cand_scores.add_(scores[:, step - 1].view(bsz, beam_size).repeat(1, 2))
                    cand_indices = prefix_tokens[:, step].view(-1, 1).repeat(1, cand_size)
                    cand_beams = torch.zeros_like(cand_indices)

                # handle prefixes of different lengths
                # when step == prefix_tokens.size(1), we'll have new free-decoding batches
                if prefix_tokens is not None and step <= prefix_tokens.size(1):
                    if step < prefix_tokens.size(1):
                        partial_prefix_mask = prefix_tokens[:, step].eq(self.pad)
                    else:   #  all prefixes finished force-decoding
                        partial_prefix_mask = torch.ones(bsz).to(prefix_tokens).byte()
                    if partial_prefix_mask.any():
                        # track new free-decoding batches, at whose very first step
                        # only use the first beam to eliminate repeats
                        prefix_step0_mask = partial_prefix_mask ^ partial_prefix_mask_buf
                        lprobs.view(bsz, beam_size, -1)[prefix_step0_mask, 1:] = -math.inf
                        partial_scores, partial_indices, partial_beams = self.search.step(
                            step,
                            lprobs.view(bsz, -1, self.vocab_size),
                            scores.view(bsz, beam_size, -1)[:, :, :step],
                        )
                        cand_scores[partial_prefix_mask] = partial_scores[partial_prefix_mask]
                        cand_indices[partial_prefix_mask] = partial_indices[partial_prefix_mask]
                        cand_beams[partial_prefix_mask] = partial_beams[partial_prefix_mask]
                        partial_prefix_mask_buf = partial_prefix_mask

                else:
                    cand_scores, cand_indices, cand_beams = self.search.step(
                        step,
                        lprobs.view(bsz, -1, self.vocab_size),
                        scores.view(bsz, beam_size, -1)[:, :, :step],
                    )
            else:
                # make probs contain cumulative scores for each hypothesis
                lprobs.add_(scores[:, step - 1].unsqueeze(-1))

                # finalize all active hypotheses once we hit max_len
                # pick the hypothesis with the highest prob of EOS right now
                torch.sort(
                    lprobs[:, self.eos],
                    descending=True,
                    out=(eos_scores, eos_bbsz_idx),
                )
                num_remaining_sent -= len(finalize_hypos(step, eos_bbsz_idx, eos_scores))
                assert num_remaining_sent == 0
                break

            # cand_bbsz_idx contains beam indices for the top candidate
            # hypotheses, with a range of values: [0, bsz*beam_size),
            # and dimensions: [bsz, cand_size]
            cand_bbsz_idx = cand_beams.add(bbsz_offsets)

            # finalize hypotheses that end in eos
            eos_mask = cand_indices.eq(self.eos)

            finalized_sents = set()
            if step >= self.min_len:
                # only consider eos when it's among the top beam_size indices
                torch.masked_select(
                    cand_bbsz_idx[:, :beam_size],
                    mask=eos_mask[:, :beam_size],
                    out=eos_bbsz_idx,
                )
                if eos_bbsz_idx.numel() > 0:
                    torch.masked_select(
                        cand_scores[:, :beam_size],
                        mask=eos_mask[:, :beam_size],
                        out=eos_scores,
                    )
                    finalized_sents = finalize_hypos(step, eos_bbsz_idx, eos_scores, cand_scores)
                    num_remaining_sent -= len(finalized_sents)

            assert num_remaining_sent >= 0
            if num_remaining_sent == 0:
                break
            assert step < max_len

            if len(finalized_sents) > 0:
                new_bsz = bsz - len(finalized_sents)

                # construct batch_idxs which holds indices of batches to keep for the next pass
                batch_mask = cand_indices.new_ones(bsz)
                batch_mask[cand_indices.new(finalized_sents)] = 0
                batch_idxs = batch_mask.nonzero().squeeze(-1)

                eos_mask = eos_mask[batch_idxs]
                cand_beams = cand_beams[batch_idxs]
                bbsz_offsets.resize_(new_bsz, 1)
                cand_bbsz_idx = cand_beams.add(bbsz_offsets)
                cand_scores = cand_scores[batch_idxs]
                cand_indices = cand_indices[batch_idxs]
                if prefix_tokens is not None:
                    prefix_tokens = prefix_tokens[batch_idxs]
                    partial_prefix_mask_buf = partial_prefix_mask_buf[batch_idxs]
                src_lengths = src_lengths[batch_idxs]

                scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
                scores_buf.resize_as_(scores)
                tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
                tokens_buf.resize_as_(tokens)
                if attn is not None:
                    attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
                    attn_buf.resize_as_(attn)
                bsz = new_bsz
            else:
                batch_idxs = None

            # set active_mask so that values > cand_size indicate eos hypos
            # and values < cand_size indicate candidate active hypos.
            # After, the min values per row are the top candidate active hypos
            active_mask = buffer('active_mask')
            torch.add(
                eos_mask.type_as(cand_offsets) * cand_size,
                cand_offsets[:eos_mask.size(1)],
                out=active_mask,
            )

            # get the top beam_size active hypotheses, which are just the hypos
            # with the smallest values in active_mask
            active_hypos, _ignore = buffer('active_hypos'), buffer('_ignore')
            torch.topk(
                active_mask, k=beam_size, dim=1, largest=False,
                out=(_ignore, active_hypos)
            )

            active_bbsz_idx = buffer('active_bbsz_idx')
            torch.gather(
                cand_bbsz_idx, dim=1, index=active_hypos,
                out=active_bbsz_idx,
            )
            active_scores = torch.gather(
                cand_scores, dim=1, index=active_hypos,
                out=scores[:, step].view(bsz, beam_size),
            )

            active_bbsz_idx = active_bbsz_idx.view(-1)
            active_scores = active_scores.view(-1)

            # copy tokens and scores for active hypotheses
            torch.index_select(
                tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
                out=tokens_buf[:, :step + 1],
            )
            torch.gather(
                cand_indices, dim=1, index=active_hypos,
                out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
            )
            if step > 0:
                torch.index_select(
                    scores[:, :step], dim=0, index=active_bbsz_idx,
                    out=scores_buf[:, :step],
                )
            torch.gather(
                cand_scores, dim=1, index=active_hypos,
                out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
            )

            # copy attention for active hypotheses
            if attn is not None:
                torch.index_select(
                    attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
                    out=attn_buf[:, :, :step + 2],
                )

            # swap buffers
            tokens, tokens_buf = tokens_buf, tokens
            scores, scores_buf = scores_buf, scores
            if attn is not None:
                attn, attn_buf = attn_buf, attn

            # reorder incremental state in decoder
            reorder_state = active_bbsz_idx

        # sort by score descending
        for sent in range(len(finalized)):
            finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)

        return finalized
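
# Standalone sketch (not part of the generator above) of the no-repeat-ngram
# blocking idea used in the loop: map every (n-1)-token prefix seen so far to
# the tokens that followed it, then ban those tokens whenever the current
# prefix matches. The helper names below are illustrative, not from the code.
def build_ngram_bans(generated, n):
    bans = {}
    for ngram in zip(*[generated[i:] for i in range(n)]):
        bans.setdefault(tuple(ngram[:-1]), []).append(ngram[-1])
    return bans

def banned_for(generated, n, bans):
    # tokens that would complete an already-generated n-gram
    return bans.get(tuple(generated[len(generated) - n + 1:]), [])

# e.g. with n=3, the history [5, 6, 7, 5, 6] bans 7 for the next step
assert banned_for([5, 6, 7, 5, 6], 3, build_ngram_bans([5, 6, 7, 5, 6], 3)) == [7]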
Ejemplo n.º 41
0
 def _clean_items(self, items):
     return tuple(items)
Ejemplo n.º 42
0
    def get_io_stat(self, elapsed):
        stat_name = 'io'
        self.curr_stat['io'] = tuple(0 for _ in range(11))
        ret = []

        num_disk = 0

        for l in self.get_stat(stat_name):
            if l[2] in self.block_devices and len(l) >= 14:
                # total io stat
                self.curr_stat['io'] = tuple(
                    self.curr_stat['io'][i] + int(l[i + 3]) for i in range(11))
                num_disk += 1

                # per disk io stat
                self.curr_stat['io_' + l[2]] = tuple(
                    int(l[i + 3]) for i in range(11))

            # https://www.percona.com/doc/percona-toolkit/2.1/pt-diskstats.html
        for disk in ['io'] + ['io_' + d for d in self.block_devices]:
            if disk not in self.curr_stat or disk not in self.old_stat:
                continue

            rd, rd_mrg, rd_sec, rd_tim, wr, wr_mrg, wr_sec, wr_tim, in_prg, t1, t2 = tuple(
                1.0 * (self.curr_stat[disk][i] - self.old_stat[disk][i])
                for i in range(11))
            in_prg = self.curr_stat[disk][8]

            rd_rt, wr_rt, busy, io_s, qtime, ttime, stime = tuple(
                0 for i in range(7))

            if rd + rd_mrg > 0:
                rd_rt = rd_tim / (rd + rd_mrg)
            if wr + wr_mrg > 0:
                wr_rt = wr_tim / (wr + wr_mrg)
            busy = 100 * t1 / 1000 / elapsed
            io_s = (rd + wr) / elapsed
            if rd + rd_mrg + wr + wr_mrg > 0:
                stime = t1 / (rd + rd_mrg + wr + wr_mrg)
            if rd + rd_mrg + wr + wr_mrg + in_prg > 0:
                ttime = t2 / (rd + rd_mrg + wr + wr_mrg + in_prg)

            qtime = ttime - stime

            rd_s, rd_avgkb, rd_m_s, rd_cnc, rd_mrg_s, wr_s, wr_avgkb, wr_m_s, wr_cnc, wr_mrg_s = tuple(
                0 for i in range(10))

            rd_s = rd / elapsed
            if rd > 0:
                rd_avgkb = rd_sec / rd / 2
            rd_m_s = rd_sec / 2 / 1024 / elapsed
            rd_cnc = rd_tim / 1000 / elapsed
            rd_mrg_s = rd_mrg / elapsed

            wr_s = wr / elapsed
            if wr > 0:
                wr_avgkb = wr_sec / wr / 2
            wr_m_s = wr_sec / 2 / 1024 / elapsed
            wr_cnc = wr_tim / 1000 / elapsed
            wr_mrg_s = wr_mrg / elapsed

            # io_read, io_write, io_queue, io_await, io_svctm, io_util, io_read_mb, io_write_mb,
            if disk == 'io':
                # total disk io stat
                self.stat[disk] = (rd_s, wr_s, in_prg, ttime, stime,
                                   busy / num_disk, rd_m_s, wr_m_s)
            else:
                label = ('rd_s', 'rd_avgkb', 'rd_m_s', 'rd_mrg_s', 'rd_cnc',
                         'rd_rt', 'wr_s', 'wr_avgkb', 'wr_m_s', 'wr_mrg_s',
                         'wr_cnc', 'wr_rt', 'busy', 'in_prg', 'io_s', 'qtime',
                         'stime')

                # per disk io stat
                self.stat[disk] = (rd_s, rd_avgkb, rd_m_s, rd_mrg_s, rd_cnc,
                                   rd_rt, wr_s, wr_avgkb, wr_m_s, wr_mrg_s,
                                   wr_cnc, wr_rt, busy, in_prg, io_s, qtime,
                                   stime)

                diskstat = format_stat(label, self.stat[disk])
                diskstat['dev'] = disk[3:]
                ret.append(diskstat)
            self.old_stat[disk] = self.curr_stat[disk]
        return ret
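
# Illustrative standalone helper (not part of the class above): given two
# 11-field /proc/diskstats samples and the elapsed seconds between them, the
# per-second rates are derived the same way as in get_io_stat (delta / elapsed).
def io_rates(old, curr, elapsed):
    rd, rd_mrg, rd_sec, rd_tim, wr, wr_mrg, wr_sec, wr_tim, in_prg, t1, t2 = (
        curr[i] - old[i] for i in range(11))
    return {
        'rd_s': rd / elapsed,                  # reads per second
        'wr_s': wr / elapsed,                  # writes per second
        'busy': 100.0 * t1 / 1000 / elapsed,   # % of the interval the device was busy
    }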
Ejemplo n.º 43
0
def load_input_crop(ios,
                    offset_wc,
                    contexts_wc,
                    input_shapes_wc,
                    padding_mode='reflect',
                    pad_value=0):
    if isinstance(ios, tuple) or isinstance(ios, list):
        assert (isinstance(contexts_wc[0], tuple)
                or isinstance(contexts_wc[0], list))
        assert len(contexts_wc) == len(ios)
    else:
        ios = (ios, )
        contexts_wc = (contexts_wc, )
    datas = []

    offset_correction_wc = []
    for dim in range(len(offset_wc)):
        max_voxel_size = ios[0].voxel_size[dim]
        max_voxel_size_io_idx = [
            0,
        ]
        for k, io in enumerate(ios[1:]):
            if io.voxel_size[dim] > max_voxel_size:
                max_voxel_size = io.voxel_size[dim]
                max_voxel_size_io_idx = [
                    k + 1,
                ]
            elif io.voxel_size[dim] == max_voxel_size:
                max_voxel_size_io_idx.append(k + 1)
        if len(max_voxel_size_io_idx) > 1:
            assert all([
                contexts_wc[i][dim] == contexts_wc[max_voxel_size_io_idx[0]]
                [dim] for i in max_voxel_size_io_idx
            ])
        offset_correction_wc.append(
            (offset_wc[dim] - contexts_wc[max_voxel_size_io_idx[0]][dim]) %
            max_voxel_size)

    for io, context_wc, input_shape_wc in zip(ios, contexts_wc,
                                              input_shapes_wc):
        starts_wc = tuple(
            np.array(offset_wc) - np.array(context_wc) -
            np.array(offset_correction_wc))
        stops_wc = tuple(np.array(starts_wc) + np.array(input_shape_wc))
        shape_wc = io.shape

        # we pad the input volume if necessary
        pad_left_wc = None
        pad_right_wc = None

        # check for padding to the left
        if any(start_wc < 0 for start_wc in starts_wc):
            pad_left_wc = tuple(
                abs(start_wc) if start_wc < 0 else 0 for start_wc in starts_wc)
            starts_wc = [max(0, start_wc) for start_wc in starts_wc]

        # check for padding to the right
        if any(stop_wc > shape_wc[i] for i, stop_wc in enumerate(stops_wc)):
            pad_right_wc = tuple(stop_wc -
                                 shape_wc[i] if stop_wc > shape_wc[i] else 0
                                 for i, stop_wc in enumerate(stops_wc))
            stops_wc = [
                min(shape_wc[i], stop_wc) for i, stop_wc in enumerate(stops_wc)
            ]
        data = io.read(starts_wc, stops_wc)

        # pad if necessary
        if pad_left_wc is not None or pad_right_wc is not None:
            pad_left_wc = (0, 0, 0) if pad_left_wc is None else pad_left_wc
            pad_right_wc = (0, 0, 0) if pad_right_wc is None else pad_right_wc
            assert all(
                pad_right_wc % res == 0
                for pad_right_wc, res in zip(pad_right_wc, io.voxel_size))
            assert all(pad_left_wc % res == 0
                       for pad_left_wc, res in zip(pad_left_wc, io.voxel_size))
            pad_right_vc = tuple(
                int(pad_right_wc / res)
                for pad_right_wc, res in zip(pad_right_wc, io.voxel_size))
            pad_left_vc = tuple(
                int(pad_left_wc / res)
                for pad_left_wc, res in zip(pad_left_wc, io.voxel_size))
            pad_width_vc = tuple(
                (pl_vc, pr_vc)
                for pl_vc, pr_vc in zip(pad_left_vc, pad_right_vc))
            if padding_mode == 'constant':
                datas.append(
                    np.pad(data,
                           pad_width_vc,
                           mode=padding_mode,
                           constant_values=pad_value))
            else:
                datas.append(np.pad(data, pad_width_vc, mode=padding_mode))
        else:
            datas.append(data)

    return datas, offset_correction_wc
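
# The offset_correction computed above just snaps the requested start onto the
# voxel grid of the coarsest io so all inputs stay aligned; per dimension the
# arithmetic reduces to the following (illustrative) one-liner:
def snap_correction(offset_wc, context_wc, voxel_size):
    return (offset_wc - context_wc) % voxel_size

# e.g. snap_correction(130, 40, 36) == (130 - 40) % 36 == 18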
Ejemplo n.º 44
0
def break_code(string, corpus):
    if len(string) < 4000:
        n = 10
        iterations = 25000
    else:
        n = 6
        iterations = 10000
    start_time = time.time()
    dictionary_alphabet = dict.fromkeys(stt.ascii_lowercase, (0, 0))
    corpus_tuple = tuple(corpus.split())
    string = string[0:len(string) - 1]
    # W0 probabilities for each alphabet
    for i in range(len(dictionary_alphabet.keys())):
        list_words = [w for w in corpus_tuple if w.startswith(stt.ascii_lowercase[i])]
        dictionary_alphabet[stt.ascii_lowercase[i]] = (len(list_words), len(list_words) / (len(corpus_tuple)))

    # transition probabilities
    transition_dict = {}
    for word in corpus_tuple:
        transition_val = zip(word, word[1:])
        for trans_val in transition_val:
            transition_dict[trans_val] = transition_dict.get(trans_val, 0) + 1
    total_sum = sum(transition_dict.values())
    transition_dict = {x: transition_dict[x] / total_sum for x in transition_dict}

    #replacement and rearrangement tables
    letters = list(range(ord('a'), ord('z') + 1))
    random.shuffle(letters)
    replace_table_original = dict(zip(map(chr, range(ord('a'), ord('z') + 1)), map(chr, letters)))
    # replace_table_original ={'n': 'e','b': 'a','y': 'r','x': 'i','k': 'o','l': 't','m': 'n','s': 's','h': 'l','t': 'c','r': 'u','a': 'd',
    #                          'u': 'p','q': 'm','w': 'h', 'i': 'g', 'j': 'b', 'e': 'f','v': 'y','d': 'w','c': 'k','z': 'v','o': 'x','p': 'z','f': 'j','g': 'q'}

    #rearrangement table
    rearrange_table_original = list(range(0, 4))
    random.shuffle(rearrange_table_original)

    # decode for the first time to obtain p_d
    # decode by traversing in reverse first decrypt using the rearrangement code and then decrypt using the replace code
    rearrange_decode = "".join(
        ["".join([string[rearrange_table_original[j] + i] for j in range(0, len(rearrange_table_original))]) for i in
         range(0, len(string), len(rearrange_table_original))])

    #decode using the replace table
    d_decode = rearrange_decode.translate({ord(i): ord(replace_table_original[i]) for i in replace_table_original})
    #calculate the initial probability
    p_d = calculate_probability(d_decode, transition_dict, dictionary_alphabet)
    # initialize the result so best_decoded is defined even if no proposal is ever accepted
    best_decoded = d_decode

    # loop to run the algorithm multiple times
    for loop in range(0, n):
        # each loop runs this many iterations
        for ran in range(iterations):
            rand_num = np.random.randint(0, 2)
            if rand_num == 0:
                decode_T_dash = "".join(
                    ["".join([string[rearrange_table_original[j] + i] for j in range(0, len(rearrange_table_original))])
                     for i in
                     range(0, len(string), len(rearrange_table_original))])

                decode_T_dash1, random_alpha1, random_alpha2 = decode_replacement(decode_T_dash,
                                                                                  dict(replace_table_original))
                p_d_dash = calculate_probability(decode_T_dash1, transition_dict, dictionary_alphabet)
            else:
                rearrange_decode1, random_number1, random_number2 = decode_rearrangement(string,
                                                                                         list(rearrange_table_original))
                decode_T_dash1 = rearrange_decode1.translate(
                    {ord(i): ord(replace_table_original[i]) for i in replace_table_original})
                p_d_dash = calculate_probability(decode_T_dash1, transition_dict, dictionary_alphabet)

            if p_d_dash > p_d:
                if rand_num == 0:
                    replace_table_original[random_alpha1], replace_table_original[random_alpha2] = \
                    replace_table_original[random_alpha2], \
                    replace_table_original[random_alpha1]
                else:
                    #             print(rearrange_table_original)
                    rearrange_table_original[random_number1], rearrange_table_original[random_number2] = \
                    rearrange_table_original[random_number2], \
                    rearrange_table_original[random_number1]
                #             print(rearrange_table_original)

                p_d = p_d_dash
                best_decoded = decode_T_dash1
            else:
                rand_nums = np.random.binomial(1, np.exp(p_d_dash - p_d))
                if rand_nums == 1:
                    if rand_num == 0:
                        replace_table_original[random_alpha1], replace_table_original[random_alpha2] = \
                        replace_table_original[random_alpha2], \
                        replace_table_original[random_alpha1]
                    else:
                        rearrange_table_original[random_number1], rearrange_table_original[random_number2] = \
                        rearrange_table_original[random_number2], \
                        rearrange_table_original[random_number1]
                    p_d = p_d_dash
                    best_decoded = decode_T_dash1

    return best_decoded
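
# The accept/reject rule above is a Metropolis step on log-probabilities:
# always keep a proposal that improves the score, otherwise keep it with
# probability exp(p_new - p_old). Minimal sketch with illustrative names:
import numpy as np

def metropolis_accept(p_old, p_new, rng=np.random):
    if p_new > p_old:
        return True
    return rng.binomial(1, np.exp(p_new - p_old)) == 1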
Ejemplo n.º 45
0
def run_inference(prediction,
                  preprocess,
                  postprocess,
                  io_ins,
                  io_outs,
                  offset_list,
                  input_shapes_wc,
                  output_shape_wc,
                  padding_mode='constant',
                  num_cpus=5,
                  log_processed=None):

    assert callable(prediction)
    assert callable(preprocess)

    n_blocks = len(offset_list)
    print("Starting prediction...")
    print("For %i blocks" % n_blocks)
    if not (isinstance(io_ins, list) or isinstance(io_ins, tuple)):
        io_ins = (io_ins, )
        input_shapes_wc = (input_shapes_wc, )

    # the additional context requested in the input
    contexts_wc = []
    for in_sh_wc, io_in in zip(input_shapes_wc, io_ins):
        context_wc_total = np.array(
            [in_sh_wc[i] - output_shape_wc[i] for i in range(len(in_sh_wc))])
        context_vc_left = (context_wc_total / io_in.voxel_size) // 2
        context_wc_left = context_vc_left * io_in.voxel_size
        contexts_wc.append(tuple(context_wc_left.astype('uint32')))

    shape_wc = io_outs[0].shape  # assume those are the same
    assert all(io_out.shape == shape_wc for io_out in io_outs), \
        "different output shapes are not implemented yet"

    @dask.delayed
    def load_offset(offset_wc):
        print("Start predicting block at", offset_wc)
        return load_input(io_ins,
                          offset_wc,
                          contexts_wc,
                          input_shapes_wc,
                          padding_mode=padding_mode)

    preprocess = dask.delayed(preprocess)
    predict = dask.delayed(prediction)

    if postprocess is not None:
        postprocess = dask.delayed(postprocess)

    # @dask.delayed(nout=2)
    # def verify_shape(offset_wc, output):
    #     def verify_array_shape(offset_arr, out_arr, io_outs):
    #         # crop if necessary
    #         if out_arr.ndim == 4:
    #             stops = [off + outs for off, outs in zip(offset_arr, out_arr.shape[1:])]
    #         elif out_arr.ndim == 3:
    #             stops = [off + outs for off, outs in zip(offset_arr, out_arr.shape)]
    #         if any(stop > dim_size for stop, dim_size in zip(stops, shape)):
    #             if out_arr.ndim == 4:
    #                 bb = ((slice(None),) +
    #                       tuple(slice(0, dim_size - off if stop > dim_size else None)
    #                             for stop, dim_size, off in zip(stops, shape, offset_arr)))
    #             elif out_arr.ndim == 3:
    #                 bb = (tuple(slice(0, dim_size - off if stop > dim_size else None)
    #                             for stop, dim_size, off in zip(stops, shape, offset_arr)))
    #             out_arr = out_arr[bb]
    #
    #         output_bounding_b = tuple(slice(off, off + outs)
    #                                     for off, outs in zip(offset_arr, output_shape))
    #
    #         return out_arr, output_bounding_b
    #
    #     if isinstance(output, list):
    #         verified_outputs = []
    #         output_bounding_box = []
    #         for out in output:
    #             assert isinstance(out, np.ndarray)
    #             o, bb = verify_array_shape(offset_wc, out)
    #             verified_outputs.append(o)
    #             output_bounding_box.append(bb)
    #         return verified_outputs, output_bounding_box
    #     elif isinstance(output, np.ndarray):
    #         return verify_array_shape(offset, output)
    #     else:
    #         raise TypeError("don't know what to do with output of type"+type(output))

    @dask.delayed()
    def verify_shape(offset_wc, output):
        outs = []
        for io_out, out in zip(io_outs, output):
            out = io_out.verify_block_shape(offset_wc, out)
            outs.append(out)
        return outs

    @dask.delayed
    def write_output(output, offsets_wc):
        for io_out, out in zip(io_outs, output):
            io_out.write(out, offsets_wc)
        return 1

    @dask.delayed
    def log(off):
        if log_processed is not None:
            with open(log_processed, 'a') as log_f:
                log_f.write(json.dumps(off) + ', ')
        return off

    # iterate over all the offsets, get the input data and predict
    results = []
    for offsets_wc in offset_list:
        output = tz.pipe(offsets_wc, log, load_offset, preprocess, predict)
        output_crop = verify_shape(offsets_wc, output)
        if postprocess is not None:
            output_crop = postprocess(output_crop, offsets_wc)
        result = write_output(output_crop, offsets_wc)
        results.append(result)

    get = functools.partial(dask.threaded.get, num_workers=num_cpus)
    # NOTE: Because dask.compute doesn't take an argument, but rather an
    # arbitrary number of arguments, computing each in turn, the output of
    # dask.compute(results) is a tuple of length 1, with its only element
    # being the results list. If instead we pass the results list as *args,
    # we get the desired container of results at the end.
    success = dask.compute(*results, get=get)
    print('Ran {0:} jobs'.format(sum(success)))
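
# Minimal sketch of the dask.delayed pipeline pattern used above: each block
# becomes a small task graph (load -> preprocess -> predict -> write) and
# everything is computed at the end. Newer dask versions take scheduler=
# instead of the deprecated get= keyword. Names below are illustrative.
import dask

@dask.delayed
def load(i):
    return i

@dask.delayed
def process(x):
    return x * 2

@dask.delayed
def write(x):
    return 1  # mimic the "1 per written block" convention above

results = [write(process(load(i))) for i in range(4)]
success = dask.compute(*results, scheduler='threads', num_workers=2)
print('Ran {0:} jobs'.format(sum(success)))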
Ejemplo n.º 46
0
def run_inference_crop(prediction,
                       preprocess,
                       postprocess,
                       io_ins,
                       io_outs,
                       offset_list,
                       network_input_shapes_wc,
                       network_output_shape_wc,
                       chunk_shape_wc,
                       padding_mode='constant',
                       num_cpus=5,
                       log_processed=None,
                       pad_value=0):

    assert callable(prediction)
    assert callable(preprocess)

    n_blocks = len(offset_list)
    print("Starting prediction...")
    print("For %i blocks" % n_blocks)
    if not (isinstance(io_ins, list) or isinstance(io_ins, tuple)):
        io_ins = (io_ins, )
        network_input_shapes_wc = (network_input_shapes_wc, )

    # the additional context requested in the input
    contexts_wc = []
    for in_sh_wc, io_in in zip(network_input_shapes_wc, io_ins):
        context_wc_total = np.array([
            in_sh_wc[i] - network_output_shape_wc[i]
            for i in range(len(in_sh_wc))
        ])
        context_vc_left = (context_wc_total / io_in.voxel_size) // 2
        context_wc_left = context_vc_left * io_in.voxel_size
        contexts_wc.append(tuple(context_wc_left.astype('uint32')))

    for io in io_outs:
        assert all(chunk_sh_wc % res == 0
                   for chunk_sh_wc, res in zip(chunk_shape_wc, io.voxel_size))

    shape_wc = io_outs[0].shape  # assume those are the same
    assert all(io_out.shape == shape_wc for io_out in io_outs), \
        "different output shapes are not implemented yet"

    @dask.delayed(nout=2)
    def load_offset(offset_wc):
        return load_input_crop(io_ins,
                               offset_wc,
                               contexts_wc,
                               network_input_shapes_wc,
                               padding_mode=padding_mode,
                               pad_value=pad_value)

    preprocess = dask.delayed(preprocess, nout=2)
    predict = dask.delayed(prediction, nout=2)

    if postprocess is not None:
        postprocess = dask.delayed(postprocess)

    @dask.delayed()
    def verify_shape(offset_wc, output):
        outs = []
        for io_out, out in zip(io_outs, output):
            out = io_out.verify_block_shape(offset_wc, out)
            outs.append(out)
        return outs

    @dask.delayed
    def write_output(output, offsets_wc):
        for io_out, out in zip(io_outs, output):
            print("Write output of shape", out.shape, "to", offsets_wc)
            io_out.write(out, offsets_wc)
        return 1

    @dask.delayed
    def write_output_and_log(output, offsets_wc):
        for io_out, out in zip(io_outs, output):
            io_out.write(out, offsets_wc)
        if log_processed is not None:
            with open(log_processed, 'a') as log_f:
                log_f.write(json.dumps(offsets_wc) + ',')
        return 1

    @dask.delayed
    def log(off):
        if log_processed is not None:
            with open(log_processed, 'a') as log_f:
                log_f.write(json.dumps(off) + '\n')
        return off

    @dask.delayed
    def crop_to_chunk_shape(arrs, offset_correction_wc):
        ret_arrs = []
        for io, arr in zip(io_outs, arrs):
            assert all(oc % res == 0
                       for oc, res in zip(offset_correction_wc, io.voxel_size))
            offset_correction_vc = [
                int(oc / res)
                for oc, res in zip(offset_correction_wc, io.voxel_size)
            ]
            assert all(
                (chunk_sh_wc % res == 0)
                for chunk_sh_wc, res in zip(chunk_shape_wc, io.voxel_size))
            stops_vc = [
                int(oc_vc + chunk_sh_wc / res) for oc_vc, chunk_sh_wc, res in
                zip(offset_correction_vc, chunk_shape_wc, io.voxel_size)
            ]
            bb_vc = tuple(
                slice(off_vc, stop_vc)
                for off_vc, stop_vc in zip(offset_correction_vc, stops_vc))
            ret_arrs.append(arr[bb_vc])
        return ret_arrs

    # iterate over all the offsets, get the input data and predict
    results = []
    for offsets_wc in offset_list:
        inps, offset_correction_wc = tz.pipe(offsets_wc, load_offset)
        output = tz.pipe(inps, preprocess, predict)
        chunk = crop_to_chunk_shape(output, offset_correction_wc)
        output_crop = verify_shape(offsets_wc, chunk)
        if postprocess is not None:
            output_crop = postprocess(output_crop, offsets_wc)
        result = write_output_and_log(output_crop, offsets_wc)
        results.append(result)

    # deprecated in current dask
    # success = dask.compute(*results, get=get)
    # get = functools.partial(dask.threaded.get, num_workers=num_cpus)

    # NOTE: Because dask.compute doesn't take an argument, but rather an
    # arbitrary number of arguments, computing each in turn, the output of
    # dask.compute(results) is a tuple of length 1, with its only element
    # being the results list. If instead we pass the results list as *args,
    # we get the desired container of results at the end.
    success = dask.compute(*results, scheduler='threads', num_workers=num_cpus)
    print('Ran {0:} jobs'.format(sum(success)))
Ejemplo n.º 47
0
 def guess_version(self):
     return tuple(self._version), False
Ejemplo n.º 48
0
def load_input(ios,
               offset_wc,
               contexts_wc,
               input_shapes_wc,
               padding_mode='reflect'):
    if isinstance(ios, tuple) or isinstance(ios, list):
        assert (isinstance(contexts_wc[0], tuple)
                or isinstance(contexts_wc[0], list))
        assert len(contexts_wc) == len(ios)
    else:
        ios = (ios, )
        contexts_wc = (contexts_wc, )
    datas = []

    for io, context_wc, input_shape_wc in zip(ios, contexts_wc,
                                              input_shapes_wc):

        starts_wc = [
            off_wc - context_wc[i] for i, off_wc in enumerate(offset_wc)
        ]
        stops_wc = [
            start_wc + inp_sh_wc
            for start_wc, inp_sh_wc in zip(starts_wc, input_shape_wc)
        ]
        shape_wc = io.shape

        # we pad the input volume if necessary
        pad_left_wc = None
        pad_right_wc = None

        # check for padding to the left
        if any(start_wc < 0 for start_wc in starts_wc):
            pad_left_wc = tuple(
                abs(start_wc) if start_wc < 0 else 0 for start_wc in starts_wc)
            starts_wc = [max(0, start_wc) for start_wc in starts_wc]

        # check for padding to the right
        if any(stop_wc > shape_wc[i] for i, stop_wc in enumerate(stops_wc)):
            pad_right_wc = tuple(stop_wc -
                                 shape_wc[i] if stop_wc > shape_wc[i] else 0
                                 for i, stop_wc in enumerate(stops_wc))
            stops_wc = [
                min(shape_wc[i], stop_wc) for i, stop_wc in enumerate(stops_wc)
            ]

        data = io.read(starts_wc, stops_wc)
        # pad if necessary
        if pad_left_wc is not None or pad_right_wc is not None:
            pad_left_wc = (0, 0, 0) if pad_left_wc is None else pad_left_wc
            pad_right_wc = (0, 0, 0) if pad_right_wc is None else pad_right_wc
            assert all(
                pad_right_wc % res == 0
                for pad_right_wc, res in zip(pad_right_wc, io.voxel_size))
            assert all(pad_left_wc % res == 0
                       for pad_left_wc, res in zip(pad_left_wc, io.voxel_size))
            pad_right_vc = tuple(
                int(pad_right_wc / res)
                for pad_right_wc, res in zip(pad_right_wc, io.voxel_size))
            pad_left_vc = tuple(
                int(pad_left_wc / res)
                for pad_left_wc, res in zip(pad_left_wc, io.voxel_size))
            pad_width_vc = tuple(
                (pl_vc, pr_vc)
                for pl_vc, pr_vc in zip(pad_left_vc, pad_right_vc))
            datas.append(np.pad(data, pad_width_vc, mode=padding_mode))
        else:
            datas.append(data)
    return datas
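
# Minimal sketch of the crop-with-padding pattern used above, on a plain numpy
# array instead of an io object (illustrative only): read the in-bounds part
# of the requested region and pad whatever falls outside the volume.
import numpy as np

def crop_with_padding(volume, starts, stops, mode='reflect'):
    starts, stops = np.asarray(starts), np.asarray(stops)
    shape = np.asarray(volume.shape)
    pad_left = np.where(starts < 0, -starts, 0)
    pad_right = np.where(stops > shape, stops - shape, 0)
    data = volume[tuple(slice(max(0, a), min(s, b))
                        for a, b, s in zip(starts, stops, shape))]
    if pad_left.any() or pad_right.any():
        data = np.pad(data, tuple(zip(pad_left, pad_right)), mode=mode)
    return data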
Ejemplo n.º 49
0
def plot_energy(
    ax,
    energy,
    kind,
    bfmi,
    figsize,
    textsize,
    fill_alpha,
    fill_color,
    fill_kwargs,
    plot_kwargs,
    bw,
    legend,
    backend_kwargs,
    show,
):
    """Bokeh energy plot."""
    if backend_kwargs is None:
        backend_kwargs = {}

    backend_kwargs = {
        **backend_kwarg_defaults(("dpi", "plot.bokeh.figure.dpi")),
        **backend_kwargs,
    }
    dpi = backend_kwargs.pop("dpi")

    figsize, _, _, _, line_width, _ = _scale_fig_size(figsize, textsize, 1, 1)

    fill_kwargs = {} if fill_kwargs is None else fill_kwargs
    plot_kwargs = {} if plot_kwargs is None else plot_kwargs
    plot_kwargs.setdefault("line_width", line_width)
    if kind == "hist":
        legend = False

    if ax is None:
        ax = create_axes_grid(
            1,
            figsize=figsize,
            squeeze=True,
            backend_kwargs=backend_kwargs,
        )

    _colors = [
        prop for _, prop in zip(
            range(10), cycle(mpl_rcParams["axes.prop_cycle"].by_key()
                             ["color"]))
    ]
    if (fill_color[0].startswith("C")
            and len(fill_color[0]) == 2) and (fill_color[1].startswith("C")
                                              and len(fill_color[1]) == 2):
        fill_color = tuple(
            (_colors[int(color[1:]) % 10] for color in fill_color))
    elif fill_color[0].startswith("C") and len(fill_color[0]) == 2:
        fill_color = tuple([_colors[int(fill_color[0][1:]) % 10]] +
                           list(fill_color[1:]))
    elif fill_color[1].startswith("C") and len(fill_color[1]) == 2:
        fill_color = tuple(
            list(fill_color[:1]) + [_colors[int(fill_color[1][1:]) % 10]])

    series = zip(
        fill_alpha,
        fill_color,
        ("Marginal Energy", "Energy transition"),
        (energy - energy.mean(), np.diff(energy)),
    )

    labels = []

    if kind == "kde":
        for alpha, color, label, value in series:
            fill_kwargs["fill_alpha"] = alpha
            fill_kwargs["fill_color"] = vectorized_to_hex(color)
            plot_kwargs["line_alpha"] = alpha
            plot_kwargs["line_color"] = vectorized_to_hex(color)
            _, glyph = plot_kde(
                value,
                bw=bw,
                label=label,
                fill_kwargs=fill_kwargs,
                plot_kwargs=plot_kwargs,
                ax=ax,
                legend=legend,
                backend="bokeh",
                backend_kwargs={},
                show=False,
                return_glyph=True,
            )
            labels.append((
                label,
                glyph,
            ))

    elif kind == "hist":
        hist_kwargs = plot_kwargs.copy()
        hist_kwargs.update(**fill_kwargs)

        for alpha, color, label, value in series:
            hist_kwargs["fill_alpha"] = alpha
            hist_kwargs["fill_color"] = vectorized_to_hex(color)
            hist_kwargs["line_color"] = None
            hist_kwargs["line_alpha"] = alpha
            _histplot_bokeh_op(
                value.flatten(),
                values2=None,
                rotated=False,
                ax=ax,
                hist_kwargs=hist_kwargs,
                is_circular=False,
            )

    else:
        raise ValueError(f"Plot type {kind} not recognized.")

    if bfmi:
        for idx, val in enumerate(e_bfmi(energy)):
            bfmi_info = Label(
                x=int(figsize[0] * dpi * 0.58),
                y=int(figsize[1] * dpi * 0.73) - 20 * idx,
                x_units="screen",
                y_units="screen",
                text=f"chain {idx:>2} BFMI = {val:.2f}",
                render_mode="css",
                border_line_color=None,
                border_line_alpha=0.0,
                background_fill_color="white",
                background_fill_alpha=1.0,
            )

            ax.add_layout(bfmi_info)

    if legend and label is not None:
        legend = Legend(
            items=labels,
            location="center_right",
            orientation="horizontal",
        )
        ax.add_layout(legend, "above")
        ax.legend.click_policy = "hide"

    show_layout(ax, show)

    return ax
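
# This backend function is normally reached through the public arviz API rather
# than called directly; a hedged usage sketch (assumes arviz and its bundled
# example data are available):
import arviz as az

idata = az.load_arviz_data("centered_eight")
az.plot_energy(idata, kind="kde", backend="bokeh", show=True)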
Ejemplo n.º 50
0
INS_YK4_CAPABILITIES = 0x1d

U2F_VENDOR_FIRST = 0x40
TYPE_INIT = 0x80
U2FHID_PING = TYPE_INIT | 0x01
U2FHID_YUBIKEY_DEVICE_CONFIG = TYPE_INIT | U2F_VENDOR_FIRST
U2FHID_YK4_CAPABILITIES = TYPE_INIT | U2F_VENDOR_FIRST + 2

try:
    u2fh = U2fh('u2f-host', '0')

    # TODO: Allow debug output
    if u2fh.u2fh_global_init(0) != 0:
        raise Exception('u2fh_global_init failed!')
    libversion = tuple(
        int(x)
        for x in u2fh.u2fh_check_version(None).decode('ascii').split('.'))
except Exception as e:
    logger.error('libu2f-host not found', exc_info=e)
    u2fh = MissingLibrary(
        'libu2f-host not found, U2F connectability not available!')
    libversion = None


class U2FHostError(Exception):
    """Thrown if u2f-host call fails."""
    def __init__(self, errno):
        self.errno = errno
        self.message = '{}: {}'.format(u2fh.u2fh_strerror_name(errno),
                                       u2fh.u2fh_strerror(errno))
Ejemplo n.º 51
0
def resize_unit_cell(struct, resize):
    """Resize the StructureData according to the resize Dict"""
    resize_tuple = tuple([resize[x] for x in ['nx', 'ny', 'nz']])
    return StructureData(ase=struct.get_ase().repeat(resize_tuple))
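
# The heavy lifting above is ASE's Atoms.repeat; a standalone sketch without
# the aiida wrapper (ase assumed to be installed):
from ase.build import bulk

atoms = bulk('Cu', 'fcc', a=3.6)
supercell = atoms.repeat((2, 2, 2))  # equivalent to resize = {'nx': 2, 'ny': 2, 'nz': 2}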
Ejemplo n.º 52
0
            eflux_arr[ind] = np.sqrt(eflux**2 + (vflux / 5)**2)

    hdict = []
    ldict = []

    if 'BX' in labels and np.any(jvla_BX['snr'] > 50):
        freq_arr, flux_arr, eflux_arr = rmfit.prep_fit_arr(
            freq_arr, flux_arr, eflux_arr, alpha_BX)
        hdict.append('IB')
    if 'AX' in labels and np.any(jvla_AX['snr'] > 50):
        freq_arr, flux_arr, eflux_arr = rmfit.prep_fit_arr(
            freq_arr, flux_arr, eflux_arr, alpha_AX)
        hdict.append('IB')

    msk = [freq_arr > 0]
    nu_arr = freq_arr[tuple(msk)]
    s_arr = flux_arr[tuple(msk)]
    es_arr = eflux_arr[tuple(msk)]

    #calculation of alhigh and alow:
    #Strategy: Identify peak and calculate points before and after peak location
    min_cat = labels[np.argmin(nu_arr)]
    min_nu = survey_info[min_cat]['Freq']
    max_cat = 'BX'
    max_nu = 10
    if 'AX' in labels:
        max_cat = 'AX'
    nup = sed_fit_params[kk]['nu_p']
    e_nup = sed_fit_params[kk]['e_nu_p']
    if nup < 10 and nup > min_nu:
        hind = np.nonzero(nu_arr > nup)[0]
Ejemplo n.º 53
0
def wrap_request_class(cls):
    return wraps(cls, assigned=tuple(WRAPPER_ASSIGNMENTS) + ("from_dict", ))
Ejemplo n.º 54
0
def check(sample_tuple):
    return all(i == sample_tuple[0] for i in sample_tuple)

tuple1=(45,45,45,45)

print(check(tuple1))


import sys
sys.exit(0)

"""
Sort a tuple of tuples by 2nd item
"""
tuple1=(('a',23),('b',37),('c',11),('d',29))
tuple1=tuple(sorted(list(tuple1),key=lambda x:x[1]))
print(tuple1)


"""
copy elements 44 and 55 from the following tuple into a new tuple
"""
tuple1=(11,22,33,44,55,66)
tuple2=tuple1[3:-1]
print(tuple2)



import sys
sys.exit(0)
"""
Ejemplo n.º 55
0
 def _known_arguments(self):
     """The arguments that can be passed to ``apply`` when running the Operator."""
     ret = set.union(
         *[set(i._arg_names) for i in self.input + self.dimensions])
     return tuple(sorted(ret))
Ejemplo n.º 56
0
    def add_error_handler(self, exception, handler=None):
        """Register a handler for one or more exception types.

        Error handlers may be registered for any exception type, including
        :class:`~.HTTPError` or :class:`~.HTTPStatus`. This feature
        provides a central location for logging and otherwise handling
        exceptions raised by responders, hooks, and middleware components.

        A handler can raise an instance of :class:`~.HTTPError` or
        :class:`~.HTTPStatus` to communicate information about the issue to
        the client.  Alternatively, a handler may modify `resp`
        directly.

        Error handlers are matched in LIFO order. In other words, when
        searching for an error handler to match a raised exception, and
        more than one handler matches the exception type, the framework
        will choose the one that was most recently registered.
        Therefore, more general error handlers (e.g., for the
        standard ``Exception`` type) should be added first, to avoid
        masking more specific handlers for subclassed types.

        .. Note::

            By default, the framework installs two handlers, one for
            :class:`~.HTTPError` and one for :class:`~.HTTPStatus`. These can
            be overridden by adding a custom error handler method for the
            exception type in question.

        Args:
            exception (type or iterable of types): When handling a request,
                whenever an error occurs that is an instance of the specified
                type(s), the associated handler will be called. Either a single
                type or an iterable of types may be specified.
            handler (callable): A function or callable object taking the form
                ``func(req, resp, ex, params)``.

                If not specified explicitly, the handler will default to
                ``exception.handle``, where ``exception`` is the error
                type specified above, and ``handle`` is a static method
                (i.e., decorated with @staticmethod) that accepts
                the same params just described. For example::

                    class CustomException(CustomBaseException):

                        @staticmethod
                        def handle(req, resp, ex, params):
                            # TODO: Log the error
                            # Convert to an instance of falcon.HTTPError
                            raise falcon.HTTPError(falcon.HTTP_792)

                If an iterable of exception types is specified instead of
                a single type, the handler must be explicitly specified.

        """
        def wrap_old_handler(old_handler):
            @wraps(old_handler)
            def handler(req, resp, ex, params):
                old_handler(ex, req, resp, params)

            return handler

        if handler is None:
            try:
                handler = exception.handle
            except AttributeError:
                raise AttributeError('handler must either be specified '
                                     'explicitly or defined as a static'
                                     'method named "handle" that is a '
                                     'member of the given exception class.')

        # TODO(vytas): Remove this shimming in a future Falcon version.
        arg_names = tuple(misc.get_argnames(handler))
        if (arg_names[0:1] in (('e', ), ('err', ), ('error', ), ('ex', ),
                               ('exception', ))
                or arg_names[1:3] in (('req', 'resp'),
                                      ('request', 'response'))):
            handler = wrap_old_handler(handler)

        try:
            exception_tuple = tuple(exception)
        except TypeError:
            exception_tuple = (exception, )

        if all(issubclass(exc, BaseException) for exc in exception_tuple):
            # Insert at the head of the list in case we get duplicate
            # adds (will cause the most recently added one to win).
            if len(exception_tuple) == 1:
                # In this case, insert only the single exception type
                # (not a tuple), to avoid unnecessary overhead in the
                # exception handling path.
                self._error_handlers.insert(0, (exception_tuple[0], handler))
            else:
                self._error_handlers.insert(0, (exception_tuple, handler))
        else:
            raise TypeError('"exception" must be an exception type.')
Ejemplo n.º 57
0
 def _fks(self):
     rtn = defaultdict(set)
     for fk in self.tic_dat_factory.foreign_keys:
         rtn[fk.native_table].add(fk)
     return FrozenDict({k:tuple(v) for k,v in rtn.items()})
Ejemplo n.º 58
0
 def as_tuple(self, value):
     """Utility function which converts lists to tuples."""
     if isinstance(value, list):
         value = tuple(value)
     return value
Ejemplo n.º 59
0
 def __init__(self, **kwargs):
     self.hints = tuple(kwargs.items())
     self.view_map = {0: [0]}
Ejemplo n.º 60
0
_MAX_CONNECTION_ATTEMPTS = 10

# Port to expect the docker environment to internally listen on.
_DOCKER_INTERNAL_GRPC_PORT = 10000

_DEFAULT_DOCKER_IMAGE_NAME = 'gcr.io/deepmind-environments/alchemy:v1.0.0'

_ALCHEMY_OBSERVATIONS = ('RGB_INTERLEAVED', 'ACCELERATION', 'HAND_FORCE',
                         'HAND_IS_HOLDING', 'HAND_DISTANCE', 'Score', 'events')

ALCHEMY_LEVEL_NAMES = frozenset((
    'alchemy/perceptual_mapping_randomized_with_rotation_and_random_bottleneck',
    'alchemy/all_fixed',
    'alchemy/all_fixed_w_shaping',
    'alchemy/perceptual_mapping_randomized_with_random_bottleneck',
) + tuple(f'alchemy/evaluation_episodes/{i}' for i in range(1000)))


@dataclasses.dataclass
class _ConnectionDetails:
    channel: grpc.Channel
    connection: dm_env_rpc_connection.Connection
    specs: dm_env_rpc_pb2.ActionObservationSpecs


def _maybe_as_partial_spec(spec: array_specs.Array):
    if -1 not in spec.shape:
        return spec

    if isinstance(spec, array_specs.BoundedArray):
        raise ValueError('Partial bounded arrays are not yet handled.')