Example #1
1
def iterunique(source, key):
    """Yield the header row, then only those data rows of `source` whose
    key value occurs exactly once (source is assumed sorted by that key).

    :param source: iterable of rows; the first row is the header
    :param key: field selection understood by asindices(), or None to
        compare whole rows
    """
    it = iter(source)

    try:
        hdr = next(it)
    except StopIteration:
        # robustness fix: an empty source yields nothing instead of
        # leaking StopIteration out of the generator (PEP 479)
        return
    yield tuple(hdr)

    # convert field selection into field indices
    if key is None:
        indices = range(len(hdr))
    else:
        indices = asindices(hdr, key)

    # now use field indices to construct a _getkey function
    # N.B., this may raise an exception on short rows, depending on
    # the field selection
    getkey = operator.itemgetter(*indices)

    try:
        prev = next(it)
    except StopIteration:
        # header only, no data rows
        return
    prev_key = getkey(prev)
    # whether `prev` differs from the row before it
    prev_comp_ne = True

    for curr in it:
        curr_key = getkey(curr)
        curr_comp_ne = curr_key != prev_key
        # a row is unique iff it differs from both of its neighbours
        if prev_comp_ne and curr_comp_ne:
            yield tuple(prev)
        prev = curr
        prev_key = curr_key
        prev_comp_ne = curr_comp_ne

    # bug fix: the last row was yielded as-is; normalize it to a tuple
    # like every other yielded row
    if prev_comp_ne:
        yield tuple(prev)
Example #2
0
 def __new__(cls, spec):
     """Normalize `spec` into a VersionSpec instance.

     Accepts an existing VersionSpec (returned unchanged), an
     ('any'|'all', specs) tuple, a '|'- or ','-joined string (split and
     recursed into), a relational expression such as '>=1.2', or a plain
     version string with '*' wildcards.
     """
     # Already a VersionSpec: reuse it as-is.
     if isinstance(spec, cls):
         return spec
     self = object.__new__(cls)
     self.spec = spec
     if isinstance(spec, tuple):
         # ('all', specs) / ('any', specs): pick the matching strategy.
         self.match = self.all_match_ if spec[0] == 'all' else self.any_match_
     elif '|' in spec:
         # OR of alternatives: recurse on each part.
         return VersionSpec(('any', tuple(VersionSpec(s) for s in spec.split('|'))))
     elif ',' in spec:
         # AND of constraints: recurse on each part.
         return VersionSpec(('all', tuple(VersionSpec(s) for s in spec.split(','))))
     elif spec.startswith(('=', '<', '>', '!')):
         # Relational constraint, e.g. '>=1.0': parse operator and operand.
         m = version_relation_re.match(spec)
         if m is None:
             raise RuntimeError('Invalid version spec: %s' % spec)
         op, b = m.groups()
         self.op = opdict[op]
         self.cmp = VersionOrder(b)
         self.match = self.veval_match_
     else:
         # Plain version string, possibly with '*' wildcards: compile it to
         # an end-anchored regex ('.' and '+' escaped, '*' -> '.*').
         self.spec = spec
         rx = spec.replace('.', r'\.')
         rx = rx.replace('+', r'\+')
         rx = rx.replace('*', r'.*')
         rx = r'(%s)$' % rx
         self.regex = re.compile(rx)
         self.match = self.regex_match_
     return self
Example #3
0
 def get_form_count(root_node, form_id):
     """Return how many entities of the form's type are linked (property
     'P2') to any sub node of `root_node`, or None when the node has no
     sub nodes.

     Used to check whether nodes are already linked to entities before
     offering to remove a node from a form.
     """
     # Check if nodes are already linked to entities before offering to remove a node from form
     node_ids = NodeMapper.get_all_sub_ids(root_node, [])
     if not node_ids:  # There are no sub nodes so skipping test
         return
     g.cursor.execute("SELECT name FROM web.form WHERE id = %(form_id)s;", {'form_id': form_id})
     form_name = g.cursor.fetchone()[0]
     # Map the form to either a system_type filter or a class_code filter;
     # exactly one of the two stays non-empty.
     system_type = ''
     class_code = ''
     if form_name == 'Source':
         system_type = 'source content'
     elif form_name == 'Event':
         # NOTE(review): unlike the branches below this value is not wrapped
         # in a list -- presumably CLASS_CODES['event'] is already a
         # sequence of codes; confirm, otherwise tuple() below iterates it
         # character by character.
         class_code = app.config['CLASS_CODES']['event']
     elif form_name == 'Person':
         class_code = ['E21']
     elif form_name == 'Group':
         class_code = ['E74']
     elif form_name == 'Legal Body':
         class_code = ['E40']
     else:
         system_type = form_name.lower()
     # The WHERE fragment switches between a system_type equality test and
     # a class_code IN test; the value itself is passed as a parameter.
     sql = """
         SELECT count(*) FROM model.link l
         JOIN model.entity e ON l.domain_id = e.id AND l.range_id IN %(node_ids)s
         WHERE l.property_code = 'P2' AND {sql_where} %(params)s;""".format(
            sql_where='e.system_type =' if system_type else 'e.class_code IN')
     g.cursor.execute(sql, {
         'node_ids': tuple(node_ids),
         'params': system_type if system_type else tuple(class_code)})
     debug_model['div sql'] += 1
     return g.cursor.fetchone()[0]
 def _get_sales_data(self, cr, uid, period_ids, curr_period, sale_id, context=None):
     """Fetch per-partner, per-period summed (credit - debit) amounts from
     reported accounts, optionally restricted to one salesman's invoices.

     :param period_ids: accounting period ids to aggregate over
     :param curr_period: when falsy, the currently open period is excluded
     :param sale_id: optional salesman (user) id used to filter invoices
     :returns: list of dicts with keys pid, period, pnm, company, amount
     """
     if not curr_period:
         # exclude the currently running period from the aggregation
         curr_p = self.pool.get('account.period').find(cr, uid, context=context)[0]
         period_ids = [x for x in period_ids if x != curr_p]
     # security/bug fix: the original interpolated values directly into the
     # SQL string (SQL-injection prone, and str(tuple([42])) produces the
     # invalid SQL literal "(42,)"); pass them as query parameters so the
     # driver adapts them correctly instead.
     if not sale_id:
         sql = """
             select ml.partner_id as pid, ml.period_id as period, p.name as pnm, p.is_company as company, sum(ml.credit - ml.debit) as amount
             from account_move_line ml
             left join res_partner p on (ml.partner_id = p.id)
             left join account_account ac on (ml.account_id = ac.id)
             where ml.period_id in %s
             and ac.reports = True
             group by ml.partner_id, ml.period_id, p.name, p.is_company
             order by ml.partner_id, ml.period_id
             """
         params = (tuple(period_ids),)
     else:
         sql = """
             select ml.partner_id as pid, ml.period_id as period, p.name as pnm, p.is_company as company, sum(ml.credit - ml.debit) as amount
             from account_move_line ml
             left join account_invoice inv on (ml.move_id = inv.move_id)
             left join res_partner p on (ml.partner_id = p.id)
             left join account_account ac on (ml.account_id = ac.id)
             where ml.period_id in %s
             and inv.user_id = %s
             and ac.reports = True
             group by ml.partner_id, ml.period_id, p.name, p.is_company
             order by ml.partner_id, ml.period_id
             """
         params = (tuple(period_ids), sale_id)
     cr.execute(sql, params)
     return cr.dictfetchall()
Example #5
0
def solve(par):
    """Simulate the element invocation list.

    `par` unpacks to (C, combine, D, opposite, N, S): combination rules
    (3-char strings "abR": a+b -> R), opposition pairs (2-char strings),
    and the invocation string S.  Combining replaces the last element;
    invoking an element opposed to anything present clears the list.
    Returns the final list formatted as '[a, b, ...]'.
    """
    _, combine, _, opposite, _, invocations = par

    # symmetric lookup: (a, b) -> combined element
    pair_result = {}
    for rule in combine:
        first, second, merged = rule[0], rule[1], rule[2]
        pair_result[(first, second)] = merged
        pair_result[(second, first)] = merged

    # element -> list of elements it is opposed to (both directions)
    opposed = defaultdict(list)
    for pair in opposite:
        opposed[pair[0]].append(pair[1])
        opposed[pair[1]].append(pair[0])

    elements = []
    for sym in invocations:
        # combine with the most recent element when a rule applies
        if elements and (elements[-1], sym) in pair_result:
            elements[-1] = pair_result[(elements[-1], sym)]
            continue
        # an opposed element anywhere in the list wipes everything
        if any(other in elements for other in opposed.get(sym, ())):
            elements = []
        else:
            elements.append(sym)

    return '[' + ', '.join(elements) + ']'
Example #6
0
    def emit_Conv(self, IR_node):
        """Emit the init and forward code for a convolution layer node."""
        self.used_layers.add(IR_node.type)

        kernel_shape = IR_node.get_attr('kernel_shape')
        stride_attr = IR_node.get_attr('strides')

        # spatial dimensionality: strides also carry batch/channel entries
        dim = len(stride_attr) - 2

        self.add_init(2, "self.{} = self.__conv({}, name='{}', in_channels={}, out_channels={}, kernel_size={}, stride={}, groups={}, bias={})".format(
            IR_node.variable_name,
            dim,
            IR_node.name,
            kernel_shape[-2],
            kernel_shape[-1],
            tuple(kernel_shape[:-2]),
            tuple(stride_attr[1:-1]),
            # padding,
            IR_node.get_attr('group', 1),
            IR_node.get_attr('use_bias')))

        input_node = self._defuse_padding(IR_node)
        self.add_body(2, "{:<15} = self.{}({})".format(
            IR_node.variable_name,
            IR_node.variable_name,
            input_node))

        if self.weight_loaded:
            # reorder the stored weight axes to [out, in, spatial...]
            # (presumably channels-last -> channels-first; confirm)
            self.weights_dict[IR_node.name]['weights'] = np.transpose(
                self.weights_dict[IR_node.name]['weights'],
                [dim + 1, dim] + list(range(0, dim)))
Example #7
0
def test_point_slicing_with_full_slice():
    """Point-wise (vindex) slicing mixed with full slices matches numpy."""
    from dask.array.core import _vindex_transpose, _get_axis
    x = np.arange(4*5*6*7).reshape((4, 5, 6, 7))
    d = da.from_array(x, chunks=(2, 3, 3, 4))

    cases = [
            [[1, 2, 3], None, [3, 2, 1], [5, 3, 4]],
            [[1, 2, 3], None, [4, 3, 2], None],
            [[1, 2, 3], [3, 2, 1]],
            [[1, 2, 3], [3, 2, 1], [3, 2, 1], [5, 3, 4]],
            [[], [], [], None],
            [np.array([1, 2, 3]), None, np.array([4, 3, 2]), None],
            [None, None, [1, 2, 3], [4, 3, 2]],
            [None, [0, 2, 3], None, [0, 3, 2]],
            ]

    for ind in cases:
        # every None placeholder becomes a full slice
        slc = []
        for entry in ind:
            if isinstance(entry, (np.ndarray, list)):
                slc.append(entry)
            else:
                slc.append(slice(None, None))
        slc = tuple(slc)

        result = d.vindex[slc]

        # Rotate the expected result accordingly
        expected = _vindex_transpose(x[slc], _get_axis(ind))

        assert eq(result, expected)

        # Always have the first axis be the length of the points
        first_points = next(entry for entry in ind
                            if isinstance(entry, (np.ndarray, list)))
        assert result.shape[0] == len(first_points)
Example #8
0
    def __init__(self, n):
        """ Create a RangeCount with a given size.

        :param n: number of counters
        :type n: int
        :raises ValueError: if `n` is not positive
        """

        if not n > 0:
            raise ValueError("the number of counters must be positive.")

        self._n = n

        # Decompose n into its binary components: one tree per set bit,
        # each covering a contiguous run of counters starting at `offset`.
        trees = []
        offsets = []
        offset = 0
        bit = 1
        while bit <= n:
            if n & bit:
                trees.append(_FCTree(bit))
                offsets.append(offset)
                offset += bit
            bit <<= 1

        self._trees = tuple(trees)
        self._offsets = tuple(offsets)
        self._lock = threading.Lock()
Example #9
0
def _set_element_code_selection_form_field(form, form_field_name, form_field_label, element_id,
                                           elements, element_code_att_name, element_name_att_name):
    """Turn a form field into a select of element codes.

    Choices are (code, "code:name") pairs built from `elements`; a
    "----" sentinel choice is always included.  When the form already has
    initial data, its current value is listed first and the element being
    edited (`element_id`) is filtered out.  No-op when `elements` is empty.
    """
    element_display_str = "{code_att_name}:{name_att_name}"

    def _choice(element):
        # one (code, "code:name") pair for an element
        code = getattr(element, element_code_att_name)
        return (code,
                element_display_str.format(
                    code_att_name=str(code),
                    name_att_name=getattr(element, element_name_att_name)))

    if len(elements) > 0:
        if len(form.initial) > 0:
            choices = [_choice(element) for element in elements
                       if element.id != element_id]
            current = (form.initial[element_code_att_name],
                       element_display_str.format(
                           code_att_name=str(form.initial[element_code_att_name]),
                           name_att_name=form.initial[element_name_att_name]))
            element_code_choices = tuple([current] + choices + [("----", "----")])
        else:
            choices = [_choice(element) for element in elements]
            element_code_choices = tuple([("----", "----")] + choices)

        form.fields[form_field_name].widget = forms.Select(
            choices=element_code_choices)
        form.fields[form_field_name].label = form_field_label
Example #10
0
 def _all_commands(self):
     """Return a frozenset of all runnable command names (PATH contents
     plus aliases), recomputing only when PATH, the alias set, or the
     mtime of some PATH directory has changed since the last call.
     """
     path = builtins.__xonsh_env__.get('PATH', [])
     # did PATH change?
     path_hash = hash(tuple(path))
     cache_valid = path_hash == self._path_checksum
     self._path_checksum = path_hash
     # did aliases change?
     # bug fix: the stored checksum was overwritten BEFORE the comparison,
     # making the comparison always true, so alias changes never
     # invalidated the cache.  Compare first, then store.
     al_hash = hash(tuple(sorted(builtins.aliases.keys())))
     cache_valid = cache_valid and al_hash == self._alias_checksum
     self._alias_checksum = al_hash
     pm = self._path_mtime
     # did the contents of any directory in PATH change?
     for d in filter(os.path.isdir, path):
         m = os.stat(d).st_mtime
         if m > pm:
             pm = m
             cache_valid = False
     self._path_mtime = pm
     if cache_valid:
         return self._cmds_cache
     # cache miss: rebuild from directory listings and aliases
     allcmds = set()
     for d in filter(os.path.isdir, path):
         allcmds |= set(os.listdir(d))
     allcmds |= set(builtins.aliases.keys())
     self._cmds_cache = frozenset(allcmds)
     return self._cmds_cache
Example #11
0
    def test_reduce_inner(numpy_reduce_func, nd_reduce_func, multi_axes):
        """Compare an mx.nd reduction against its numpy counterpart on
        random shapes and axis selections, tolerating numpy's ()-shape
        versus ndarray's (1,)-shape for full reductions."""
        for _ in range(sample_num):
            ndim = np.random.randint(1, 6)
            shape = np.random.randint(1, 11, size=ndim)
            dat = np.random.rand(*shape) - 0.5
            keepdims = np.random.randint(0, 2)
            if multi_axes:
                # random subset of axes; fall back to all axes if empty
                flags = np.random.randint(0, 2, size=ndim)
                chosen = [axis for axis, flag in enumerate(flags) if flag]
                axes = tuple(chosen) if chosen else tuple(range(ndim))
            else:
                axes = np.random.randint(0, ndim)

            numpy_ret = numpy_reduce_func(dat, axis=axes, keepdims=keepdims)

            ndarray_ret = nd_reduce_func(mx.nd.array(dat), axis=axes, keepdims=keepdims)
            if type(ndarray_ret) is mx.ndarray.NDArray:
                ndarray_ret = ndarray_ret.asnumpy()

            shapes_match = (ndarray_ret.shape == numpy_ret.shape or
                            (ndarray_ret.shape == (1,) and numpy_ret.shape == ()))
            assert shapes_match, "nd:%s, numpy:%s" % (ndarray_ret.shape, numpy_ret.shape)

            err = np.square(ndarray_ret - numpy_ret).mean()
            assert err < 1E-4
    def __init__(self, provides, *args, **kwargs):
        """Initializer.

        :param provides: Wrapped callable.
        :type provides: callable

        :param args: Tuple of injections.
        :type args: tuple

        :param kwargs: Dictionary of injections.
        :type kwargs: dict

        :raises Error: if `provides` is not callable
        """
        if not callable(provides):
            # bug fix: the second placeholder was `{0}`, so the message
            # repeated the provider name and never showed the offending
            # object; it must be `{1}` to consume the second argument.
            raise Error('Provider {0} expected to get callable, '
                        'got {1}'.format('.'.join((self.__class__.__module__,
                                                   self.__class__.__name__)),
                                         provides))

        self.provides = provides

        self.args = tuple()
        self.kwargs = tuple()

        self.add_injections(*args, **kwargs)

        super(Callable, self).__init__()
    def handle_user(self, data):
        '''
        Insert user informations in data

        Override it to add extra user attributes.
        '''
        # Default to unauthenticated anonymous user
        data['user'] = {
            'username': '',
            'is_authenticated': False,
            'is_staff': False,
            'is_superuser': False,
            'permissions': tuple(),
        }
        # Without the session middleware there is no request user to read.
        if 'django.contrib.sessions.middleware.SessionMiddleware' not in settings.MIDDLEWARE_CLASSES:
            return
        user = self.request.user
        info = data['user']
        info['is_authenticated'] = user.is_authenticated()
        # username may be exposed as an attribute or via get_username()
        if hasattr(user, 'username'):
            info['username'] = user.username
        elif hasattr(user, 'get_username'):
            info['username'] = user.get_username()
        if hasattr(user, 'is_staff'):
            info['is_staff'] = user.is_staff
        if hasattr(user, 'is_superuser'):
            info['is_superuser'] = user.is_superuser
        if hasattr(user, 'get_all_permissions'):
            info['permissions'] = tuple(user.get_all_permissions())
Example #14
0
    def test_streaming(self):
        """Consume the doubled output stream partially, then verify the
        pipeline is lazy (most input unconsumed), lossless, and ordered.
        NOTE(review): Python 2 code (xrange)."""
        input_iter = iter(xrange(int(10000)))
        doubled_stream = vimap.ext.sugar.imap_ordered(
            lambda x: 2 * x,
            input_iter
        )

        # take a few from the doubled output stream
        consumed = tuple(itertools.islice(doubled_stream, 40))

        # exhaust the input
        unspooled_input = tuple(input_iter)

        # now take the rest from the output stream
        rest = tuple(doubled_stream)

        num_processed = len(consumed) + len(rest)

        # laziness: the pipeline must not have spooled most of the input
        # ahead of what was actually consumed
        T.assert_gt(
            len(unspooled_input),
            9000,
            message="Most inputs should not be processed (too much spooling / "
                    "not lazy). Only {0} remained.".format(len(unspooled_input))
        )
        # losslessness: processed + unconsumed must account for every input
        assert num_processed + len(unspooled_input) == 10000, "Something got dropped"

        # ordering: outputs are the doubles of the first inputs, in order
        T.assert_equal(
            consumed + rest,
            tuple(2 * i for i in xrange(num_processed)),
            message="Processed inputs weren't the first in the stream, or are out of order."
        )
Example #15
0
 def __init__(self, data=[], Gap=None, MolType=None):
     """Returns a new JointEnumeration object. See class docstring for info.

     Expects a list of Enumeration objects, or objects that can be coerced
     into Enumeration objects (basically, anything that can be a tuple).

     Does NOT have an independent concept of a gap -- gets the gaps from the
     constituent subenumerations.

     NOTE(review): Python 2 code -- the result of map() is sliced below,
     which requires map() to return a list; `data` uses a mutable default.
     """
     self.SubEnumerations = self._coerce_enumerations(data)
     sub_enum_lengths = map(len, self.SubEnumerations)
     #build factors for combining symbols: mixed-radix place values, built
     #right-to-left so each factor is the product of all lengths after it
     curr_factor = 1
     sub_enum_factors = [curr_factor]
     for i in sub_enum_lengths[-1:0:-1]:
         curr_factor *= i
         sub_enum_factors = [curr_factor] + sub_enum_factors
     self._sub_enum_factors = transpose(array([sub_enum_factors]))

     try:
         #figure out the gaps correctly: the joint gap is the tuple of the
         #sub-enumeration gaps, and its index is the mixed-radix encoding
         #of the sub gap indices
         gaps = [i.Gap for i in self.SubEnumerations]
         self.Gap = tuple(gaps)
         gap_indices = array([i.GapIndex for i in self.SubEnumerations])
         gap_indices *= sub_enum_factors
         self.GapIndex = sum(gap_indices)
     except (TypeError, AttributeError): #index not settable
         self.Gap = None

     super(JointEnumeration, self).__init__(self, self.Gap)
     #remember to reset shape after superclass init
     self.Shape = tuple(sub_enum_lengths)
def import_dataset(filename):
    """Read a CSV-style dataset file into a numpy structured array.

    File layout: an optional settings header (lines between 'START' and
    'END', each 'key,value'), then one line of column names, one line of
    numpy dtype format strings, and finally the data rows.

    :param filename: path of the file to read
    :returns: (structured numpy array, settings dict)
    """
    store = []
    settings = {}

    with open(filename, 'r') as f:

        line = f.readline().rstrip('\n')

        # Check if the file has a settings header and read it out
        # (idiom fix: startswith instead of slicing comparisons)
        if line.startswith('START'):
            line = f.readline().rstrip('\n')
            while not line.startswith('END'):
                parts = line.split(',')
                settings[parts[0]] = parts[1]
                line = f.readline().rstrip('\n')
            line = f.readline().rstrip('\n')

        # Extract the column names and data formats
        names = tuple(line.split(','))
        line = f.readline().rstrip('\n')
        formats = tuple(line.split(','))

        # Populate the store with data fields; iterate the file lazily
        # instead of materializing it all with readlines()
        for line in f:
            parts = line.rstrip('\n').split(',')
            store.append(tuple(parts))

    # File is now closed
    out = np.array(store, dtype={'names': names, 'formats': formats})
    return out, settings
Example #17
0
    def __new__(cls, fields, values):
        """Create the tuple instance and expose each field as a read-only
        attribute; field names are stored for pickling/copying (see
        `__getnewargs__`).
        """
        fields = tuple(fields)
        values = tuple(values)

        if len(fields) != len(values):
            raise ValueError(
                "`fields` and `values` must have matching length: %d != %d" %
                (len(fields), len(values)))

        # Build the underlying tuple first; attributes must then be set via
        # object.__setattr__ because the class blocks direct assignment
        # (necessary for immutability).
        self = super().__new__(cls, values)
        set_attr = object.__setattr__

        # Fields must be stored for pickling/copying.
        set_attr(self, '_fields', fields)

        # Attaching field names as real instance attributes (rather than
        # routing through __getattr__) enables tab-completion in
        # interactive contexts.
        for name, value in zip(fields, values):
            set_attr(self, name, value)

        return self
Example #18
0
    def sortEdgesForBiasSeed(part, edges, center):
        '''
        Arguments:
        part: reference to the part object
        edges: a tuple of edges
        center: a Point instance indicating the center of the fine mesh region

        Returns the tuples (e1, e2) corresponding to the arguments needed in
        seedEdgeByBias() where end1Edges = e1 and end2Edges = e2.  In
        seedEdgeByBias(), the smallest elements are positioned near the first
        vertex of the edges passed as end1Edges, and vice versa.
        '''

        # Check that e is a tuple of edges TODO
        # Check that c is a tuple of three floats TODO

        near_first = list()
        near_second = list()

        for edge in edges:
            (v1, v2) = edge.getVertices()
            first_dist = Point.distance(Point.fromVertex(part.vertices[v1]), center)
            second_dist = Point.distance(Point.fromVertex(part.vertices[v2]), center)

            # An edge goes to end1 when its first vertex is the closer one.
            if first_dist < second_dist:
                near_first.append(edge)
            else:
                near_second.append(edge)

        return (tuple(near_first), tuple(near_second))
 def changeFromProperties(self, props):
     '''
     Returns 1 if changes were specified.
     '''
     if props is None:
         return 0
     res = 0

     # Each guard_* property, when present and non-empty, replaces the
     # corresponding attribute and marks the object as changed.
     permissions = props.get('guard_permissions', None)
     if permissions:
         res = 1
         self.permissions = tuple(
             permission.strip() for permission in permissions.split(';'))

     roles = props.get('guard_roles', None)
     if roles:
         res = 1
         self.roles = tuple(role.strip() for role in roles.split(';'))

     groups = props.get('guard_groups', None)
     if groups:
         res = 1
         self.groups = tuple(group.strip() for group in groups.split(';'))

     expr_text = props.get('guard_expr', None)
     if expr_text:
         res = 1
         self.expr = Expression(expr_text)
     return res
Example #20
0
def save_log( filename, bg, log_item ):
    """Saves band gaps, valid flags, eigenfrequencies.

    NOTE(review): Python 2 code (xrange).  `bg` is presumably a band-gap
    result object exposing freq_range_margins, gaps, kinds, valid, etc. --
    confirm against the caller.
    """
    fd = open( filename, 'w' )
    freq_range = bg.freq_range_margins
    # header: caller-supplied description plus run metadata
    fd.write( log_item )
    fd.write( 'squared: %s\n' % False )
    fd.write( 'n_zeroed: %d\n' % bg.n_zeroed )
    fd.write( 'n_eigs: %d\n' % bg.n_eigs )
    fd.write( 'f0 f1 flag_min f_min v_min flag_max f_max v_max'
              ' kind\ndesc\n' )
    format = "%f %f %d %f %f %d %f %f %s\n%s\n"

    # first table: one record per frequency interval -- interval bounds,
    # min/max gap data and the gap kind
    n_row = len( freq_range ) - 1
    fd.write( '%d\n' % n_row )
    for ir in xrange( n_row ):
        f0, f1 = freq_range[[ir, ir+1]]
        gmin, gmax = bg.gaps[ir]
        fd.write( format % ((f0, f1) + tuple( gmin ) + tuple( gmax )
                            + bg.kinds[ir]) )

    # second table: validity flag for each initial resonance frequency
    fd.write( 'valid resonance\n' )
    freq_range = bg.freq_range_initial
    n_row = len( freq_range )
    fd.write( '%d\n' % n_row )
    valid_in_range = bg.valid[bg.eig_range]
    for ir in xrange( n_row ):
        fd.write( '%d %f\n' % (valid_in_range[ir], freq_range[ir] ) )
    fd.close()
    def load(cls, dirname=''):
        """Pick the SvnInfo implementation matching the working copy and
        the installed svn client version, falling back to direct .svn
        directory parsing when no svn 1.3+ command is available.
        """
        normdir = os.path.normpath(dirname)
        code, data = _run_command(['svn', 'info', normdir])
        # Must check for some contents, as some use empty directories
        # in testcases
        svn_dir = os.path.join(normdir, '.svn')
        has_svn = (os.path.isfile(os.path.join(svn_dir, 'entries')) or
                   os.path.isfile(os.path.join(svn_dir, 'dir-props')) or
                   os.path.isfile(os.path.join(svn_dir, 'dir-prop-base')))

        # client version as a tuple of strings, e.g. ('1', '7', '3')
        svn_version = tuple(cls.get_svn_version().split('.'))

        try:
            # numeric (major, minor); empty tuple when unparsable
            base_svn_version = tuple(int(x) for x in svn_version[:2])
        except ValueError:
            base_svn_version = tuple()

        # `svn info` failed, or the client version is unknown/too old:
        # parse the .svn metadata directly (pre-1.7 layout)
        if has_svn and (code or not base_svn_version 
                             or base_svn_version < (1, 3)):
            warnings.warn(("No SVN 1.3+ command found: falling back "
                           "on pre 1.7 .svn parsing"), DeprecationWarning)
            return SvnFileInfo(dirname)
        elif not has_svn:
            return SvnInfo(dirname)
        elif base_svn_version < (1, 5):
            return Svn13Info(dirname)
        else:
            return Svn15Info(dirname)
Example #22
0
    def mergeConcat_branch(self, concatnode):
        """Merge adjacent DFBranch operands of a concat that share the same
        condition: consecutive `c ? a : b` and `c ? x : y` become
        `c ? {a, x} : {b, y}`.  A None arm is widened with DFUndefined of
        the opposite arm's width.  Returns a single node when everything
        collapses to one operand, otherwise a new DFConcat.
        """
        nodelist = []
        last_node = None
        for n in concatnode.nextnodes:
            # Merge only when both the previous and the current operand are
            # DFBranch nodes with an identical condition.
            if last_node is None: pass
            elif not isinstance(last_node, DFBranch): pass
            elif not isinstance(n, DFBranch): pass
            elif last_node.condnode == n.condnode:
                truenode_list = (last_node.truenode, n.truenode)
                falsenode_list = (last_node.falsenode, n.falsenode)
                new_truenode_list = []
                new_falsenode_list = []
                # A missing true arm is replaced by undefined bits sized to
                # the corresponding false arm (and vice versa below).
                pos = 0
                for t in truenode_list:
                    if t is None: new_truenode_list.append(DFUndefined(self.getWidth(falsenode_list[pos])))
                    else: new_truenode_list.append(t)
                    pos += 1

                pos = 0
                for f in falsenode_list:
                    if f is None: new_falsenode_list.append(DFUndefined(self.getWidth(truenode_list[pos])))
                    else: new_falsenode_list.append(f)
                    pos += 1

                new_node = DFBranch(last_node.condnode, DFConcat(tuple(new_truenode_list)), DFConcat(tuple(new_falsenode_list)))
                last_node = new_node
                # The merged branch replaces the previously appended operand.
                nodelist.pop()
                nodelist.append(new_node)
                continue
            nodelist.append(n)
            last_node = n
        if len(nodelist) == 1: return nodelist[0]
        return DFConcat(tuple(nodelist))
Example #23
0
    def emit_Pool(self, IR_node):
        """Emit forward code for a pooling layer node (max or average,
        any spatial dimensionality, global or windowed)."""
        dim = len(IR_node.get_attr('strides')) - 2

        pooling_type = IR_node.get_attr('pooling_type')
        if pooling_type == "MAX":
            pool_name = "max_pool{}d".format(dim)
            # extra argument forwarded to _defuse_padding (presumably the
            # pad fill value, so padding never wins a max -- confirm)
            exstr = ", value=float('-Inf')"
        elif pooling_type == "AVG":
            pool_name = "avg_pool{}d".format(dim)
            exstr = ""
        else:
            # bug fix: `assert False` is stripped under `python -O`, which
            # would let an unsupported pooling type fall through silently;
            # raise an explicit error instead.
            raise ValueError(
                "unsupported pooling type: {}".format(pooling_type))

        if IR_node.layer.attr['global_pooling'].b:
            # global pooling: kernel spans the whole spatial extent of the
            # parent tensor
            self.add_body(2, "{:<15} = F.{}(input = {}, kernel_size = {}.size()[2:])".format(
                IR_node.variable_name,
                pool_name,
                self.parent_variable_name(IR_node),
                self.parent_variable_name(IR_node)
            ))

        else:
            # dilated pooling is not supported by the emitted code
            for e in IR_node.get_attr('dilations', []):
                assert e == 1

            # strip the batch/channel entries, keeping spatial dims only
            pool_size = IR_node.get_attr('kernel_shape')[1:-1]
            strides = IR_node.get_attr('strides')[1:-1]

            input_node = self._defuse_padding(IR_node, exstr)
            self.add_body(2, "{:<15} = F.{}({}, kernel_size={}, stride={})".format(
                IR_node.variable_name,
                pool_name,
                input_node,
                tuple(pool_size),
                tuple(strides)
                ))
Example #24
0
    def _do_fetch(self):
        """Run the underlying query once, temporarily widening the result
        limit to compensate for manually excluded PKs."""
        assert not self.results

        # If we're manually excluding PKs, and we've specified a limit to the results
        # we need to make sure that we grab more than we were asked for otherwise we could filter
        # out too many! These are again limited back to the original request limit
        # while we're processing the results later
        excluded_pk_count = 0
        if self.excluded_pks and self.limits[1]:
            excluded_pk_count = len(self.excluded_pks)
            self.limits = (self.limits[0], self.limits[1] + excluded_pk_count)

        lower, upper = self.limits
        self.results = self._run_query(
            aggregate_type=self.aggregate_type,
            start=lower,
            limit=None if upper is None else (upper - (lower or 0))
        )

        # Ensure that the results returned is reset
        self.results_returned = 0

        if excluded_pk_count:
            # Undo the widening applied above
            self.limits = (self.limits[0], self.limits[1] - excluded_pk_count)

        self.query_done = True
Example #25
0
def check_compatible(client, min_version=None, max_version=None):
    """Checks if a kazoo client is backed by a zookeeper server version.

    This check will verify that the zookeeper server version that the client
    is connected to satisfies a given minimum version (inclusive) and
    maximum (inclusive) version range. If the server is not in the provided
    version range then a exception is raised indiciating this.
    """
    server_version = None

    def _server_version():
        # integer tuple form of the connected server's version
        return tuple(int(part) for part in client.server_version())

    if min_version:
        server_version = _server_version()
        min_version = tuple(int(part) for part in min_version)
        if server_version < min_version:
            pretty_server_version = ".".join(str(part) for part in server_version)
            min_version = ".".join(str(part) for part in min_version)
            raise exc.IncompatibleVersion("Incompatible zookeeper version"
                                          " %s detected, zookeeper >= %s"
                                          " required" % (pretty_server_version,
                                                         min_version))
    if max_version:
        # fetch lazily: only ask the server when not already known
        if server_version is None:
            server_version = _server_version()
        max_version = tuple(int(part) for part in max_version)
        if server_version > max_version:
            pretty_server_version = ".".join(str(part) for part in server_version)
            max_version = ".".join(str(part) for part in max_version)
            raise exc.IncompatibleVersion("Incompatible zookeeper version"
                                          " %s detected, zookeeper <= %s"
                                          " required" % (pretty_server_version,
                                                         max_version))
Example #26
0
 def pipeOpen(self, cmd, *args, **flags):
     """Build an argv from `cmd` (whitespace-split), keyword `flags`
     (single-letter keys become '-k value' pairs, longer keys become
     '--key=value'; None values are skipped) and trailing `args`, then
     spawn it with piped stdout/stderr.

     :returns: the subprocess.Popen object
     """
     argv = list(cmd.split(" "))
     for key, value in flags.items():
         if value is None:
             # a None flag value means "omit this flag"
             continue
         # idiom fix: replaced the fragile `cond and a or b` ternary
         # emulation with explicit branches
         if len(key) == 1:
             argv.extend(("-%s" % (key,), str(value)))
         else:
             argv.append("--%s=%s" % (key, value))
     argv.extend(args)
     # drop any empty tokens produced by repeated spaces in `cmd`
     return sp.Popen(tuple(a for a in argv if a), stdout=sp.PIPE, stderr=sp.PIPE)
Example #27
0
	def writeNetworkConfig(self):
		"""Write /etc/network/interfaces from self.ifaces and refresh the
		list of configured adapters.

		NOTE(review): Python 2 only (print statement, dict.has_key, file()).
		"""
		self.configuredInterfaces = []
		fp = file('/etc/network/interfaces', 'w')
		fp.write("# automatically generated by enigma2\n# do NOT change manually!\n\n")
		# loopback stanza is always emitted
		fp.write("auto lo\n")
		fp.write("iface lo inet loopback\n\n")
		for ifacename, iface in self.ifaces.items():
			if iface['up'] == True:
				fp.write("auto " + ifacename + "\n")
				self.configuredInterfaces.append(ifacename)
			if iface['dhcp'] == True:
				fp.write("iface "+ ifacename +" inet dhcp\n")
			if iface['dhcp'] == False:
				# static configuration: address/netmask, optional gateway;
				# ip/netmask/gateway are 4-element sequences of ints
				fp.write("iface "+ ifacename +" inet static\n")
				if iface.has_key('ip'):
					print tuple(iface['ip'])
					fp.write("	address %d.%d.%d.%d\n" % tuple(iface['ip']))
					fp.write("	netmask %d.%d.%d.%d\n" % tuple(iface['netmask']))
					if iface.has_key('gateway'):
						fp.write("	gateway %d.%d.%d.%d\n" % tuple(iface['gateway']))
			if iface.has_key("configStrings"):
				# raw extra configuration lines supplied externally
				fp.write(iface["configStrings"])
			# preup/predown are written only when no configStrings override them
			if iface["preup"] is not False and not iface.has_key("configStrings"):
				fp.write(iface["preup"])
			if iface["predown"] is not False and not iface.has_key("configStrings"):
				fp.write(iface["predown"])
			fp.write("\n")
		fp.close()
		self.configuredNetworkAdapters = self.configuredInterfaces
		self.writeNameserverConfig()
Example #28
0
def chunks(tags):   # TODO: option: does O count as a singleton chunk?
    '''
    Yield chunks of token indices from a chunking tag sequence; judging by
    the doctest, lowercase tags mark "in-gap" tokens nested inside an outer
    chunk (confirm against isInGap/isInside/isBegin).

    >>> list(chunks(['O', 'B-evt', 'o', 'b-PER', 'I', 'I', 'B-PER', 'O', 'B-ORG', 'I-ORG'])) \
        #             0    1        2    3        4    5    6        7    8        9
    [(3,), (1, 4, 5), (6,), (8, 9)]
    '''
    ochk = []       # indices of the in-progress outer chunk
    ichk = None     # indices of the in-progress in-gap chunk (None = not in a gap)
    for i,t in enumerate(tags):
        if isInGap(t):
            if ichk is None:
                # first in-gap tag must start a chunk, not continue one
                assert not isInside(t)
            else:
                if isInside(t):
                    ichk.append(i)
                elif ichk:
                    # a non-inside tag terminates the current in-gap chunk
                    yield tuple(ichk)
                    ichk = []
            if isBegin(t):
                ichk = [i]
        else:
            if ichk: yield tuple(ichk)  # post-gap
            ichk = None
            if isInside(t):
                ochk.append(i)
            elif ochk:
                # a non-inside tag terminates the current outer chunk
                yield tuple(ochk)
                ochk = []
            if isBegin(t):
                ochk = [i]
    # a gap chunk must not be left open at the end of the sequence
    assert ichk is None
    if ochk: yield tuple(ochk)
Example #29
0
def tokens_ngram_processor(tokens, ngram_len):
    """
    Given a `tokens` sequence or iterable of Tokens, return an iterator of
    tuples of Tokens where the tuples length is length `ngram_len`. Buffers at
    most `ngram_len` iterable items. The returned tuples contains
    either `ngram_len` items or less for these cases where the number of tokens
    is smaller than `ngram_len`:

    - between the beginning of the stream and a first gap
    - between a last gap and the end of the stream
    - between two gaps
    In these cases, shorter ngrams can be returned.
    """
    window = deque()
    for token in tokens:
        if len(window) == ngram_len:
            # full ngram: emit it and slide the window by one token
            yield tuple(window)
            window.popleft()
        window.append(token)
        if token.gap:
            # a gap closes the current ngram, however short, and restarts
            # buffering from scratch
            yield tuple(window)
            window.clear()
    if window:
        # emit whatever remains after the last token
        yield tuple(window)
def test_connected(grid, start, tolerence=1):
    """Flood-fill `grid` from `start` along axis-aligned neighbours and
    report whether at least `tolerence` (a fraction) of the initially
    unmarked cells are reachable.

    NOTE(review): `tolerence` [sic] is the original public parameter name;
    callers may pass it by keyword, so it is kept.
    """
    # Pre-mark cells whose value maps to 'w' or 'g' so the fill treats them
    # as blocked. NOTE(review): the 'wall'/'goal' semantics are inferred
    # from the color_code_map_inv keys -- confirm.
    mark = np.in1d(grid, [
                        color_code_map_inv['w'], 
                        color_code_map_inv['g']
                    ]
            ).reshape(grid.shape)
    frontier = [start]
    mark[tuple(start)] = True
    mark_cnt = 1
    # number of marks needed to succeed; computed after `start` is marked,
    # from the count of still-unmarked cells
    needed_mark_count = math.floor((mark.size - np.count_nonzero(mark)) * tolerence)
    while len(frontier) > 0:
        loc = frontier.pop()
        # visit both axis-aligned neighbours along every dimension
        for i in range(len(grid.shape)):
            l = np.copy(loc)
            l[i] = loc[i] - 1
            if l[i] >= 0 and not mark[tuple(l)]:
                mark[tuple(l)] = True
                mark_cnt += 1
                frontier.append(np.copy(l))
            l[i] = loc[i] + 1
            if l[i] < grid.shape[i] and not mark[tuple(l)]:
                mark[tuple(l)] = True
                mark_cnt += 1
                frontier.append(np.copy(l))
        if mark_cnt >= needed_mark_count:
            return True
    return False