Example #1
1
    def testGetFeaturesPolyFilterRectTests(self):
        """ Test fetching features from a polygon layer with filter rect"""
        # Skip when the provider under test has no polygon layer
        if not getattr(self, "poly_provider", None):
            return

        extent = QgsRectangle(-73, 70, -63, 80)
        request = QgsFeatureRequest().setFilterRect(extent)
        features = [f["pk"] for f in self.poly_provider.getFeatures(request)]
        all_valid = all(f.isValid() for f in self.provider.getFeatures(request))
        # Some providers may return the exact intersection matches (2, 3) even without the ExactIntersect flag, so we accept that too
        assert set(features) == set([2, 3]) or set(features) == set([1, 2, 3]), "Got {} instead".format(features)
        self.assertTrue(all_valid)

        # Test with exact intersection
        request = QgsFeatureRequest().setFilterRect(extent).setFlags(QgsFeatureRequest.ExactIntersect)
        features = [f["pk"] for f in self.poly_provider.getFeatures(request)]
        all_valid = all(f.isValid() for f in self.provider.getFeatures(request))
        assert set(features) == set([2, 3]), "Got {} instead".format(features)
        self.assertTrue(all_valid)

        # Test with an empty rectangle: no spatial filter is applied, so all features are returned
        extent = QgsRectangle()
        features = [f["pk"] for f in self.provider.getFeatures(QgsFeatureRequest().setFilterRect(extent))]
        assert set(features) == set([1, 2, 3, 4, 5]), "Got {} instead".format(features)
Example #2
1
    def migrate(self, targets, plan=None, fake=False, fake_initial=False):
        """
        Migrates the database up to the given targets.

        Django first needs to create all project states before a migration is
        (un)applied, and in a second step, run all the database operations.
        """
        if plan is None:
            plan = self.migration_plan(targets)
        # Create the forwards plan Django would follow on an empty database
        full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)

        all_forwards = all(not backwards for mig, backwards in plan)
        all_backwards = all(backwards for mig, backwards in plan)

        if not plan:
            pass  # Nothing to do for an empty plan
        elif all_forwards == all_backwards:
            # This should only happen if there's a mixed plan
            raise InvalidMigrationPlan(
                "Migration plans with both forwards and backwards migrations "
                "are not supported. Please split your migration process into "
                "separate plans of only forwards OR backwards migrations.",
                plan,
            )
        elif all_forwards:
            self._migrate_all_forwards(plan, full_plan, fake=fake, fake_initial=fake_initial)
        else:
            # No need to check for `elif all_backwards` here, as that condition
            # would always evaluate to true.
            self._migrate_all_backwards(plan, full_plan, fake=fake)

        self.check_replacements()
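The plan classification above hinges on the two all() scans: for a non-empty plan, all_forwards and all_backwards can only be equal when both are False, i.e. when the plan mixes directions. A minimal stand-alone sketch of that logic, with plain tuples standing in for (Migration, backwards) pairs:

def classify(plan):
    all_forwards = all(not backwards for _, backwards in plan)
    all_backwards = all(backwards for _, backwards in plan)
    if not plan:
        return "empty"  # both flags are True here, so test emptiness first
    if all_forwards == all_backwards:
        return "mixed"  # only False == False is possible for a non-empty plan
    return "forwards" if all_forwards else "backwards"

assert classify([("app.0001", False), ("app.0002", False)]) == "forwards"
assert classify([("app.0002", True), ("other.0001", False)]) == "mixed"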
Example #3
1
def solve(A, *B):
    A = asarray(A)
    assert A.ndim == 2
    B = [asarray(b) for b in B]
    assert all(b.shape[0] == A.shape[0] and b.ndim in (1, 2) for b in B)
    # Column offsets mapping each right-hand side to its block in the stacked solution.
    S = [
        slice(i, i + b.shape[1]) if b.ndim == 2 else i
        for b, i in zip(B, numpy.cumsum([0] + [b[0].size for b in B[:-1]]))
    ]
    if not isrational(A) or not all(isrational(b) for b in B):
        # Float path: fall back to numpy's LAPACK solver.
        A = A.astype(float)
        B = numpy.concatenate([b.astype(float).reshape(len(b), -1) for b in B], axis=1)
        Y = numpy.linalg.solve(A, B)
        X = [Y[:, s] for s in S]
    else:
        # Exact path: fraction-free Gaussian elimination on the augmented
        # integer matrix [A.numer | b1.numer .. bn.numer].
        Ab = numpy.concatenate([A.numer] + [b.numer.reshape(len(b), -1) for b in B], axis=1)
        n = A.shape[1]
        for icol in range(n):
            if not Ab[icol, icol]:
                # Reorder the remaining rows so that one with the smallest
                # nonzero entry in this column lands on the pivot.
                Ab[icol:] = Ab[icol + numpy.argsort([abs(v) if v else numpy.inf for v in Ab[icol:, icol]])]
            # Eliminate the pivot column from every other row while staying in the integers.
            Ab[:icol] = Ab[:icol] * Ab[icol, icol] - Ab[:icol, icol, numpy.newaxis] * Ab[icol, :]
            Ab[icol + 1 :] = Ab[icol + 1 :] * Ab[icol, icol] - Ab[icol + 1 :, icol, numpy.newaxis] * Ab[icol, :]
        if Ab[n:].any():
            raise numpy.linalg.LinAlgError("linear system has no solution")
        w = numpy.diag(Ab[:n, :n])
        denom = gcd(*w)
        numer = Ab[:n, n:] * (denom // w[:, numpy.newaxis])
        X = [Rational(numer[:, s] * A.denom, denom * b.denom) for (s, b) in zip(S, B)]
        assert not any((dot(A, x) - b).numer.any() for (x, b) in zip(X, B))
    if len(B) == 1:
        X, = X
    return X
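On the float path this behaves like numpy.linalg.solve with each right-hand side unpacked separately. A small usage sketch; asarray, isrational and Rational come from the surrounding module, so only the float branch is exercised here:

import numpy

A = numpy.array([[2.0, 0.0], [0.0, 4.0]])
b = numpy.array([2.0, 8.0])
x = solve(A, b)  # a single right-hand side is returned unwrapped
assert numpy.allclose(x, [1.0, 2.0])

x1, x2 = solve(A, b, 2 * b)  # several right-hand sides: one solution per argument
assert numpy.allclose(x2, [2.0, 4.0])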
Example #4
1
def _validate_service_link(client, context, service_kind):
    env = _create_stack(client)

    image_uuid = context.image_uuid
    # use case #1 - the service carrying the selector's label
    # already exists when the service with the selector is created
    labels, service = _create_service(client, env, image_uuid, service_kind)
    service = client.wait_success(service)
    assert all(item in service.launchConfig.labels.items() for item in labels.items())
    launch_config = {"imageUuid": image_uuid}
    if service_kind == "loadBalancerService":
        service1 = client.create_service(
            name=random_str(), stackId=env.id, launchConfig=launch_config, selectorLink="foo=bar", lbConfig={}
        )
    else:
        service1 = client.create_service(
            name=random_str(), stackId=env.id, launchConfig=launch_config, selectorLink="foo=bar"
        )
    service1 = client.wait_success(service1)
    assert service1.selectorLink == "foo=bar"
    _validate_add_service_link(service1, service, client)
    # use case #2 - the service carrying the selector's label
    # is added after the service with the selector was created
    labels, service2 = _create_service(client, env, image_uuid, service_kind)
    service2 = client.wait_success(service2)
    assert all(item in service2.launchConfig.labels.items() for item in labels.items())
    _validate_add_service_link(service1, service2, client)

    compose_config = env.exportconfig()
    assert compose_config is not None
    document = yaml.safe_load(compose_config.dockerComposeConfig)
    assert len(document["services"][service1.name]["labels"]) == 1
    labels = {"io.rancher.service.selector.link": "foo=bar"}
    assert document["services"][service1.name]["labels"] == labels
Example #5
1
def _non_dominated_front_old(iterable, key=lambda x: x, allowequality=True):
    """Return a subset of items from iterable which are not dominated by any
    other item in iterable."""
    items = list(iterable)
    keys = {i: key(i) for i in items}
    dim = len(next(iter(keys.values())))
    if any(dim != len(k) for k in keys.values()):
        raise ValueError("Wrong tuple size.")

    # Make a dictionary that holds the items another item dominates.
    dominations = collections.defaultdict(lambda: [])
    for i in items:
        for j in items:
            if allowequality:
                # Equal items may coexist, so domination requires a strict
                # improvement in every dimension.
                if all(keys[i][k] < keys[j][k] for k in range(dim)):
                    dominations[i].append(j)
            else:
                if all(keys[i][k] <= keys[j][k] for k in range(dim)):
                    dominations[i].append(j)

    def dominates(i, j):
        return j in dominations[i]

    res = set()
    items = set(items)
    for i in items:
        res.add(i)
        for j in list(res):
            if i is j:
                continue
            if dominates(j, i):
                res.remove(i)
                break
            elif dominates(i, j):
                res.remove(j)
    return res
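A small usage sketch; minimization is assumed, i.e. an item dominates another when its key is smaller coordinate-wise:

points = [(1, 2), (2, 1), (3, 3), (0, 0)]
front = _non_dominated_front_old(points)
assert front == {(0, 0)}  # (0, 0) is strictly smaller than every other point in both coordinates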
Example #6
1
def test_poissoninput():
    # Test extreme cases and do a very basic test of an intermediate case, we
    # don't want tests to be stochastic
    G = NeuronGroup(
        10,
        """x : volt
                           y : volt
                           y2 : volt
                           z : volt
                           z2 : volt
                           w : 1""",
    )
    G.w = 0.5

    never_update = PoissonInput(G, "x", 100, 0 * Hz, weight=1 * volt)
    always_update = PoissonInput(G, "y", 50, 1 / defaultclock.dt, weight=2 * volt)
    always_update2 = PoissonInput(G, "y2", 50, 1 / defaultclock.dt, weight="1*volt + 1*volt")
    sometimes_update = PoissonInput(G, "z", 10000, 50 * Hz, weight=0.5 * volt)
    sometimes_update2 = PoissonInput(G, "z2", 10000, 50 * Hz, weight="w*volt")

    mon = StateMonitor(G, ["x", "y", "y2", "z", "z2"], record=True, when="end")

    run(1 * ms)
    assert_equal(0, mon.x[:])
    assert_equal(np.tile((1 + np.arange(mon.y[:].shape[1])) * 50 * 2 * volt, (10, 1)), mon.y[:])
    assert_equal(np.tile((1 + np.arange(mon.y[:].shape[1])) * 50 * 2 * volt, (10, 1)), mon.y2[:])
    assert all(np.var(mon.z[:], axis=1) > 0)  # variability over time
    assert all(np.var(mon.z[:], axis=0) > 0)  # variability over neurons
    assert all(np.var(mon.z2[:], axis=1) > 0)  # variability over time
    assert all(np.var(mon.z2[:], axis=0) > 0)  # variability over neurons
Example #7
1
    def test_smart_strings(self):
        """Lxml smart strings return values"""

        class SmartStringsSelector(Selector):
            _lxml_smart_strings = True

        body = """<body>
                    <div class='one'>
                      <ul>
                        <li>one</li><li>two</li>
                      </ul>
                    </div>
                    <div class='two'>
                      <ul>
                        <li>four</li><li>five</li><li>six</li>
                      </ul>
                    </div>
                  </body>"""

        response = HtmlResponse(url="http://example.com", body=body)

        # .getparent() is available for text nodes and attributes
        # only when smart_strings are on
        x = self.sscls(response)
        li_text = x.xpath("//li/text()")
        self.assertFalse(any(hasattr(e._root, "getparent") for e in li_text))
        div_class = x.xpath("//div/@class")
        self.assertFalse(any(hasattr(e._root, "getparent") for e in div_class))

        x = SmartStringsSelector(response)
        li_text = x.xpath("//li/text()")
        self.assertTrue(all(hasattr(e._root, "getparent") for e in li_text))
        div_class = x.xpath("//div/@class")
        self.assertTrue(all(hasattr(e._root, "getparent") for e in div_class))
Example #8
0
def test_add_channels():
    """Test evoked splitting / re-appending channel types
    """
    evoked = read_evokeds(fname, condition=0)
    evoked.info["buffer_size_sec"] = None
    evoked_eeg = evoked.pick_types(meg=False, eeg=True, copy=True)
    evoked_meg = evoked.pick_types(meg=True, copy=True)
    evoked_stim = evoked.pick_types(meg=False, stim=True, copy=True)
    evoked_eeg_meg = evoked.pick_types(meg=True, eeg=True, copy=True)
    evoked_new = evoked_meg.add_channels([evoked_eeg, evoked_stim], copy=True)
    assert_true(all(ch in evoked_new.ch_names for ch in evoked_stim.ch_names + evoked_meg.ch_names))
    evoked_new = evoked_meg.add_channels([evoked_eeg], copy=True)

    assert_true(all(ch in evoked_new.ch_names for ch in evoked.ch_names))
    assert_array_equal(evoked_new.data, evoked_eeg_meg.data)
    assert_true(all(ch not in evoked_new.ch_names for ch in evoked_stim.ch_names))

    # Now test errors
    evoked_badsf = evoked_eeg.copy()
    evoked_badsf.info["sfreq"] = 3.1415927
    evoked_eeg = evoked_eeg.crop(-0.1, 0.1)

    assert_raises(RuntimeError, evoked_meg.add_channels, [evoked_badsf])
    assert_raises(AssertionError, evoked_meg.add_channels, [evoked_eeg])
    assert_raises(ValueError, evoked_meg.add_channels, [evoked_meg])
    assert_raises(AssertionError, evoked_meg.add_channels, evoked_badsf)
Example #9
0
    def test_to_dict(self, json_):
        c_spill = [point_line_release_spill(self.num_elements, self.start_position, self.start_time) for i in range(2)]

        u_spill = [
            point_line_release_spill(self.num_elements, self.start_position2, self.start_time2) for i in range(2)
        ]

        scp = SpillContainerPair(True)

        for sp_tuple in zip(c_spill, u_spill):
            scp += sp_tuple

        toserial = scp.to_dict()
        assert "spills" in toserial
        assert "uncertain_spills" in toserial

        for key in ("spills", "uncertain_spills"):
            if key == "spills":
                check = c_spill
            else:
                check = u_spill

            alltrue = [check[ix].id == spill["id"] for ix, spill in enumerate(toserial[key])]
            assert all(alltrue)
            alltrue = [check[ix].obj_type_to_dict() == spill["obj_type"] for ix, spill in enumerate(toserial[key])]
            assert all(alltrue)
Example #10
0
def isprint(s):
    """isprint(s) -> bool

    Return True if the argument is printable

    Example:

        >>> isprint(ord('a'))
        True
        >>> isprint('abc')
        True
        >>> isprint('\x01')
        False
        >>> isprint(b'abc')
        True
        >>> isprint(b'\x01')
        False
    """
    chars = string.ascii_letters + string.digits + string.punctuation + " "

    if isinstance(s, int):
        return chr(s) in chars
    elif isinstance(s, bytes):
        byte_chars = chars.encode()
        return all(c in byte_chars for c in s)
    else:
        return all(c in chars for c in s)
Example #11
0
def main():

    # What to run
    argdicts = {
        "-driver": "ode",
        "-exact": ["sin", "poly3", "poly2", "stiff_test"],
        "-ts": ["bdf2", "midpoint-bdf", "tr", "bdf1"],
        "-tmax": 10,
        "-tol": 1e-4,
        "-disable-mm-opt": 1,
        "-always-write-trace": 1,  # Otherwise we get wrong ndts by counting len(dts)
    }

    # ??ds not sure why we need "-disable-mm-opt",
    # but there's something stupid going on with mass matrix storage

    # Where it's going to end up
    base_outdir = os.path.abspath(pjoin(os.path.dirname(__file__), "Validation"))

    # Run
    err_codes, outdirs = mm.run_sweep(argdicts, base_outdir)

    # Get data
    datasets = list(filter(lambda d: d is not None, map(mm.parse_run, outdirs)))

    # Check things ran
    test_results = []
    test_results.append(all(e == 0 for e in err_codes))

    # Use bdf2's nsteps as a maximum, tr and imr are more accurate than
    # bdf2 so this should be true (unless bdf2's numerical damping has
    # kicked in and caused it to jump to a steady state too soon), (we
    # assume that the order of data is preserved here so that bdf_data[n]
    # is the same exact solution as imr_data[n] etc.).
    bdf2_data = [d for d in datasets if d["-ts"] == "bdf2"]

    for ts in argdicts["-ts"]:

        # bdf1 sucks (first order) so it needs far more steps, do it
        # manually.
        if ts == "bdf1":
            max_err = 0.4
            max_steps = [550, 3800, 1050, 70]
        else:
            max_err = 0.07
            max_steps = [1.3 * len(d["times"]) for d in bdf2_data]

        ts_data = [d for d in datasets if d["-ts"] == ts]

        # Check errors are small
        test_results.append(all(tests.check_error_norm(d, max_err) for d in ts_data))

        # Check all the runs:
        nsteps_ok = [tests.check_ndt_less_than(d, m) for d, m in zip(ts_data, max_steps)]
        test_results.append(all(nsteps_ok))

    if all(test_results):
        return 0
    else:
        return 1
Example #12
0
    def polar(self):
        """
        Return the polar (dual) polytope.

        The polytope must have the IP-property (see
        :meth:`has_IP_property`), that is, the origin must be an
        interior point. In particular, it must be full-dimensional.

        OUTPUT:

        The polytope whose vertices are the coefficient vectors of the
        inequalities of ``self`` with inhomogeneous term normalized to
        unity.

        EXAMPLES::

            sage: p = Polyhedron(vertices=[(1,0,0),(0,1,0),(0,0,1),(-1,-1,-1)], base_ring=ZZ)
            sage: p.polar()
            A 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 4 vertices
            sage: type(_)
            <class 'sage.geometry.polyhedron.backend_ppl.Polyhedra_ZZ_ppl_with_category.element_class'>
            sage: p.polar().base_ring()
            Integer Ring
        """
        if not self.has_IP_property():
            raise ValueError("The polytope must have the IP property.")

        vertices = [ieq.A() / ieq.b() for ieq in self.inequality_generator()]
        if all(all(v_i in ZZ for v_i in v) for v in vertices):
            return Polyhedron(vertices=vertices, base_ring=ZZ)
        else:
            return Polyhedron(vertices=vertices, base_ring=QQ)
Example #13
0
    def CalculateNgramPMI(self, k, N):
        nSum = sum(self.counts[N].values())
        unSum = sum(self.counts[1].values())

        wordProbs = {x[0]: float(self.counts[1][x]) / unSum for x in self.counts[1]}  # word probabilities
        jointProbs = {
            x: float(self.counts[N][x]) / nSum for x in self.counts[N] if self.counts[N][x] > 15
        }  # joint probabilities

        probs = {}  # PMI of N-grams

        for nGram, jProb in jointProbs.items():
            indvSum = 1.0
            for i in range(0, N):
                indvSum *= float(wordProbs[nGram[i]])
            probs[nGram] = log((jProb / indvSum), 2)

        topK = sorted(probs.items(), key=operator.itemgetter(1), reverse=True)
        newK = []

        for gram in topK:
            if all([self.tags[gram[0]][i] in self.AcceptedPOSTags for i in range(0, N)]):
                if all([self.tags[gram[0]][i] not in self.Nouns for i in range(0, N)]):
                    newK.append(gram)

        newK = newK[0:k]
        self.counts[N] = {
            key[0]: self.counts[N][key[0]] for key in newK
        }  # Replace nGrams with high information features
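The PMI computed above is log2 of the joint n-gram probability over the product of the individual word probabilities. A self-contained numeric check of the same formula, with made-up counts:

from math import log

# Hypothetical counts: the bigram ("new", "york") occurs 20 times out of 1000
# bigrams; "new" occurs 50 and "york" 25 times out of 2000 unigrams.
p_joint = 20 / 1000
p_new = 50 / 2000
p_york = 25 / 2000
pmi = log(p_joint / (p_new * p_york), 2)
assert round(pmi, 3) == 6.0  # 0.02 / (0.025 * 0.0125) = 64, and log2(64) = 6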
Example #14
0
def _normalize_freeze_cell(freeze, periodicity=3):
    """ Transforms freeze parameters into a normalized form. 
  
      The normalized form is a list of six boolean where, if True, each of xx,
      yy, zz, yz, xy, xz is *frozen*. The other forms allow strings, list of
      strings, or the same list of booleans as the output.

      If periodicity is 2, then the degrees of freedom are xx, yy.
  """
    from numpy import array

    if isinstance(freeze, str):
        freeze = freeze.split()
    if periodicity == 3:
        if len(freeze) == 6 and all(isinstance(u, (bool, int)) for u in freeze):
            return [bool(u) for u in freeze]
        freeze = {u.lower() for u in freeze}
        return array(
            [
                "xx" in freeze,
                "yy" in freeze,
                "zz" in freeze,
                ("yz" in freeze or "zy" in freeze),
                ("xy" in freeze or "yx" in freeze),
                ("xz" in freeze or "zx" in freeze),
            ]
        )
    elif periodicity == 2:
        if len(freeze) == 2 and all(isinstance(u, (bool, int)) for u in freeze):
            return [bool(u) for u in freeze]
        freeze = {u.lower() for u in freeze}
        return array(["xx" in freeze, "yy" in freeze])
    else:
        raise ValueError("periodicity must be 2 or 3, got {0}".format(periodicity))
Example #15
0
    def commit_offsets_sync(self, offsets):
        """Commit specific offsets synchronously.

        This method will retry until the commit completes successfully or an
        unrecoverable error is encountered.

        Arguments:
            offsets (dict {TopicPartition: OffsetAndMetadata}): what to commit

        Raises error on failure
        """
        assert self.config["api_version"] >= (0, 8, 1), "Unsupported Broker API"
        assert all(isinstance(k, TopicPartition) for k in offsets)
        assert all(isinstance(v, OffsetAndMetadata) for v in offsets.values())
        if not offsets:
            return

        while True:
            if self.config["api_version"] >= (0, 8, 2):
                self.ensure_coordinator_known()

            future = self._send_offset_commit_request(offsets)
            self._client.poll(future=future)

            if future.succeeded():
                return future.value

            if not future.retriable():
                raise future.exception  # pylint: disable-msg=raising-bad-type

            time.sleep(self.config["retry_backoff_ms"] / 1000.0)
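A sketch of the offsets mapping this method expects, built from kafka-python's structs; the exact OffsetAndMetadata field layout varies across kafka-python versions, so treat the constructor call as an assumption:

from kafka.structs import OffsetAndMetadata, TopicPartition

# `coordinator` is an already-constructed consumer coordinator instance.
offsets = {
    # offset 42 for partition 0 of "my-topic"; empty commit metadata
    TopicPartition("my-topic", 0): OffsetAndMetadata(42, ""),
}
coordinator.commit_offsets_sync(offsets)  # blocks until committed or a fatal error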
Example #16
0
    def __init__(self, name, values, rand=True, urn=[]):
        """
        Parameters
        ----------
        name : str
            name for the Variable
        values : iterable of str
            list of values that the Variable can assume (iterable, i.e. can be
            dict with keys)
        rand : bool
            randomize the sequence of values
        urn : list of Variables
            Variables which are drawn from the same urn BEFORE the
            current Variable (i.e. the current Variable can only assume values
            not taken by any Variable in the urn).
        """
        self.is_rand = rand
        self.name = name

        # validate urn (each urn Variable must offer exactly the same values)
        for u in urn:
            u_values = list(u.cells.values())
            if len(values) != len(u_values) or any(v1 != v2 for v1, v2 in zip(values, u_values)):
                raise ValueError("urn contains incommensurable Variable")
        self.urn = urn

        # validate values:
        assert all(isinstance(v, str) for v in values)
        assert len(values) < 256, "not implemented"
        self.cells = dict(enumerate(values))
        self.N = len(values)  # the N of categories
        self.Ndraw = self.N - len(self.urn)  # the N of possible values for each trial
Example #17
0
    def test(case):
        assert len(self.funcs) > 0
        for i in range(len(case)):
            ret = self.funcs[0](copy.deepcopy(case), i)
            assert all(f(copy.deepcopy(case), i) == ret for f in self.funcs[1:])
        ret = self._traverse(copy.deepcopy(case), self.funcs[0])
        assert all(self._traverse(copy.deepcopy(case), f) == ret for f in self.funcs[1:])
Example #18
0
def test_get_spill_mask():
    "Simple tests for get_spill_mask"

    start_time0 = datetime(2012, 1, 1, 12)
    start_time1 = datetime(2012, 1, 2, 12)
    start_time2 = start_time1 + timedelta(hours=1)
    start_position = (23.0, -78.5, 0.0)
    num_elements = 5
    sc = SpillContainer()
    sp0 = point_line_release_spill(num_elements, start_position, start_time0)

    sp1 = point_line_release_spill(
        num_elements,
        start_position,
        start_time1,
        end_position=(start_position[0] + 0.2, start_position[1] + 0.2, 0.0),
        end_release_time=start_time1 + timedelta(hours=3),
    )

    sp2 = point_line_release_spill(num_elements, start_position, start_time2)

    sc.spills += [sp0, sp1, sp2]

    # as we move forward in time, the spills will release LEs in an
    # expected way

    sc.prepare_for_model_run(windage_at)
    sc.release_elements(100, start_time0)
    sc.release_elements(100, start_time0 + timedelta(hours=24))
    sc.release_elements(100, start_time1 + timedelta(hours=1))
    sc.release_elements(100, start_time1 + timedelta(hours=3))

    assert all(sc["spill_num"][sc.get_spill_mask(sp2)] == 2)
    assert all(sc["spill_num"][sc.get_spill_mask(sp0)] == 0)
    assert all(sc["spill_num"][sc.get_spill_mask(sp1)] == 1)
Example #19
0
    def test1(self):
        assert_(all(self.A == self.B))
        assert_(all(self.A >= self.B))
        assert_(all(self.A <= self.B))
        assert_(not any(self.A > self.B))
        assert_(not any(self.A < self.B))
        assert_(not any(self.A != self.B))
Example #20
0
def _check_models(models, allow_dict=False):
    input_type_valid = False

    # Check for single item
    if isinstance(models, (Model, Document)):
        models = [models]

    # Check for sequence
    if isinstance(models, Sequence) and all(isinstance(x, (Model, Document)) for x in models):
        input_type_valid = True

    if allow_dict:
        if (
            isinstance(models, dict)
            and all(isinstance(x, string_types) for x in models.keys())
            and all(isinstance(x, (Model, Document)) for x in models.values())
        ):
            input_type_valid = True

    if not input_type_valid:
        if allow_dict:
            raise ValueError(
                "Input must be a Model, a Document, a Sequence of Models and Documents, "
                "or a dictionary mapping strings to Models and Documents"
            )
        else:
            raise ValueError("Input must be a Model, a Document, or a Sequence of Models and Documents")

    return models
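A quick sketch of the accepted input shapes, using Bokeh's figure() purely as a stand-in Model (the helper is private, so calling it directly is illustrative only):

from bokeh.plotting import figure

p = figure()
assert _check_models(p) == [p]                      # a single Model is wrapped in a list
assert _check_models([p, p]) == [p, p]              # sequences pass through unchanged
assert _check_models({"plot": p}, allow_dict=True)  # str -> Model dict, only with allow_dict

try:
    _check_models(42)
except ValueError as err:
    print(err)  # neither Model, Document, sequence, nor dict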
Example #21
0
    def _module_quotient(self, other, relations=False):
        # See: [SCA, section 2.8.4]
        if relations and len(other.gens) != 1:
            raise NotImplementedError
        if len(other.gens) == 0:
            return self.ring.ideal(1)
        elif len(other.gens) == 1:
            # We do some trickery. Let f be the (vector!) generating ``other``
            # and f1, .., fn be the (vectors) generating self.
            # Consider the submodule of R^{r+1} generated by (f, 1) and
            # {(fi, 0) | i}. Then the intersection with the last module
            # component yields the quotient.
            g1 = list(other.gens[0]) + [1]
            gi = [list(x) + [0] for x in self.gens]
            # NOTE: We *need* to use an elimination order
            M = self.ring.free_module(self.rank + 1).submodule(*([g1] + gi), order="ilex", TOP=False)
            if not relations:
                return self.ring.ideal(*[x[-1] for x in M._groebner_vec() if all(y == self.ring.zero for y in x[:-1])])
            else:
                G, R = M._groebner_vec(extended=True)
                indices = [i for i, x in enumerate(G) if all(y == self.ring.zero for y in x[:-1])]
                return (self.ring.ideal(*[G[i][-1] for i in indices]), [[-x for x in R[i][1:]] for i in indices])
        # For more generators, we use I : <h1, .., hn> = intersection of
        #                                    {I : <hi> | i}
        # TODO this can be done more efficiently
        return reduce(
            lambda x, y: x.intersect(y), (self._module_quotient(self.container.submodule(x)) for x in other.gens)
        )
Example #22
0
    def test_mixture_fraction(self):
        self.create_sim(p=ct.one_atm)
        Z = self.sim.mixture_fraction("H")
        self.assertNear(Z[0], 1.0)
        self.assertNear(Z[-1], 0.0)
        self.assertTrue(all(Z >= 0))
        self.assertTrue(all(Z <= 1.0))
Example #23
0
    def _get_dates(dates, use_arrays=None):
        # {{{
        import numpy as np

        if use_arrays is None:
            use_arrays = any(hasattr(d, "__len__") for d in dates.values())

        if use_arrays:
            assert all(hasattr(d, "__len__") for d in dates.values())
            n = set(len(d) for d in dates.values())
            assert len(n) == 1, "inconsistent array lengths"
            n = n.pop()
            zeros = np.zeros(n, "int32")
            ones = np.ones(n, "int32")
        else:
            assert all(not hasattr(d, "__len__") or len(d) == 1 for d in dates.values())
            zeros = 0
            ones = 1

        year = dates.get("year", zeros)
        month = dates.get("month", ones)
        day = dates.get("day", ones)
        hour = dates.get("hour", zeros)
        minute = dates.get("minute", zeros)
        second = dates.get("second", zeros)

        if not use_arrays:
            # numpy scalars don't behave like Python ints, so coerce explicitly
            year, month, day = int(year), int(month), int(day)
            hour, minute, second = int(hour), int(minute), int(second)

        return year, month, day, hour, minute, second
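A short usage sketch covering both modes; _get_dates is assumed to be callable as a plain function (it takes no self):

import numpy as np

# Scalar mode: plain Python ints out, missing fields defaulted.
assert _get_dates({"year": 2001, "month": 7}) == (2001, 7, 1, 0, 0, 0)

# Array mode: missing fields are filled with arrays of zeros/ones.
y, m, d, H, M, S = _get_dates({"year": np.array([2000, 2001])})
assert list(m) == [1, 1] and list(H) == [0, 0]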
Example #24
0
    def run_reacting_surface(self, xch4, tsurf, mdot, width):
        # Simplified version of the example 'catalytic_combustion.py'
        gas = ct.Solution("../data/ptcombust-simple.cti", "gas")
        surf_phase = ct.Interface("../data/ptcombust-simple.cti", "Pt_surf", [gas])

        tinlet = 300.0  # inlet temperature
        comp = {"CH4": xch4, "O2": 0.21, "N2": 0.79}
        gas.TPX = tinlet, ct.one_atm, comp
        surf_phase.TP = tsurf, ct.one_atm

        # integrate the coverage equations holding the gas composition fixed
        # to generate a good starting estimate for the coverages.
        surf_phase.advance_coverages(1.0)

        sim = ct.ImpingingJet(gas=gas, width=width, surface=surf_phase)
        sim.set_refine_criteria(10.0, 0.3, 0.4, 0.0)

        sim.inlet.mdot = mdot
        sim.inlet.T = tinlet
        sim.inlet.X = comp
        sim.surface.T = tsurf

        sim.solve(loglevel=0, auto=True)

        self.assertTrue(all(np.diff(sim.T) > 0))
        self.assertTrue(all(np.diff(sim.Y[gas.species_index("CH4")]) < 0))
        self.assertTrue(all(np.diff(sim.Y[gas.species_index("CO2")]) > 0))
Example #25
0
    def test_binomial(self, df_to_output_mock):
        main()
        df = df_to_output_mock.call_args_list[0][0][1]
        self.assertEqual(len(df), 10)
        self.assertTrue(all(round(x, 0) == x for x in df.c0))
        self.assertTrue(all(x >= 0 for x in df.c0))
        self.assertTrue(all(x <= 10 for x in df.c0))
Example #26
0
    def make(self, msg):
        """Return a string of the expected format"""
        val = msg[self.name]
        size = struct.calcsize(self.format)
        assert len(val) <= size

        # If the supplied value is a list of chars, or a list of bytes, turn
        # it into a string for ease of processing.
        if isinstance(val, list):
            if all(isinstance(c, bytes) for c in val):
                val = "".join([c.decode() for c in val])
            elif all(isinstance(c, str) for c in val):
                val = "".join([c for c in val])
            else:
                error = "Invalid value for string element: {}"
                raise TypeError(error.format(val))
        elif isinstance(val, bytes):
            # If the supplied value is bytes, decode it into a normal string
            val = val.decode()

        # 'p' (pascal strings) and 'c' (char list) must be the exact size of
        # the format
        if self.format[-1] == "p" and len(val) < size - 1:
            val += "\x00" * (size - len(val) - 1)

        # Lastly, 'c' (char list) formats are expected to be a list of
        # characters rather than a string.
        if self.format[-1] == "c":
            val += "\x00" * (size - len(val))
            val = list(val)

        return val
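The size and padding rules above come straight from the struct module; a standalone illustration using only the standard library:

import struct

assert struct.calcsize("10p") == 10   # pascal string: 1 length byte + up to 9 chars
assert struct.calcsize("10s") == 10   # fixed-size string
assert struct.calcsize("3c") == 3     # three single chars, supplied as separate values
assert struct.pack("3c", b"a", b"b", b"c") == b"abc"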
Example #27
0
def all_ship_positions(length, condition):
    for i, j in ALL_POSITIONS:
        # Vertical placement: cells (i, j) .. (i + length - 1, j) must stay on
        # the 10x10 board and satisfy the condition.
        if all(i + k <= 9 and condition((i + k, j)) for k in range(length)):
            yield set((i + k, j) for k in range(length))

        # Horizontal placement: cells (i, j) .. (i, j + length - 1).
        if all(j + k <= 9 and condition((i, j + k)) for k in range(length)):
            yield set((i, j + k) for k in range(length))
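A minimal usage sketch; ALL_POSITIONS is assumed to be every (row, col) pair of the 10x10 board:

ALL_POSITIONS = [(i, j) for i in range(10) for j in range(10)]  # assumed definition

placements = list(all_ship_positions(2, lambda cell: True))  # no cells blocked
assert {(0, 0), (1, 0)} in placements and {(0, 0), (0, 1)} in placements
assert len(placements) == 180  # 9 * 10 vertical + 10 * 9 horizontal placements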
Example #28
0
    def pack(self, msg):
        """Pack the provided values into the supplied buffer."""
        # Ensure that the input is of the proper form to be packed
        val = msg[self.name]
        size = struct.calcsize(self.format)
        assert len(val) <= size
        if self.format[-1] in ("s", "p"):
            if not isinstance(val, bytes):
                assert isinstance(val, str)
                val = val.encode()
                if self.format[-1] == "p" and len(val) < size:
                    # 'p' (pascal strings) must be the exact size of the format
                    val += b"\x00" * (size - len(val))

            data = self._struct.pack(val)
        else:  # 'c'
            if not all(isinstance(c, bytes) for c in val):
                if isinstance(val, bytes):
                    val = [bytes([c]) for c in val]
                else:
                    # last option, it could be a string, or a list of strings
                    assert (isinstance(val, list) and all(isinstance(c, str) for c in val)) or isinstance(val, str)
                    val = [c.encode() for c in val]
            if len(val) < size:
                val.extend([b"\x00"] * (size - len(val)))
            data = self._struct.pack(*val)

        # If the data does not meet the alignment, add some padding
        missing_bytes = len(data) % self._alignment
        if missing_bytes:
            data += b"\x00" * (self._alignment - missing_bytes)
        return data
Example #29
0
def prepare_roidb(imdb):
    """Enrich the imdb's roidb by adding some derived quantities that
    are useful for training. This function precomputes the maximum
    overlap, taken over ground-truth boxes, between each ROI and
    each ground-truth box. The class with maximum overlap is also
    recorded.
    """
    sizes = [PIL.Image.open(imdb.image_path_at(i)).size for i in range(imdb.num_images)]
    roidb = imdb.roidb
    for i in range(len(imdb.image_index)):
        roidb[i]["image"] = imdb.image_path_at(i)
        roidb[i]["width"] = sizes[i][0]
        roidb[i]["height"] = sizes[i][1]
        # need gt_overlaps as a dense array for argmax
        gt_overlaps = roidb[i]["gt_overlaps"].toarray()
        # max overlap with gt over classes (columns)
        max_overlaps = gt_overlaps.max(axis=1)
        # gt class that had the max overlap
        max_classes = gt_overlaps.argmax(axis=1)
        roidb[i]["max_classes"] = max_classes
        roidb[i]["max_overlaps"] = max_overlaps
        # sanity checks
        # max overlap of 0 => class should be zero (background)
        zero_inds = np.where(max_overlaps == 0)[0]
        assert all(max_classes[zero_inds] == 0)
        # max overlap > 0 => class should not be zero (must be a fg class)
        nonzero_inds = np.where(max_overlaps > 0)[0]
        assert all(max_classes[nonzero_inds] != 0)
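The max/argmax bookkeeping and both sanity checks in miniature, with pure numpy:

import numpy as np

gt_overlaps = np.array([[0.0, 0.0],    # ROI 0: no overlap, background
                        [0.1, 0.7]])   # ROI 1: best overlap with gt class 1
max_overlaps = gt_overlaps.max(axis=1)
max_classes = gt_overlaps.argmax(axis=1)
assert all(max_classes[np.where(max_overlaps == 0)[0]] == 0)
assert all(max_classes[np.where(max_overlaps > 0)[0]] != 0)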
Example #30
0
def elemwise(op, *args, **kwargs):
    """ Elementwise operation for dask.Dataframes """
    columns = kwargs.get("columns", None)
    name = kwargs.get("name", None)

    _name = "elemwise" + next(tokens)

    dfs = [arg for arg in args if isinstance(arg, _Frame)]
    other = [(i, arg) for i, arg in enumerate(args) if not isinstance(arg, _Frame)]

    if other:
        op2 = partial_by_order(op, other)
    else:
        op2 = op

    # All frame arguments must be partitioned identically for a blockwise zip.
    assert all(df.divisions == dfs[0].divisions for df in dfs)
    assert all(df.npartitions == dfs[0].npartitions for df in dfs)

    dsk = dict(((_name, i), (op2,) + frs) for i, frs in enumerate(zip(*[df._keys() for df in dfs])))

    if columns is not None:
        return DataFrame(merge(dsk, *[df.dask for df in dfs]), _name, columns, dfs[0].divisions)
    else:
        column_name = name or consistent_name(n for df in dfs for n in df.columns)
        return Series(merge(dsk, *[df.dask for df in dfs]), _name, column_name, dfs[0].divisions)