def diff_structure(st_keys, st_one, st_two):
    st_new = {}
    for i in st_one.keys():
        if i in st_keys:
            if i not in st_two:
                st_new[i] = st_one[i]
            elif st_keys[i] == "string" and str(st_one[i]) != str(st_two[i]):
                st_new[i] = st_one[i]
            elif st_keys[i] == "int" and int(st_one[i]) != int(st_two[i]):
                st_new[i] = st_one[i]
            elif st_keys[i] == "long" and long(st_one[i]) != long(st_two[i]):
                st_new[i] = st_one[i]
            elif st_keys[i] == "array":
                # NOTE: eval on untrusted input is unsafe; ast.literal_eval would be the safer choice
                a_st_one = st_one[i] if isinstance(st_one[i], list) else eval(st_one[i])
                a_st_two = st_two[i] if isinstance(st_two[i], list) else eval(st_two[i])
                em_news = list(set(a_st_one).difference(set(a_st_two)))
                em_deleted = list(set(a_st_two).difference(set(a_st_one)))
                if len(em_news) > 0 or len(em_deleted) > 0:
                    st_new[i] = a_st_one
    return st_new if st_new else None
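A minimal illustration of diff_structure; the type map and records below are invented for the example:

st_keys = {"name": "string", "count": "int", "tags": "array"}
st_one = {"name": "a", "count": "2", "tags": "['x', 'y']"}
st_two = {"name": "a", "count": "3", "tags": ["x"]}
print(diff_structure(st_keys, st_one, st_two))
# -> {'count': '2', 'tags': ['x', 'y']}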
def main():
    # path = 'ExampleData/input_files/'
    # endPath = 'ExampleData/test/'
    path, endPath = getFilePath()
    reader_fgi = list(csv.reader(open(path + "fGI_stats.csv", "rt", encoding="ascii"), delimiter=","))
    reader_core = list(csv.reader(open(path + "Core_attfGI.csv", "rt", encoding="ascii"), delimiter=","))
    genomeListing = list(open(path + "db.txt", "r"))

    genomeClusterDict = pickle.load(open(path + "genomeCluster.dict", "rb"))
    genomeLocusDict = pickle.load(open(path + "genomeLocus.dict", "rb"))
    coreDict, fgiDict = createCoreClusterDict(reader_core)

    # genome = 'E00002'
    genomeIdDict = {}
    index = 3
    for genome2 in genomeListing:
        if "\n" in genome2:
            genome2 = genome2[0:-1]
        genomeIdDict[genome2] = index
        index += 1

    for genome in genomeIdDict:
        genomeDict = createfgiInsertDict(reader_fgi, genome)
        referenceList = createfGIFeatures(
            genomeDict, coreDict, fgiDict, genomeClusterDict, genomeLocusDict, genome, genomeIdDict[genome]
        )
        writeFile(endPath, genome, referenceList)

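main() is only defined above; a standard entry-point guard (not part of the original excerpt) makes the script runnable directly:

if __name__ == "__main__":
    main()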
Example #3
def get_feed(max_feed=5):
    """
		This function is responsible to get RSS feeds.
	"""
    try:
        f = urllib2.urlopen("http://feeds.feedburner.com/XSSPosed")
    except urllib2.URLError:
        raise Exception("Internet connection problems")

    tree = xml_parser.fromstring(f.read())
    channel = tree.find("channel")
    items = channel.findall("item")
    for item in reversed(list(items)[0:max_feed]):
        link = list(item.iter("link"))[0].text
        if link not in CONTAINER:
            CONTAINER.add(link)
            site_response = urllib2.urlopen(link).read()
            details = get_details(site_response)
            yield {
                "title": list(item.iter("title"))[0].text,
                "description": details["description"],
                "link": link,
                "status": str(details["status"]),
                "exploit": get_exploit(site_response),
                "post_data": details["post_data"],
            }
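A minimal driver for get_feed, assuming CONTAINER is the module-level set of seen links and that get_details/get_exploit are defined elsewhere in the same module (both are outside this excerpt):

CONTAINER = set()  # links already reported

for entry in get_feed(max_feed=3):
    print entry["title"], "->", entry["link"]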
Example #4
File: eepy.py Project: yuka2py/eepy
    def block(blockname="content"):
        """ with 句で囲んだ範囲をブロックとして登録する。
        既に保存されたブロックがある時、ブロックの内容を保存されたブロックで置き換えます。
        保存されたブロックが無い時、ブロックの内容をそのまま出力し、ブロックを保存します。
        このヘルパは通常、extends と組み合わせて使用します。
        args:
            blockname: ブロックの名前
        """
        locals = buffer_frame_locals()
        locals.setdefault("__blocks", {})

        # If this block was already saved, emit the saved content and discard what is captured here
        if blockname in locals["__blocks"]:
            buffer, locals["__buffer"] = locals["__buffer"], list()
            yield
            locals["__buffer"] = buffer
            locals["__buffer"].append(locals["__blocks"][blockname])  # 仕様:利用後も削除せずに残しておく

        # If no block was saved yet, save the captured content as the block and emit it too
        else:
            buffer, locals["__buffer"] = locals["__buffer"], list()
            yield
            captured = u"".join(locals["__buffer"])
            locals["__buffer"] = buffer
            locals["__buffer"].append(captured)
            locals["__blocks"][blockname] = captured
Example #5
    def run(self, module, post_check):
        try:
            # Simulate the sys.path behaviour described here:
            #
            # https://docs.python.org/2/library/sys.html#sys.path
            _cwd = os.getcwd()
            _sys_path = list(sys.path)
            _sys_argv = list(sys.argv)
            sys.path.insert(0, os.path.dirname(self._path))
            sys.argv = [os.path.basename(self._path)] + self._argv

            exec(self._code, module.__dict__)
            post_check()

        except Exception as e:
            self._failed = True
            self._error_detail = traceback.format_exc()

            exc_type, exc_value, exc_traceback = sys.exc_info()
            filename, line_number, func, txt = traceback.extract_tb(exc_traceback)[-1]

            self._error = '%s\nFile "%s", line %d, in %s:\n%s' % (
                str(e),
                os.path.basename(filename),
                line_number,
                func,
                txt,
            )

        finally:
            # undo sys.path, CWD fixups
            os.chdir(_cwd)
            sys.path = _sys_path
            sys.argv = _sys_argv
            self.ran = True
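The sys.path/sys.argv bookkeeping can be exercised on its own; run_file below is an illustrative helper, not part of the class above:

import os
import sys

def run_file(path, argv=()):
    saved_path, saved_argv = list(sys.path), list(sys.argv)
    sys.path.insert(0, os.path.dirname(os.path.abspath(path)))
    sys.argv = [os.path.basename(path)] + list(argv)
    try:
        with open(path) as f:
            code = compile(f.read(), path, "exec")
        exec(code, {"__name__": "__main__"})
    finally:
        sys.path, sys.argv = saved_path, saved_argv  # undo the fixups even on error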
Example #6
def scatterplot(x_prop_name, x_property, y_prop_name, y_property, x_err=None, y_err=None, name="scatter_plot"):
    """Normal Scatter Plot"""

    if not x_err:
        x_err = list(numpy.zeros(len(x_property)))
    if not y_err:
        y_err = list(numpy.zeros(len(y_property)))

    matplotlib.pyplot.errorbar(
        numpy.log10(x_property),
        numpy.log10(y_property),
        yerr=y_err,
        xerr=x_err,
        marker="o",
        linestyle="None",
        mfc="blue",
        mec="green",
        ecolor="blue",
    )

    matplotlib.pyplot.xlabel(x_prop_name)
    matplotlib.pyplot.ylabel(y_prop_name)
    matplotlib.pyplot.legend()

    savepath = storepath + name + ".png"
    print "lightcurve saved to " + savepath
    matplotlib.pyplot.savefig(savepath)
    matplotlib.pyplot.close()
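A quick smoke test; storepath is a module-level save directory that scatterplot assumes, so it is set here for the example:

storepath = "/tmp/"
scatterplot("mass", [1e10, 1e11, 1e12],
            "luminosity", [1e8, 1e9, 1e10],
            name="mass_vs_luminosity")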
Example #7
    def testResolve(self):
        ad = Environment([])
        ws = WorkingSet([])
        # Resolving no requirements -> nothing to install
        self.assertEqual(list(ws.resolve([], ad)), [])
        # Request something not in the collection -> DistributionNotFound
        self.assertRaises(DistributionNotFound, ws.resolve, parse_requirements("Foo"), ad)
        Foo = Distribution.from_filename("/foo_dir/Foo-1.2.egg", metadata=Metadata(("depends.txt", "[bar]\nBaz>=2.0")))
        ad.add(Foo)
        ad.add(Distribution.from_filename("Foo-0.9.egg"))

        # Request thing(s) that are available -> list to activate
        for i in range(3):
            targets = list(ws.resolve(parse_requirements("Foo"), ad))
            self.assertEqual(targets, [Foo])
            list(map(ws.add, targets))
        self.assertRaises(VersionConflict, ws.resolve, parse_requirements("Foo==0.9"), ad)
        ws = WorkingSet([])  # reset

        # Request an extra that causes an unresolved dependency for "Baz"
        self.assertRaises(DistributionNotFound, ws.resolve, parse_requirements("Foo[bar]"), ad)
        Baz = Distribution.from_filename("/foo_dir/Baz-2.1.egg", metadata=Metadata(("depends.txt", "Foo")))
        ad.add(Baz)

        # Activation list now includes resolved dependency
        self.assertEqual(list(ws.resolve(parse_requirements("Foo[bar]"), ad)), [Foo, Baz])
        # Requests for conflicting versions produce VersionConflict
        self.assertRaises(VersionConflict, ws.resolve, parse_requirements("Foo==1.2\nFoo!=1.2"), ad)
Example #8
    def join_migrations(self, state, migrations_or_ids, host_cmd=None, migration_agent=None):
        if not migrations_or_ids:
            raise ValueError(
                "join_migrations() expects a list of migrations "
                "or their ids to join, got: %r" % (migrations_or_ids,)
            )
        migrations = list(map(self._get_migration, migrations_or_ids))
        for migration in migrations:
            if not migration.is_completable():
                prob = migration.get_problem()
                return fiber.fail(prob)

        if host_cmd is None:
            host_cmd = self.get_configuration().default_host_cmd
        resp = Migration(self, host_cmd=host_cmd, migration_agent=migration_agent)

        recipients = list()
        for migration in migrations:
            recipients += list(migration.get_analyzed())
            for entry in migration.get_checkin_entries():
                resp.checkin(entry)

        resp.analyze(recipients)

        for migration in migrations:
            self.forget_migration(migration)
        f = self.get_shard_structure()
        f.add_callback(fiber.drop_param, self._topology_fixes, resp)
        f.add_callback(self._register_migration)
        return f
Example #9
    def hookup(self):
        print "Loading Emails... takes some time :)"
        if self.emails is None:
            self.load_emails()

        print "Thread started"
        print "Emails: ", len(self.emails)
        self.client, self.address = self.socket.accept()
        print "Connected..."
        i = 0
        while 1:
            time.sleep(0.45)
            try:
                self.sim_date_time += datetime.timedelta(milliseconds=random.randint(10000, 60000))
                if self.sim_date_time.year > 2015:
                    break
                if self.sim_date_time.weekday() >= 5:  # weekday() runs 0-6; 5 and 6 are the weekend
                    continue
                if i < len(self.emails) - 1:
                    i += 1
                else:
                    i = 0
                row = self.emails.iloc[i]

                fr = "~".join(list(self.get_name(row["mailfrom"])))
                to = "~".join(list(self.get_name(row["torecipients"])))
                subj = row["subject"]
                msg = row["content"]

                self.write(fr + "|" + to + "|" + subj.replace("|", " ") + "|" + msg.replace("|", " "))

            except Exception as e:
                print e.message
                self.client, self.address = self.socket.accept()
Example #10
    def test_invalidate_by_one_to_one(self):
        extras = list(Extra.objects.cache().filter(post=3))
        Extra.objects.create(post_id=3, tag=0)

        with self.assertNumQueries(1):
            changed_extras = list(Extra.objects.cache().filter(post=3))
            self.assertEqual(len(changed_extras), len(extras) + 1)
Example #11
    def test_invalidate_by_foreign_key(self):
        posts = list(Post.objects.cache().filter(category=1))
        Post.objects.create(title="New Post", category_id=1)

        with self.assertNumQueries(1):
            changed_posts = list(Post.objects.cache().filter(category=1))
            self.assertEqual(len(changed_posts), len(posts) + 1)
Example #12
def PredictAndAnalyze2(data, target, clf_cv=svm.LinearSVC(), balancing=True):
    aucs = []
    y_trueall = []
    y_predictall = []
    if balancing:
        length = min([len(target[target == 0]), len(target[target == 1]), len(target[target == -1])])
        data = np.r_[data[target == 0][0:length], data[target == 1][0:length], data[target == -1][0:length]]
        target = np.r_[target[target == 0][0:length], target[target == 1][0:length], target[target == -1][0:length]]
    kf = KFold(len(target), n_folds=10, shuffle=True)
    vmats0 = np.array([])
    vmats1 = np.array([])
    vmats2 = np.array([])
    vmats3 = np.array([])
    for train, val in kf:
        X_train, y_train = np.array(data)[train], np.array(target)[train]
        X_test, y_test = np.array(data)[val], np.array(target)[val]
        clf_cv.fit(X_train, y_train)
        y_pred = clf_cv.predict(X_test)
        vmat0 = clf_cv.coef_[0]
        vmat1 = clf_cv.coef_[1]
        vmat2 = clf_cv.coef_[2]
        if vmats0.shape[0] == 0:
            vmats0 = vmat0
            vmats1 = vmat1
            vmats2 = vmat2
        else:
            vmats0 = np.c_[vmats0, vmat0]
            vmats1 = np.c_[vmats1, vmat1]
            vmats2 = np.c_[vmats2, vmat2]
        y_true = y_test
        y_trueall = y_trueall + list(y_true)
        y_predictall = y_predictall + list(y_pred)
    print(classification_report(y_trueall, y_predictall))
    return y_trueall, y_predictall, vmats0, vmats1, vmats2  # ,vmats3
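A synthetic smoke test; the imports reflect the pre-0.18 scikit-learn API (cross_validation.KFold) that the function is written against:

import numpy as np
from sklearn import svm
from sklearn.cross_validation import KFold
from sklearn.metrics import classification_report

data = np.random.randn(300, 5)
target = np.random.choice([-1, 0, 1], size=300)
y_true, y_pred, v0, v1, v2 = PredictAndAnalyze2(data, target)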
Example #13
    def fill_proba_map(self):
        # optimized version of fill_proba_map
        ships_possible_positions = {}
        for ship in self._ships:
            length = SHIP_LENGTH[ship]
            positions = all_ship_positions(length, lambda p: self._board[p] == " ")
            ships_possible_positions[ship] = list(positions)

        ships_order = list(self._ships)
        ships_order.sort(key=lambda ship: len(ships_possible_positions[ship]), reverse=True)

        iterations = 0
        while iterations < 100000 and time.time() - self._start_time < TIMEOUT - 0.1:
            ships = []  # list of ships (set of positions)
            ships_positions = set()  # taken positions

            for ship in ships_order:
                positions = ships_possible_positions[ship]
                if ships:  # not the first ship
                    positions = [
                        points for points in positions if is_intersection_null(points, ships_positions)
                    ]
                points = random.choice(positions)
                ships.append(points)
                ships_positions.update(points)

            for points in ships:
                for pos in points:
                    self._proba_map[pos] += 1

            iterations += 1

        logging.debug("[fill_proba_map] %d iterations in %s secs" % (iterations, time.time() - self._start_time))
Example #14
    def plan_layers(self, layers, output_files):
        next_config = BuildConfig()
        next_config.add_config(layers["layers"][0].config)

        layers["layers"][-1].url = self.name

        for i, layer in enumerate(layers["layers"]):
            log.info(
                "Processing layer: %s%s",
                layer.url,
                "" if "deps" in layer.directory.splitall() else " (from %s)" % layer.directory.relpath(),
            )
            if i + 1 < len(layers["layers"]):
                next_layer = layers["layers"][i + 1]
                next_config = next_config.add_config(next_layer.config)
            else:
                # Add an empty level to the configs to represent that there
                # is no layer after the current one.  This is important for
                # the IgnoreTactic, which needs to look ahead so that it can
                # handle ignoring entire directories.
                next_config = next_config.add_config({})
            # walk() is consumed purely for its side effects on output_files
            for _ in utils.walk(
                layer.directory, self.build_tactics, layer=layer, next_config=next_config, output_files=output_files
            ):
                pass
        plan = [t for t in output_files.values() if t]
        return plan
Example #15
    def testPredict(self):
        """Tests weight column in evaluation."""

        def _input_fn_train():
            # Create 4 rows, one of them (y = x), three of them (y=Not(x))
            target = tf.constant([[1], [0], [0], [0]])
            features = {"x": tf.ones(shape=[4, 1], dtype=tf.float32)}
            return features, target

        def _input_fn_predict():
            y = tf.train.limit_epochs(tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=1)
            features = {"x": y}
            return features

        classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
            linear_feature_columns=[tf.contrib.layers.real_valued_column("x")],
            dnn_feature_columns=[tf.contrib.layers.real_valued_column("x")],
            dnn_hidden_units=[3, 3],
        )

        classifier.fit(input_fn=_input_fn_train, steps=100)

        probs = classifier.predict_proba(input_fn=_input_fn_predict)
        self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
        classes = classifier.predict(input_fn=_input_fn_predict)
        self.assertListEqual([0] * 4, list(classes))

        probs = classifier.predict_proba(input_fn=_input_fn_predict, as_iterable=True)
        self.assertAllClose([[0.75, 0.25]] * 4, list(probs), 0.05)
        classes = classifier.predict(input_fn=_input_fn_predict, as_iterable=True)
        self.assertListEqual([0] * 4, list(classes))
Example #16
    def test4AlignConfs(self):
        mol = Chem.MolFromSmiles("C1CC1CNc(n2)nc(C)cc2Nc(cc34)ccc3[nH]nc4")

        cids = rdDistGeom.EmbedMultipleConfs(mol, 10, 30, 100)
        writer = Chem.SDWriter("mol_899.sdf")

        for cid in cids:
            print "cid:", repr(cid)
            ff = ChemicalForceFields.UFFGetMoleculeForceField(mol, confId=cid)
            ff.Initialize()
            more = 1
            while more:
                more = ff.Minimize()
            # FIX: this should not be necessary but somehow more comes out to be 0
            # even with the structure still being crappy
            ff.Minimize()
        aids = [12, 13, 14, 15, 16, 17, 18]
        rdMolAlign.AlignMolConformers(mol, aids)

        # now test that the atom location of these atom are consistent
        confs = mol.GetConformers()
        for aid in aids:
            mpos = 0
            for i, conf in enumerate(confs):
                if i == 0:
                    mpos = list(conf.GetAtomPosition(aid))
                    continue
                else:
                    pos = list(conf.GetAtomPosition(aid))

                    self.failUnless(lstFeq(mpos, pos, 0.5))
Example #17
def lstFeq(l1, l2, tol=1.0e-4):
    l1, l2 = list(l1), list(l2)
    if len(l1) != len(l2):
        return 0
    for a, b in zip(l1, l2):
        if not feq(a, b, tol):
            return 0
    return 1
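lstFeq relies on a scalar comparator feq that is outside this excerpt; a minimal sketch of what it presumably looks like:

def feq(v1, v2, tol=1.0e-4):
    # floating-point "equal within tolerance"
    return abs(v1 - v2) < tol

print(lstFeq([1.0, 2.0], [1.00001, 2.0]))  # -> 1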
Example #18
    def __init__(self, waveReader, onsetSamples):
        self.samples = waveReader.channels[0]
        self.segments = []
        self.onsets = [0]

        crossings = [i for i in xrange(len(self.samples) - 1) if self.samples[i] < 0 and self.samples[i + 1] >= 0]

        for onset in onsetSamples:
            self.onsets.append(crossings[bisect(crossings, onset) - 1])

        self.onsets = sorted(list(set(self.onsets)))

        for i in xrange(len(self.onsets) - 1):
            s = Segment(self.samples[self.onsets[i] : self.onsets[i + 1]])
            self.segments.append(s)

        simMatrix = self.findNeighborMatrix()

        smCopy = array(simMatrix, copy=True)
        fill_diagonal(smCopy, 0)

        sims = sorted(list(smCopy.reshape(-1)), reverse=True)[:TARGET_NUM_JUMPS]
        SIMILARITY_THRESHOLD = sims[-1]

        print "Using similarity threshold = ", SIMILARITY_THRESHOLD

        for i in xrange(len(self.segments)):
            self.segments[i].neighbors = [
                j for j in xrange(len(simMatrix[i])) if simMatrix[i][j] >= SIMILARITY_THRESHOLD and abs(i - j) > 10
            ]
Example #19
    def RFolder():
        folder = ProcessingConfig.getSetting(RUtils.R_FOLDER)
        if folder is None:
            if isWindows():
                if "ProgramW6432" in list(os.environ.keys()) and os.path.isdir(
                    os.path.join(os.environ["ProgramW6432"], "R")
                ):
                    testfolder = os.path.join(os.environ["ProgramW6432"], "R")
                elif "PROGRAMFILES(x86)" in list(os.environ.keys()) and os.path.isdir(
                    os.path.join(os.environ["PROGRAMFILES(x86)"], "R")
                ):
                    testfolder = os.path.join(os.environ["PROGRAMFILES(x86)"], "R")
                elif "PROGRAMFILES" in list(os.environ.keys()) and os.path.isdir(
                    os.path.join(os.environ["PROGRAMFILES"], "R")
                ):
                    testfolder = os.path.join(os.environ["PROGRAMFILES"], "R")
                else:
                    testfolder = "C:\\R"

                if os.path.isdir(testfolder):
                    subfolders = os.listdir(testfolder)
                    subfolders.sort(reverse=True)
                    for subfolder in subfolders:
                        if subfolder.startswith("R-"):
                            folder = os.path.join(testfolder, subfolder)
                            break
                else:
                    folder = ""
            else:
                folder = ""

        return os.path.abspath(str(folder))
Example #20
    def testGetAvailableGlossaryMetaTypes(self):
        self.loginAsPortalOwner()
        tool = self.glossary_tool
        available_metatypes = tool.getAvailableGlossaryMetaTypes()
        glossary_metatypes = tool.glossary_metatypes

        # test available metatypes, base glossary selected by default
        self.assertEquals(available_metatypes, ("PloneGlossary", "ExampleGlossary"))
        self.assertEquals(glossary_metatypes, ("PloneGlossary",))

        # test : only selected metatypes are returned by getGlossaryUIDs
        glossary = self.glossary
        glossaryuid = glossary.UID()
        exampleglossary = self.addExampleGlossary(self.portal, "Example", (u"Sport", u"Tennis", u"Open source"))
        exampleuid = exampleglossary.UID()

        # test : only the selected glossary's UID is returned so far
        glossary_uids = list(self.glossary_tool.getGlossaryUIDs())
        glossary_uids.sort()
        self.assertEquals(glossary_uids, [glossaryuid])

        # test : add a glossary type
        tool.glossary_metatypes = ("PloneGlossary", "ExampleGlossary")
        glossary_uids = list(self.glossary_tool.getGlossaryUIDs())
        glossary_uids.sort()
        uids = [glossaryuid, exampleuid]
        uids.sort()
        self.assertEquals(glossary_uids, uids)
        LOG.info("testGetAvailableGlossaryMetaTypes passed")
        self.logout()
Example #21
 def check(self, fix=False, silent=False):
     """Checks a grid for errors, and optionally fixes them.  Errors checked for are:
     - blocks not connected to any other blocks
     - blocks with isolated rocktypes
     Returns True if no errors were found, and False otherwise.  If silent is True, there is no printout.
     Unconnected blocks are fixed by deleting them.  Isolated rocktype blocks are fixed by assigning them the
     most popular rocktype of their neighbours."""
     ok = True
     ub = self.unconnected_blocks
     if len(ub) > 0:
         ok = False
         if not silent:
             print "Unconnected blocks:", list(ub)
         if fix:
             for blk in ub:
                 self.delete_block(blk)
             if not silent:
                 print "Unconnected blocks fixed."
     ib = self.isolated_rocktype_blocks
     if len(ib) > 0:
         ok = False
         if not silent:
             print "Isolated rocktype blocks:", list(ib)
         if fix:
             for blk in ib:
                 nbr_rocktype = [self.block[nbr].rocktype.name for nbr in self.block[blk].neighbour_name]
                 pop_rocktype = max(set(nbr_rocktype), key=nbr_rocktype.count)
                 self.block[blk].rocktype = self.rocktype[pop_rocktype]
             if not silent:
                 print "Isolated rocktype blocks fixed."
     if ok and not silent:
         print "No problems found."
     return ok
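The "most popular rocktype of the neighbours" step reduces to a max over counts; a stand-alone illustration:

nbr_rocktype = ["sand", "clay", "sand"]
pop_rocktype = max(set(nbr_rocktype), key=nbr_rocktype.count)
print(pop_rocktype)  # -> sand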
Example #22
 def __init__(self, poll_interval):
     self.module_mtimes = {}
     self.keep_running = True
     self.poll_interval = poll_interval
     self.extra_files = list(self.global_extra_files)
     self.instances.append(self)
     self.file_callbacks = list(self.global_file_callbacks)
Example #23
def triangulation(vertices, nvertices):
    "triangulation"

    bedges = {}
    nmap = {}
    I = numpy.array([[2, 0], [0, 1], [1, 2]])
    for n123 in vertices:
        elem = element.TriangularElement()
        nmap[elem] = n123
        for iedge, (n1, n2) in enumerate(n123[I]):
            try:
                del bedges[(n2, n1)]
            except KeyError:
                bedges[(n1, n2)] = elem, iedge

    dofaxis = function.DofAxis(nvertices, nmap)
    stdelem = element.PolyTriangle(1)
    linearfunc = function.Function(dofaxis=dofaxis, stdmap=dict.fromkeys(nmap, stdelem))
    namedfuncs = {"spline2": linearfunc}

    connectivity = dict(bedges.iterkeys())  # maps start vertex -> end vertex of each boundary edge
    N = list(connectivity.popitem())
    while connectivity:  # walk the boundary loop vertex by vertex
        N.append(connectivity.pop(N[-1]))
    assert N[0] == N[-1]  # the boundary must close on itself

    structure = []
    for n12 in zip(N[:-1], N[1:]):
        elem, iedge = bedges[n12]
        structure.append(elem.edge(iedge))

    topo = topology.UnstructuredTopology(list(nmap), ndims=2, namedfuncs=namedfuncs)
    topo.boundary = topology.StructuredTopology(structure, periodic=(1,))
    return topo
Example #24
    def get_url(
        self, endpoint, sortby=None, additional_restrictions=None, descending=None, page=1, ignore_restriction=None
    ):
        restrictions = None
        if self.restrictions is None:
            if additional_restrictions is not None:
                page = 1
                restrictions = list(additional_restrictions)
        else:
            restrictions = list(self.restrictions)
            if additional_restrictions is not None:
                page = 1
                restrictions = self.restrictions + additional_restrictions

            if ignore_restriction is not None:

                page = 1
                if len(restrictions) == 1:
                    restrictions = None
                else:
                    restrictions.remove(ignore_restriction)

        return url_for(endpoint, relname=self.relname) + get_get(
            restrictions, sortby, descending if descending is not None else self.descending, page
        )
Example #25
    def check_AlignIO_to_EMBOSS(self, in_filename, in_format, skip_formats=[], alphabet=None):
        """Can Bio.AlignIO write files seqret can read back?"""
        if alphabet:
            old_aligns = list(AlignIO.parse(in_filename, in_format, alphabet))
        else:
            old_aligns = list(AlignIO.parse(in_filename, in_format))

        formats = ["clustal", "phylip"]
        if len(old_aligns) == 1:
            formats.extend(["fasta", "nexus"])
        for temp_format in formats:
            if temp_format in skip_formats:
                continue
            # PHYLIP is a simple format which explicitly supports
            # multiple alignments (unlike FASTA).
            try:
                new_aligns = list(emboss_piped_AlignIO_convert(old_aligns, temp_format, "phylip"))
            except ValueError as e:
                # e.g. ValueError: Need a DNA, RNA or Protein alphabet
                # from writing Nexus files...
                continue
            try:
                self.assertTrue(compare_alignments(old_aligns, new_aligns))
            except ValueError as err:
                raise ValueError("Disagree on file %s %s in %s format: %s" % (in_format, in_filename, temp_format, err))
Example #26
 def load_stl_into_model(self, path, name, offset=[0, 0, 0], rotation=0, scale=[1.0, 1.0, 1.0]):
     model = stltool.stl(path)
     model.offsets = list(offset)
     model.rot = rotation
     model.scale = list(scale)
     model.filename = name
     minx = float("inf")
     miny = float("inf")
     minz = float("inf")
     maxx = float("-inf")
     maxy = float("-inf")
     maxz = float("-inf")
     for i in model.facets:
         for j in i[1]:
             if j[0] < minx:
                 minx = j[0]
             if j[1] < miny:
                 miny = j[1]
             if j[2] < minz:
                 minz = j[2]
             if j[0] > maxx:
                 maxx = j[0]
             if j[1] > maxy:
                 maxy = j[1]
             if j[2] > maxz:
                 maxz = j[2]
     model.dims = [minx, maxx, miny, maxy, minz, maxz]
     self.add_model(name, model)
     model.centeroffset = [-(model.dims[1] + model.dims[0]) / 2, -(model.dims[3] + model.dims[2]) / 2, 0]
     self.s.drawmodel(model, 2)
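The bounding-box scan can be written more compactly; a behaviour-equivalent sketch assuming each facet is a (normal, vertices) pair as iterated above:

xs = [v[0] for facet in model.facets for v in facet[1]]
ys = [v[1] for facet in model.facets for v in facet[1]]
zs = [v[2] for facet in model.facets for v in facet[1]]
model.dims = [min(xs), max(xs), min(ys), max(ys), min(zs), max(zs)]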
Example #27
 def test_transport_adapter_ordering(self):
     s = requests.Session()
     order = ["https://", "http://"]
     assert order == list(s.adapters)
     s.mount("http://git", HTTPAdapter())
     s.mount("http://github", HTTPAdapter())
     s.mount("http://github.com", HTTPAdapter())
     s.mount("http://github.com/about/", HTTPAdapter())
     order = ["http://github.com/about/", "http://github.com", "http://github", "http://git", "https://", "http://"]
     assert order == list(s.adapters)
     s.mount("http://gittip", HTTPAdapter())
     s.mount("http://gittip.com", HTTPAdapter())
     s.mount("http://gittip.com/about/", HTTPAdapter())
     order = [
         "http://github.com/about/",
         "http://gittip.com/about/",
         "http://github.com",
         "http://gittip.com",
         "http://github",
         "http://gittip",
         "http://git",
         "https://",
         "http://",
     ]
     assert order == list(s.adapters)
     s2 = requests.Session()
     s2.adapters = {"http://": HTTPAdapter()}
     s2.mount("https://", HTTPAdapter())
     assert "http://" in s2.adapters
     assert "https://" in s2.adapters
Example #28
    def test_70(self):
        Contained(name="aaa").save()
        contained_obj = Contained.objects.get(name="aaa")
        GenericContainer(content_object=contained_obj, name="bbb").save()

        qs = Contained.objects.cache().filter(containers__name="bbb")
        list(qs)
Example #29
def __run_helper__(environment, short_name, long_name, version, skip, install, quiet):
    helper = sys.modules[long_name]
    configured.append(short_name)
    cfg = helper.configuration()
    for dep in cfg.dependencies:
        dep_name = dep
        if not is_string(dep):
            dep_name = dep[0]
        if dep_name in configured:
            continue
        environment = __configure_package(environment, dep, skip, install, quiet)
        save_cache(environment)
    if not quiet:
        sys.stdout.write("Checking for " + short_name + " ")
        if version is not None:
            sys.stdout.write("v." + version)
        sys.stdout.write("\n")
        sys.stdout.flush()
    if skip:
        cfg.null()
    elif not cfg.is_installed(environment, version):
        if not install:
            raise Exception(short_name + " cannot be found.")
        cfg.install(environment, version)
    env = dict(list(cfg.environment.items()) + list(environment.items()))
    if not "PREREQUISITES" in env:
        env["PREREQUISITES"] = [short_name]
    else:
        tmp_env = env["PREREQUISITES"] + [short_name]
        env["PREREQUISITES"] = list(set(tmp_env))
    save_cache(env)  ## intermediate cache
    return env
Example #30
    def tests_paginator_autopaginate(self):
        # first page
        req = RequestFactory().get("/")
        context = {"request": req}
        items = list(range(0, 20))
        page = paginator_autopaginate(context, items, per_page=10, page_var="val")
        self.assertIsInstance(page, Page)
        self.assertEqual(list(page), items[:10])

        # second page
        req = RequestFactory().get("/?val=2")
        context = {"request": req}
        page = paginator_autopaginate(context, items, per_page=10, page_var="val")
        self.assertEqual(list(page), items[10:20])

        # invalid page
        req = RequestFactory().get("/?val=3")
        context = {"request": req}
        self.assertRaises(Http404, paginator_autopaginate, context, items, per_page=10, page_var="val")

        # empty first page
        req = RequestFactory().get("/")
        context = {"request": req}
        page = paginator_autopaginate(context, [], per_page=10, page_var="val")
        self.assertListEqual(list(page), [])