Example #1
    def dir(self, *filters):
        """Return an alphabetical list of data_element keywords in the dataset.

        Intended mainly for use in interactive Python sessions.

        Parameters
        ----------
        filters : str
            Zero or more string arguments to the function. Used for
            case-insensitive match to any part of the DICOM name.

        Returns
        -------
        All data_element names in this dataset matching the filters.
        If no filters, return all DICOM keywords in the dataset.
        """
        allnames = []
        for tag, data_element in self.items():
            allnames.extend(all_names_for_tag(tag))
        # remove blanks - tags without valid names (e.g. private tags)
        allnames = [x for x in allnames if x]
        # Store found names in a dict, so duplicate names appear only once
        matches = {}
        for filter_ in filters:
            filter_ = filter_.lower()
            match = [x for x in allnames if filter_ in x.lower()]
            matches.update({x: 1 for x in match})
        if filters:
            names = sorted(matches.keys())
            return names
        else:
            return sorted(allnames)
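A rough standalone sketch of the matching step above, with made-up keyword names rather than real pydicom output:

allnames = ["PatientName", "PatientID", "StudyDate"]  # hypothetical keywords
matches = {}
for filter_ in ("patient",):
    matches.update({x: 1 for x in allnames if filter_.lower() in x.lower()})
print(sorted(matches.keys()))  # ['PatientID', 'PatientName']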
Example #2
            def create_display_frame():
                vbox = Gtk.VBox(spacing=6)
                model = Gtk.ListStore(str, str)

                def on_changed(combo):
                    it = combo.get_active_iter()
                    if it is None:
                        return
                    DURATION.format = model[it][0]
                    app.window.songlist.info.refresh()
                    app.window.qexpander.refresh()
                    # TODO: refresh info windows ideally too (but see #2019)

                def draw_duration(column, cell, model, it, data):
                    df, example = model[it]
                    cell.set_property("text", example)

                for df in sorted(DurationFormat.values):
                    # 4954s == longest ever CD, FWIW
                    model.append([df, format_time_preferred(4954, df)])
                duration = Gtk.ComboBox(model=model)
                cell = Gtk.CellRendererText()
                duration.pack_start(cell, True)
                duration.set_cell_data_func(cell, draw_duration, None)
                index = sorted(DurationFormat.values).index(DURATION.format)
                duration.set_active(index)
                duration.connect("changed", on_changed)
                hbox = Gtk.HBox(spacing=6)
                label = Gtk.Label(label=_("Duration totals") + ":", use_underline=True)
                label.set_mnemonic_widget(duration)
                hbox.pack_start(label, False, True, 0)
                hbox.pack_start(duration, False, True, 0)

                vbox.pack_start(hbox, False, True, 0)
                return qltk.Frame(_("Display"), child=vbox)
Example #3
    def run(self):
        # Updates the existing list or creates a new one:
        # add the new items in 'contents' to the existing list for the topic;
        # if a package appears to be removed in the new contents and is in
        # the 'done' topic, it gets removed from the new list.

        self._ensure_pad_exists()
        doc = self._parse_pad()

        topic = self.module.params["topic"]
        new_contents = set(self.module.params["contents"])
        done = doc["done"]

        existing_list = set(doc.get(topic, []))
        updated_list = existing_list.difference(done).union(new_contents)
        contents_changed = existing_list != updated_list

        if contents_changed:
            doc[topic] = sorted(updated_list)
            doc["all"] = sorted(self._all_items(doc, ["all", "done"]))
            updated_yaml = yaml.safe_dump(doc, default_flow_style=False)
            self.epad.setText(padID=self.pad_id, text=updated_yaml)

        result = {"changed": contents_changed}
        self.module.exit_json(**result)
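A minimal standalone sketch of the set algebra above, with made-up package names:

existing_list = {"pkg-a", "pkg-b", "pkg-c"}
done = {"pkg-b"}                    # anything already done drops out
new_contents = {"pkg-d"}            # new items are merged in
updated_list = existing_list.difference(done).union(new_contents)
print(sorted(updated_list))         # ['pkg-a', 'pkg-c', 'pkg-d']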
Example #4
    def expand_macro(self, formatter, name, args):
        from trac.mimeview.api import Mimeview

        mime_map = Mimeview(self.env).mime_map
        mime_type_filter = ""
        args, kw = parse_args(args)
        if args:
            mime_type_filter = args.pop(0).strip().rstrip("*")

        mime_types = {}
        for key, mime_type in mime_map.iteritems():
            if (not mime_type_filter or mime_type.startswith(mime_type_filter)) and key != mime_type:
                mime_types.setdefault(mime_type, []).append(key)

        return tag.div(class_="mimetypes")(
            tag.table(class_="wiki")(
                tag.thead(
                    tag.tr(
                        tag.th(_("MIME Types")),  # always use plural
                        tag.th(tag.a("WikiProcessors", href=formatter.context.href.wiki("WikiProcessors"))),
                    )
                ),
                tag.tbody(
                    tag.tr(
                        tag.th(tag.tt(mime_type), style="text-align: left"),
                        tag.td(tag.code(" ".join(sorted(mime_types[mime_type])))),
                    )
                    for mime_type in sorted(mime_types.keys())
                ),
            )
        )
Example #5
 def test_reverse_nocopy(self):
     G = networkx.MultiDiGraph([(0, 1), (0, 1)])
     R = G.reverse(copy=False)
     assert_equal(sorted(R.edges()), [(1, 0), (1, 0)])
     R.remove_edge(1, 0)
     assert_equal(sorted(R.edges()), [(1, 0)])
     assert_equal(sorted(G.edges()), [(1, 0)])
Example #6
 def testWorkflows(self):
     """Test workflows defined in SoS script"""
     script = SoS_Script("""[0]""")
     self.assertEqual(sorted(script.workflows), ["default"])
     script = SoS_Script("""[0]\n[1]""")
     self.assertEqual(sorted(script.workflows), ["default"])
     script = SoS_Script("""[0]\n[*_1]""")
     self.assertEqual(sorted(script.workflows), ["default"])
     script = SoS_Script("""[0]\n[*_1]\n[auxiliary:provides='{a}.txt']""")
     self.assertEqual(sorted(script.workflows), ["auxiliary", "default"])
     script = SoS_Script("""[0]\n[*_1]\n[human_1]""")
     self.assertEqual(sorted(script.workflows), ["default", "human"])
     script = SoS_Script("""[0]\n[*_1]\n[human_1]\n[mouse_2]""")
     self.assertEqual(sorted(script.workflows), ["default", "human", "mouse"])
     script = SoS_Script("""[0]\n[*_1]\n[human_1]\n[mouse_2]\n[s*_2]""")
     self.assertEqual(sorted(script.workflows), ["default", "human", "mouse"])
     # skip option is not effective at parsing time
     script = SoS_Script("""[0]\n[*_1]\n[human_1]\n[mouse_2:skip]\n[s*_2]""")
     self.assertEqual(sorted(script.workflows), ["default", "human", "mouse"])
     # unnamed
     script = SoS_Script("""[0]\n[*_1]\n[human_1]\n[mouse]\n[s*_2]""")
     self.assertEqual(sorted(script.workflows), ["default", "human", "mouse"])
     #
     # workflow name with -
     script = SoS_Script("""[proc-1]\n[test-case_2]""")
     self.assertEqual(sorted(script.workflows), ["proc-1", "test-case"])
     script.workflow("proc-1")
     script.workflow("proc-1 + test-case:2")
Example #7
    def testFiltering(self):
        result = ols(y=self.panel_y2, x=self.panel_x2)

        x = result._x
        index = x.index.get_level_values(0)
        index = Index(sorted(set(index)))
        exp_index = Index([datetime(2000, 1, 1), datetime(2000, 1, 3)])
        self.assertTrue(exp_index.equals(index))

        index = x.index.get_level_values(1)
        index = Index(sorted(set(index)))
        exp_index = Index(["A", "B"])
        self.assertTrue(exp_index.equals(index))

        x = result._x_filtered
        index = x.index.get_level_values(0)
        index = Index(sorted(set(index)))
        exp_index = Index([datetime(2000, 1, 1), datetime(2000, 1, 3), datetime(2000, 1, 4)])
        self.assertTrue(exp_index.equals(index))

        assert_almost_equal(result._y.values.flat, [1, 4, 5])

        exp_x = [[6, 14, 1], [9, 17, 1], [30, 48, 1]]
        assert_almost_equal(exp_x, result._x.values)

        exp_x_filtered = [[6, 14, 1], [9, 17, 1], [30, 48, 1], [11, 20, 1], [12, 21, 1]]
        assert_almost_equal(exp_x_filtered, result._x_filtered.values)

        self.assertTrue(result._x_filtered.index.levels[0].equals(result.y_fitted.index))
Example #8
import inspect
import sys


def prvars(varNames=None):
    if not debugging:
        return
    if isinstance(varNames, str):
        vnList = varNames.split()
    elif varNames is not None:
        vnList = list(varNames)
    caller = inspect.stack()[1]
    cLocals = caller[0].f_locals  # local variables of caller
    # print cLocals
    fileLine = caller[2]
    functionName = caller[3]
    filename = caller[0].f_code.co_filename
    output = "%s():%d" % (functionName, fileLine)
    outputForSelf = " " * len(output)
    printAllSelf = False
    if varNames is None:
        for vn in sorted(cLocals.keys()):
            output += " %s=%r" % (vn, cLocals[vn])
        if cLocals.has_key("self"):
            printAllSelf = True
    else:
        for vn in vnList:
            if vn.startswith("self."):
                output += _prVarsSelf(cLocals, vn)
            elif vn in cLocals:
                output += " %s=%r" % (vn, cLocals[vn])
                if vn == "self":
                    printAllSelf = True
    if printAllSelf:
        selfOb = cLocals["self"]
        for insVar in sorted(selfOb.__dict__.keys()):
            val = selfOb.__dict__[insVar]
            output += "\n" + outputForSelf + " self.%s=%r" % (insVar, val)
    sys.stderr.write(output + "\n")
Example #9
 def check(builders):
     self.assertEqual(sorted(builders.keys()), sorted(["mybld", "yourbld"]))
     self.assertTrue(os.path.exists(os.path.join(self.basedir, "myblddir")))
     self.assertTrue(os.path.exists(os.path.join(self.basedir, "yourblddir")))
     # 'my' should still be the same slavebuilder object
     self.assertEqual(id(slavebuilders["my"]), id(builders["mybld"]))
     slavebuilders["your"] = builders["yourbld"]
Example #10
File: fk1fk1.py Project: frecar/dw
def fk1fk1(transactions, k=0, support_count=0):
    itemSets = candidate_generations(transactions, k - 1)
    k1min1Sets = get_k_sets(itemSets, k - 1)
    k1min2Sets = get_k_sets(itemSets, k - 1)

    for itemSet in itemSets:
        for transaction in transactions:
            if itemSet.items_in(transaction.items):
                itemSet.support_count += 1

    candidates = []

    k1min1Sets = prune(k1min1Sets, 3)
    k1min2Sets = prune(k1min2Sets, 3)

    for sett in k1min1Sets:
        for s in k1min2Sets:

            s.items = sorted(s.items)
            sett.items = sorted(sett.items)

            if s.items[0] == sett.items[0]:
                itemSet = ItemSet()
                itemSet.items.extend(sett.items[1:])
                itemSet.items.extend(s.items)

                if itemSet not in candidates and len(set(itemSet.items)) >= k:
                    candidates.append(itemSet)

    return candidates
Example #11
    def _makeBackupPeriodRanges(self, rows):
        periods = []
        dict_periods = {}

        rows = [
            (play_number, t.renderContents())
            for play_number, row in rows
            for t in row.findAll("td")
            if len(row.findAll("td")) == 1
        ]
        for play_number, row in rows:
            match = re.search(".*(?P<num>\d)(st|nd|rd|th)\s+(?P<period_type>(Quarter|Overtime))\s+Summary.*", row)
            if match:
                period_name = "%s %s" % (match.group("num"), match.group("period_type"))
                # period_number = constants.PERIODS.index(period_name)
                period_number = constants.PERIODS[period_name]
                periods.append((period_number, play_number, period_name))

        for period_number, play_number, period_name in sorted(periods):

            dict_periods[period_number] = {"start": play_number}
            try:
                next_start = [itm[1] for itm in sorted(periods) if itm[0] == period_number + 1][0]
                dict_periods[period_number]["end"] = next_start - 1
            except IndexError:
                dict_periods[period_number]["end"] = play_number + 300

        return dict_periods
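A small runnable sketch of the start/end pairing above, with invented play numbers: each period ends one play before the next one starts, and the last falls back to a fixed window.

periods = [(1, 0, "1 Quarter"), (2, 118, "2 Quarter")]  # hypothetical rows
dict_periods = {}
for period_number, play_number, _name in sorted(periods):
    dict_periods[period_number] = {"start": play_number}
    nxt = [p for n, p, _ in sorted(periods) if n == period_number + 1]
    dict_periods[period_number]["end"] = nxt[0] - 1 if nxt else play_number + 300
print(dict_periods)  # {1: {'start': 0, 'end': 117}, 2: {'start': 118, 'end': 418}}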
Example #12
import re


def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
    refnames = variables["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("variables are unexpanded, not using")
        return {}  # unexpanded, so not in an unpacked git-archive tarball
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r"\d", r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix) :]
            if verbose:
                print("picking %s" % r)
            return {"version": r, "full": variables["full"].strip()}
    # no suitable tags, so we use the full revision id
    if verbose:
        print("no suitable tags, using full revision id")
    return {"version": variables["full"].strip(), "full": variables["full"].strip()}
Example #13
def check_ver(arg):
    ver_history = {
        "20080307": "v3 or v4 or v5",
        "20080324": "v5 above",
        "20080807": "5.1 or 5.2",
        "20081009": "v5.1sp",
        "20081218": "5.1sp",
        "20090810": "5.5",
        "20090912": "5.5",
        "20100803": "5.6",
        "20101021": "5.3",
        "20111111": "v5.7 or v5.6 or v5.5",
        "20111205": "5.7.18",
        "20111209": "5.6",
        "20120430": "5.7SP or 5.7 or 5.6",
        "20120621": "5.7SP1 or 5.7 or 5.6",
        "20120709": "5.6",
        "20121030": "5.7SP1 or 5.7",
        "20121107": "5.7",
        "20130608": "V5.6-Final",
        "20130922": "V5.7SP1",
    }
    ver_list = sorted(ver_history.keys())
    ver_list.append(arg)
    sorted_ver_list = sorted(ver_list)
    return ver_history[sorted_ver_list[sorted_ver_list.index(arg) - 1]]
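An assumed usage sketch: map a datestamp to the nearest earlier known release.

print(check_ver("20090901"))  # falls between 20090810 and 20090912 -> '5.5'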
Example #14
 def start_event(self):
     if not self.used and not self.in_progress:
         results = db.c.fetch_all(
             "SELECT song_id, entry_votes FROM r4_election_entries WHERE elec_id = %s", (self.id,)
         )
         for song in self.songs:
             for song_result in results:
                 if song_result["song_id"] == song.id:
                     song.data["entry_votes"] == song_result["entry_votes"]
                     # Auto-votes for somebody's request
                 if song.data["entry_type"] == ElecSongTypes.request:
                     if (
                         db.c.fetch_var(
                             "SELECT COUNT(*) FROM r4_vote_history WHERE user_id = %s AND elec_id = %s",
                             (song.data["elec_request_user_id"], self.id),
                         )
                         == 0
                     ):
                         song.data["entry_votes"] += 1
         random.shuffle(self.songs)
         self.songs = sorted(self.songs, key=lambda song: song.data["entry_type"])
         self.songs = sorted(self.songs, key=lambda song: song.data["entry_votes"])
         self.songs.reverse()
         self.in_progress = True
         db.c.update(
             "UPDATE r4_elections SET elec_in_progress = TRUE, elec_start_actual = %s WHERE elec_id = %s",
             (time.time(), self.id),
         )
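The two consecutive stable sorts followed by reverse() amount to ordering primarily by votes and secondarily by entry type, both descending; a standalone sketch with made-up song data:

songs = [{"entry_type": 1, "entry_votes": 2},
         {"entry_type": 0, "entry_votes": 2},
         {"entry_type": 2, "entry_votes": 5}]
songs = sorted(songs, key=lambda s: s["entry_type"])
songs = sorted(songs, key=lambda s: s["entry_votes"])  # stable: type order kept
songs.reverse()
print([(s["entry_votes"], s["entry_type"]) for s in songs])  # [(5, 2), (2, 1), (2, 0)]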
Example #15
 def test_get_paths(self):
     scheme = get_paths()
     default_scheme = _get_default_scheme()
     wanted = _expand_vars(default_scheme, None)
     wanted = sorted(wanted.items())
     scheme = sorted(scheme.items())
     self.assertEqual(scheme, wanted)
Example #16
    def testStorageValue(self):
        logic = MeshStatisticsLogic()
        print " Test storage of Values: "
        arrayValue = vtk.vtkDoubleArray()
        arrayMask = vtk.vtkDoubleArray()
        for i in range(0, 1000, 2):
            arrayValue.InsertNextValue(i)
            arrayValue.InsertNextValue(i)
            arrayMask.InsertNextValue(0.0)
            arrayMask.InsertNextValue(0.0)
        listOfRandomNumber = list()
        for i in range(0, 250):
            listOfRandomNumber.append(randint(0, 998))

        listOfRandomNumber = list(set(listOfRandomNumber))
        listOfRandomNumber = sorted(listOfRandomNumber)
        for index in listOfRandomNumber:
            arrayMask.SetValue(index, 1.0)
        success, array = logic.defineArray(arrayValue, arrayMask)
        array = sorted(array)
        a = 0
        for i in listOfRandomNumber:
            if arrayValue.GetValue(i) != array[a]:
                print "        Failed", a, array[a], i, arrayValue.GetValue(i)
                return False
            a += 1
        print "         Passed"
        return True
Example #17
    def testInput(self):
        """Test input directive"""
        self.touch(["a.txt", "b.txt", "a.pdf", "a0", "a1"])
        script = SoS_Script(
            """
[0]
files = ['a.txt', 'b.txt']

input: 'a.pdf', files

"""
        )
        wf = script.workflow("default")
        Base_Executor(wf).dryrun()
        #
        # test input types
        script = SoS_Script(
            """
[0:shared={'i':'input', 'o':'output'}]
files = ("a${i}" for i in range(2))
input: {'a.txt', 'b.txt'}, files
output: ("a${x}" for x in _input)

"""
        )
        wf = script.workflow()
        Base_Executor(wf).dryrun()
        self.assertEqual(sorted(env.sos_dict["i"]), ["a.txt", "a0", "a1", "b.txt"])
        self.assertEqual(sorted(env.sos_dict["o"]), ["aa.txt", "aa0", "aa1", "ab.txt"])
Example #18
    def test_save_and_load_builder(self):
        df = self.df
        tmpPath = tempfile.mkdtemp()
        shutil.rmtree(tmpPath)
        df.write.json(tmpPath)
        actual = self.sqlCtx.read.json(tmpPath)
        self.assertEqual(sorted(df.collect()), sorted(actual.collect()))

        schema = StructType([StructField("value", StringType(), True)])
        actual = self.sqlCtx.read.json(tmpPath, schema)
        self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))

        df.write.mode("overwrite").json(tmpPath)
        actual = self.sqlCtx.read.json(tmpPath)
        self.assertEqual(sorted(df.collect()), sorted(actual.collect()))

        df.write.mode("overwrite").options(noUse="this options will not be used in save.").format("json").save(
            path=tmpPath
        )
        actual = self.sqlCtx.read.format("json").load(path=tmpPath, noUse="this options will not be used in load.")
        self.assertEqual(sorted(df.collect()), sorted(actual.collect()))

        defaultDataSourceName = self.sqlCtx.getConf("spark.sql.sources.default", "org.apache.spark.sql.parquet")
        self.sqlCtx.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
        actual = self.sqlCtx.load(path=tmpPath)
        self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
        self.sqlCtx.sql("SET spark.sql.sources.default=" + defaultDataSourceName)

        shutil.rmtree(tmpPath)
Example #19
    def __init__(self, waveReader, onsetSamples):
        self.samples = waveReader.channels[0]
        self.segments = []
        self.onsets = [0]

        crossings = [i for i in xrange(len(self.samples) - 1) if self.samples[i] < 0 and self.samples[i + 1] >= 0]

        for onset in onsetSamples:
            self.onsets.append(crossings[bisect(crossings, onset) - 1])

        self.onsets = sorted(list(set(self.onsets)))

        for i in xrange(len(self.onsets) - 1):
            s = Segment(self.samples[self.onsets[i] : self.onsets[i + 1]])
            self.segments.append(s)

        simMatrix = self.findNeighborMatrix()

        smCopy = array(simMatrix, copy=True)
        fill_diagonal(smCopy, 0)

        sims = sorted(list(smCopy.reshape(-1)), reverse=True)[:TARGET_NUM_JUMPS]
        SIMILARITY_THRESHOLD = sims[-1]

        print "Using similarity threshold = ", SIMILARITY_THRESHOLD

        for i in xrange(len(self.segments)):
            self.segments[i].neighbors = [
                j for j in xrange(len(simMatrix[i])) if simMatrix[i][j] >= SIMILARITY_THRESHOLD and abs(i - j) > 10
            ]
Example #20
def _square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, todo="square"):
    """Run squaring or merging analysis using bcbio.variation.recall.
    """
    ref_file = tz.get_in(("reference", "fasta", "base"), data)
    cores = tz.get_in(("config", "algorithm", "num_cores"), data, 1)
    resources = config_utils.get_resources("bcbio-variation-recall", data["config"])
    # adjust memory by cores but leave room for run program memory
    memcores = int(math.ceil(float(cores) / 5.0))
    jvm_opts = config_utils.adjust_opts(
        resources.get("jvm_opts", ["-Xms250m", "-Xmx2g"]),
        {"algorithm": {"memory_adjust": {"direction": "increase", "magnitude": memcores}}},
    )
    # Write unique VCFs and BAMs to input file
    input_file = "%s-inputs.txt" % os.path.splitext(out_file)[0]
    with open(input_file, "w") as out_handle:
        out_handle.write("\n".join(sorted(list(set(vrn_files)))) + "\n")
        if todo == "square":
            out_handle.write("\n".join(sorted(list(set(bam_files)))) + "\n")
    variantcaller = tz.get_in(("config", "algorithm", "jointcaller"), data).replace("-joint", "")
    cmd = (
        ["bcbio-variation-recall", todo]
        + jvm_opts
        + broad.get_default_jvm_opts()
        + ["-c", cores, "-r", bamprep.region_to_gatk(region)]
    )
    if todo == "square":
        cmd += ["--caller", variantcaller]
    cmd += [out_file, ref_file, input_file]
    bcbio_env = utils.get_bcbio_env()
    cmd = " ".join(str(x) for x in cmd)
    do.run(cmd, "%s in region: %s" % (cmd, bamprep.region_to_gatk(region)), env=bcbio_env)
    return out_file
Example #21
    def expand_macro(self, formatter, name, args):
        from trac.config import Option

        section_filter = key_filter = ""
        args, kw = parse_args(args)
        if args:
            section_filter = args.pop(0).strip()
        if args:
            key_filter = args.pop(0).strip()

        registry = Option.get_registry(self.compmgr)
        sections = {}
        for (section, key), option in registry.iteritems():
            if section.startswith(section_filter):
                sections.setdefault(section, {})[key] = option

        return tag.div(class_="tracini")(
            (
                tag.h3(tag.code("[%s]" % section), id="%s-section" % section),
                tag.table(class_="wiki")(
                    tag.tbody(
                        tag.tr(
                            tag.td(tag.tt(option.name)),
                            tag.td(format_to_oneliner(self.env, formatter.context, to_unicode(option.__doc__))),
                        )
                        for option in sorted(sections[section].itervalues(), key=lambda o: o.name)
                        if option.name.startswith(key_filter)
                    )
                ),
            )
            for section in sorted(sections)
        )
Example #22
File: solvers.py Project: jinz/YACS
 def __hash__(self):
     if self._hash is None:
         if self._old_hash:
             self._hash = hash(tuple(sorted(self.new.items()))) ^ self._old_hash
         else:
             self._hash = hash(tuple(sorted(self.items())))
     return self._hash
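The sort matters because equal mappings must hash equally regardless of insertion order; a minimal standalone illustration:

a = {"x": 1, "y": 2}
b = {"y": 2, "x": 1}
assert hash(tuple(sorted(a.items()))) == hash(tuple(sorted(b.items())))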
Example #23
 def test_edges_data(self):
     G = self.K3
     assert_equal(
         sorted(G.edges(data=True)), [(0, 1, {}), (0, 2, {}), (1, 0, {}), (1, 2, {}), (2, 0, {}), (2, 1, {})]
     )
     assert_equal(sorted(G.edges(0, data=True)), [(0, 1, {}), (0, 2, {})])
     assert_raises((KeyError, networkx.NetworkXError), G.neighbors, -1)
Example #24
def print_ships():
    import pprint

    Part.load_all()
    if SKIPPED_KEYS:
        print "skipped:"
        pprint.pprint(SKIPPED_KEYS)

    ships = Ship.load_all()
    if SKIPPED_KEYS:
        print "skipped:"
        pprint.pprint(SKIPPED_KEYS)

    ship = ships[0]
    print "ship", ship, "stages:", len(ship.stages)
    eng = [(th.istg, th.dstg, th.sqor, th) for th in ship.part.values() if th.part_type.is_engine()]
    eng.sort()
    print "engines:", len(eng)
    pprint.pprint(eng)

    for s in ship.stages:
        print "-------------------"
        print "stage:", s.stage_num, "mass:", s.mass()
        print "detach:", sorted(s.detach)
        print "ignite:", sorted(s.ignition)
        print "thrusters:", sorted(s.available_thrusters())
Example #25
def cleanupFiles():
    # First get rid of modified files
    for l in ["l1", "l2", "l3"]:
        arcpy.Delete_management(l)

    for f in glob.glob("C:\\Arctmp\\*"):
        try:
            shutil.rmtree(f)
        except OSError:
            print "UNABLE TO REMOVE:", f
    # Now remove the old directory
    for i in xrange(0, 1000000):
        new_workspace = "C:\\Arctmp\\workspace." + str(i)
        if not os.path.exists(new_workspace):
            break
    print "TESTING USING WORKSPACE", new_workspace
    # Now move in fresh copies
    shutil.copytree("C:\\Arcbase", new_workspace)
    print "CONTENTS:"
    arcpy.env.workspace = new_workspace
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.shp")):
        print f
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.lyr")):
        print f
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.gdb")):
        print f
Example #26
    def generate_query_list(self, search_context):
        """
        Returns the list of predetermined queries for the specified user.
        """
        topic = search_context.topic

        queries = []
        queries_file = open(self.__query_filename, "r")

        for line in queries_file:
            line = line.strip()
            line = line.split(",")

            line_qid = line[0]
            line_user = line[1]
            line_topic = line[2]
            line_terms = " ".join(line[3:])

            if line_user == self.__user and line_topic == topic.id:
                queries.append((line_terms, int(line_qid)))

        queries_file.close()

        queries.sort(key=lambda x: x[1])

        return queries
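Note that sorted() builds a new list and leaves its argument untouched, so the in-place list.sort() is what keeps the queries ordered here; a minimal illustration:

queries = [("b", 2), ("a", 1)]
sorted(queries, key=lambda x: x[1])   # new list, immediately discarded
queries.sort(key=lambda x: x[1])      # sorts in place
print(queries)                        # [('a', 1), ('b', 2)]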
Example #27
    def _print_table(table, seen, accum=None):
        accum = accum or []
        foreign_keys = database.foreign_keys[table]
        for foreign_key in foreign_keys:
            dest = foreign_key.dest_table

            # In the event the destination table has already been pushed
            # for printing, then we have a reference cycle.
            if dest in accum and table not in accum:
                print_("# Possible reference cycle: %s" % dest)

            # If this is not a self-referential foreign key, and we have
            # not already processed the destination table, do so now.
            if dest not in seen and dest not in accum:
                seen.add(dest)
                if dest != table:
                    _print_table(dest, seen, accum + [table])

        print_("class %s(BaseModel):" % database.model_names[table])
        columns = database.columns[table].items()
        if not preserve_order:
            columns = sorted(columns)
        primary_keys = database.primary_keys[table]
        for name, column in columns:
            skip = all(
                [
                    name in primary_keys,
                    name == "id",
                    len(primary_keys) == 1,
                    column.field_class in introspector.pk_classes,
                ]
            )
            if skip:
                continue
            if column.primary_key and len(primary_keys) > 1:
                # If we have a CompositeKey, then we do not want to explicitly
                # mark the columns as being primary keys.
                column.primary_key = False

            print_("    %s" % column.get_field())

        print_("")
        print_("    class Meta:")
        print_("        db_table = '%s'" % table)
        multi_column_indexes = database.multi_column_indexes(table)
        if multi_column_indexes:
            print_("        indexes = (")
            for fields, unique in sorted(multi_column_indexes):
                print_("            ((%s), %s)," % (", ".join("'%s'" % field for field in fields), unique))
            print_("        )")

        if introspector.schema:
            print_("        schema = '%s'" % introspector.schema)
        if len(primary_keys) > 1:
            pk_field_names = sorted([field.name for col, field in columns if col in primary_keys])
            pk_list = ", ".join("'%s'" % pk for pk in pk_field_names)
            print_("        primary_key = CompositeKey(%s)" % pk_list)
        print_("")

        seen.add(table)
Example #28
def test_format(obj, precision=6):
    tf = lambda o: test_format(o, precision)
    delimit = lambda o: ", ".join(o)
    otype = type(obj)
    if otype is str:
        return "'%s'" % obj
    elif otype is float or otype is int:
        if otype is int:
            obj = float(obj)
        fstr = "%%.%df" % precision
        return fstr % obj
    elif otype is set:
        if len(obj) == 0:
            return "set()"
        return "{%s}" % delimit(sorted(map(tf, obj)))
    elif otype is dict:
        return "{%s}" % delimit(sorted(tf(k) + ": " + tf(v) for k, v in obj.items()))
    elif otype is list:
        return "[%s]" % delimit(map(tf, obj))
    elif otype is tuple:
        return "(%s%s)" % (delimit(map(tf, obj)), "," if len(obj) is 1 else "")
    elif otype.__name__ in ["Vec", "Mat"]:
        entries = delimit(map(tf, sorted(filter(lambda o: o[1] != 0, obj.f.items()))))
        return "<%s %s {%s}>" % (otype.__name__, test_format(obj.D), entries)
    else:
        return str(obj)
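An assumed usage sketch; keys are sorted so equal dicts always format identically, and ints are printed as floats at the default precision:

print(test_format({"b": 2, "a": 1}))  # {'a': 1.000000, 'b': 2.000000}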
Example #29
    def _check_titles(self, fname, titles):
        expected_titles = [
            "problem description",
            "proposed change",
            "implementation",
            "testing",
            "documentation impact",
            "references",
        ]
        self.assertEqual(
            sorted(expected_titles), sorted(titles.keys()), "Expected titles not found in document %s" % fname
        )

        try:
            proposed = "proposed change"
            self.assertIn("alternatives", titles[proposed])
            self.assertIn("dependencies", titles[proposed])
            self.assertIn("deployer impact", titles[proposed])
            self.assertIn("developer impact", titles[proposed])
            self.assertIn("end user impact", titles[proposed])
            self.assertIn("performance impact", titles[proposed])
            try:
                self.assertIn("playbook impact", titles[proposed])
            except AssertionError:
                self.assertIn("playbook/role impact", titles[proposed])
            self.assertIn("security impact", titles[proposed])
            self.assertIn("upgrade impact", titles[proposed])

            impl = "implementation"
            self.assertIn("assignee(s)", titles[impl])
            self.assertIn("work items", titles[impl])
        except Exception as exp:
            raise SystemExit("Failed on file %s - Error %s" % (fname, exp))
Example #30
    def _ancestorSortKeys(tree, aKey="Ancestor"):
        """Sort the keys of a replication tree by their hopAncestor value.

        replicationTree is a dict { channelID : { ... }, ... }

        :param dict tree: replication tree to sort
        :param str aKey: key in the value dict used to sort
        """
        if not all(aKey in v for v in tree.values()):
            return S_ERROR("ancestorSortKeys: %s key is not present in all values" % aKey)
        # # seed with the roots - keys whose ancestor value is empty
        sortedKeys = [k for k in tree if aKey in tree[k] and not tree[k][aKey]]
        # # get children
        pairs = dict([(k, v[aKey]) for k, v in tree.items() if v[aKey]])
        while pairs:
            for key, ancestor in dict(pairs).items():
                if key not in sortedKeys and ancestor in sortedKeys:
                    sortedKeys.insert(sortedKeys.index(ancestor), key)
                    del pairs[key]
        # # need to reverse this one, as we're inserting child before its parent
        sortedKeys.reverse()
        if sorted(sortedKeys) != sorted(tree.keys()):
            return S_ERROR("ancestorSortKeys: cannot sort, some keys are missing!")
        return S_OK(sortedKeys)
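An assumed usage sketch with a hypothetical three-level tree; children come out after their ancestors:

tree = {1: {"Ancestor": None}, 2: {"Ancestor": 1}, 3: {"Ancestor": 2}}
# _ancestorSortKeys(tree) would return S_OK([1, 2, 3])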