Example #1
    def _create_grid(self):

        from bempp.api import GridFactory
        from collections import OrderedDict

        vertices = self._impl.vertices
        elements = self._impl.elements

        factory = GridFactory()

        # fromkeys takes its default value positionally (it accepts no keyword arguments)
        self._element_file_to_insertion_indices = OrderedDict.fromkeys(elements.keys(), -1)
        self._vertex_file_to_insertion_indices = OrderedDict.fromkeys(vertices.keys(), -1)

        vertex_insertion_count = 0
        element_insertion_count = 0

        for key in elements:
            elem = elements[key]
            elem_vertex_keys = []
            for vertex_key in elem["data"]:
                if self._vertex_file_to_insertion_indices[vertex_key] == -1:
                    factory.insert_vertex(vertices[vertex_key])
                    self._vertex_file_to_insertion_indices[vertex_key] = vertex_insertion_count
                    self._vertex_insertion_indices_to_file.append(vertex_key)
                    vertex_insertion_count += 1
                elem_vertex_keys.append(self._vertex_file_to_insertion_indices[vertex_key])
            factory.insert_element(elem_vertex_keys, domain_index=elem["domain_index"])
            self._element_file_to_insertion_indices[key] = element_insertion_count
            self._element_insertion_indices_to_file.append(key)
            element_insertion_count += 1

        return factory.finalize()
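
The bookkeeping pattern above, reduced to its essentials: fromkeys pre-seeds an ordered map from external file keys to a -1 sentinel, which the loop then fills with dense insertion indices. A minimal self-contained sketch (keys and coordinates are made up; no bempp required):

from collections import OrderedDict

file_vertices = OrderedDict([(10, (0.0, 0.0)), (42, (1.0, 0.0)), (7, (0.0, 1.0))])
file_to_insertion = OrderedDict.fromkeys(file_vertices.keys(), -1)  # -1 means "not inserted yet"
insertion_to_file = []

for key in file_vertices:
    if file_to_insertion[key] == -1:
        file_to_insertion[key] = len(insertion_to_file)
        insertion_to_file.append(key)

# file_to_insertion now maps file keys to dense indices: {10: 0, 42: 1, 7: 2}
# insertion_to_file inverts the mapping: [10, 42, 7]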
Example #2
 def unique_parameters(self):
     # We actually need to remove duplicates from the list of parameters
     # (and their corresponding gradients) in order to support reusing
     # the same layer at multiple places in the graph,
     # e.g. do weight sharing.
     params, grads = self.parameters()
     return (list(_OrderedDict.fromkeys(params).keys()), list(_OrderedDict.fromkeys(grads).keys()))
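
The one-liner above uses the order-preserving dedup idiom that recurs throughout these examples. In isolation (parameter names are illustrative):

from collections import OrderedDict

params = ["w1", "w2", "w1", "b1", "w2"]  # shared layers repeat their parameters
unique = list(OrderedDict.fromkeys(params))
print(unique)  # ['w1', 'w2', 'b1'] - duplicates dropped, first-seen order kept

On Python 3.7+ a plain dict.fromkeys works too, since insertion order is guaranteed.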
Example #3
 def __init__(self, graph, s):
     assert isinstance(graph, (Graph, DiGraph))
     self.graph = graph
     self.s = s
     self.marked = OrderedDict.fromkeys(self.graph.vertices, False)
     self.edge_to = OrderedDict.fromkeys(self.graph.vertices, None)
     self.__dfs(s)
Example #4
    def _preset(self):
        # Generate every rotation of each rectangle.
        tmp = []
        for rect in self.__arr_rects:
            t = deque(rect)
            for _ in range(len(t)):
                t.rotate(1)
                tmp.append(list(t))
        self.__arr_rects = tmp

        # Order by rectangle area and length, width.
        self.__arr_order_rects = OrderedDict.fromkeys(sorted([rect[0] * rect[1] for rect in self.__arr_rects]), 1)
        for rect in self.__arr_rects:
            item = rect.pop()
            tmp = sorted(rect)
            tmp.append(item)
            self.__arr_order_rects[rect[0] * rect[1]] = tmp
        print(self.__arr_order_rects)

        self.__arr_res = [rect[2] for _, rect in self.__arr_order_rects.items()]
        # Re-key the dict by ordinal position.
        tmp = deepcopy(self.__arr_order_rects)
        self.__arr_order_rects = OrderedDict.fromkeys(range(len(tmp)))
        index = 0
        for _, value in tmp.items():
            self.__arr_order_rects[index] = value
            index += 1
Example #5
    def __init__(self, distro, repository_url, name, version, version_patch):
        self.packages = []
        self.distro = distro
        self.repository_url = repository_url
        package = self._parse_package_file(self._get_package_xml_url(repository_url, name, version))
        self.name = package.name
        self.version = package.version
        self.version_patch = version_patch
        self.licenses = package.licenses
        self.run_dependencies = list(OrderedDict.fromkeys([dependency.name for dependency in package.run_depends]))
        self.build_dependencies = list(
            OrderedDict.fromkeys([dependency.name for dependency in package.build_depends + package.buildtool_depends])
        )
        # Build dependencies already added:
        if "git" in self.build_dependencies:
            self.build_dependencies.remove("git")
        if "cmake" in self.build_dependencies:
            self.build_dependencies.remove("cmake")

        # Remove HTML tags from description
        self.description = re.sub("<[^<]+?>", "", package.description)
        # Put it on one line to motivate packagers to make shorter descriptions
        self.description = re.sub("\n", " ", self.description)
        # Multiple consecutive spaces turned into one
        self.description = re.sub("([ ]+)", " ", self.description)
        # Only take the first sentence (keep short description)
        self.description = self.description.split(".")[0] + "."
        # Handle quotes
        self.description = self.description.replace('"', "").replace("`", "").replace("&quot;", "").replace("'", "")
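
For reference, the description-cleaning pipeline above behaves like this on a made-up input:

import re

description = "<p>A  ROS   driver.\nSupports many devices.</p>"
description = re.sub("<[^<]+?>", "", description)  # strip HTML tags
description = re.sub("\n", " ", description)       # put it on one line
description = re.sub("([ ]+)", " ", description)   # squeeze repeated spaces
description = description.split(".")[0] + "."      # keep the first sentence
print(description)  # "A ROS driver."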
Example #6
def map():
    form = FilterMapForm()
    red_icon_url = (
        "https://lh3.ggpht.com/hx6IeSRualApBd7KZB9s2N7bcHZIjtgr9VEuOxHzpd05_CZ6RxZwehpXCRN-1ps3HuL0g8Wi=w9-h9"
    )
    map_points = {}
    map_data = {}
    if form.validate_on_submit():
        response = Location.query.filter(
            str(form.water.data) == Location.water,
            str(form.food.data) == Location.food,
            str(form.supplies.data) == Location.supplies,
            str(form.shelter.data) == Location.shelter,
        ).all()
        for field in form:
            map_points[field.id] = [(document.latitude, document.longitude) for document in response]
            map_data[field.id] = [
                "<b>" + document.location_name + "</b><br>" + document.address + "<br>" + document.resources
                for document in response
            ]
    else:
        for field in form:
            map_points[field.id] = []
            map_data[field.id] = []
            if field.type not in ["CSRFTokenField", "HiddenField"]:
                response = Location.query.filter(getattr(Location, field.id) == "True").all()
                map_points[field.id] = [(document.latitude, document.longitude) for document in response]
                map_data[field.id] = [
                    "<b>"
                    + document.location_name
                    + "</b><br>"
                    + document.address
                    + "<br>"
                    + document.resources
                    + " - "
                    + document.hours
                    for document in response
                ]
    generated_map = Map(
        "map",
        "34.0386",
        "-80.9675",
        zoom=13,
        style="height:100%;width:100%;",
        infobox=list(
            OrderedDict.fromkeys(map_data["water"] + map_data["food"] + map_data["supplies"] + map_data["shelter"])
        ),
        markers={
            red_icon_url: list(
                OrderedDict.fromkeys(
                    map_points["water"] + map_points["food"] + map_points["supplies"] + map_points["shelter"]
                )
            )
        },
    )
    return render_template("map.html", form=form, map=generated_map)
Example #7
def student(request, dept_code):
    context_dict = {}
    try:
        dept = Department.objects.get(dept_code=dept_code)
        context_dict["Department"] = dept
    except:
        raise Http404
    try:
        btech = Student.objects.filter(dept=dept_code, degree=1).values("year_of_admission")
        idd = Student.objects.filter(dept=dept_code, degree=2).values("year_of_admission")

        btech_years = list()
        idd_years = list()

        for i in btech:
            btech_years.append(i["year_of_admission"])
        for i in idd:
            idd_years.append(i["year_of_admission"])

        btech_years = sorted(list(OrderedDict.fromkeys(btech_years)), reverse=True)
        idd_years = sorted(list(OrderedDict.fromkeys(idd_years)), reverse=True)

        students_list_btech = list()
        counter = 0
        for i in btech_years:
            students_list_btech.append(
                Student.objects.filter(dept=dept_code, degree=1, year_of_admission=btech_years[counter]).order_by(
                    "roll_no"
                )
            )
            counter += 1

        students_list_idd = list()
        counter = 0
        for i in idd_years:
            students_list_idd.append(
                Student.objects.filter(dept=dept_code, degree=2, year_of_admission=idd_years[counter]).order_by(
                    "roll_no"
                )
            )
            counter += 1

        headings_btech = ["B.Tech Part - I", "B.Tech Part - II", "B.Tech Part - III", "B.Tech Part - IV"]
        headings_idd = ["IDD Part - I", "IDD Part - II", "IDD Part - III", "IDD Part - IV", "IDD Part - V"]

        # Every value in each counter needs to be different.
        counter1 = [1, 2, 3, 4]
        counter2 = [11, 22, 33, 44, 55]
        full_list_btech = izip_longest(counter1, headings_btech, students_list_btech)
        full_list_idd = izip_longest(counter2, headings_idd, students_list_idd)
        context_dict["full_list_btech"] = full_list_btech
        context_dict["full_list_idd"] = full_list_idd

    except:
        raise Http404
    return render(request, "student.html", context_dict)
Example #8
def StatusReport():

	try:
		if(ProxyCounter <= 0):
			print("\nNo proxies found!\n")
		else:
			print("\n\n\t---------------------------------------\n\tEstimated Proxies: %d\n\n" % ProxyCounter )

			exportToFile = raw_input("\tExport to file? (Y/n): ");
			if("N" in exportToFile or "n" in exportToFile):
				pass
			else:
				SaveFile()

			# prompt to view the list of results
			if(NoSuccessCounter > 0):
				viewList = raw_input( "\n\tView website statistics? (y/N): ");
				if("y" in viewList or "Y" in viewList):
					global SucceededWebsites
					global NoSuccessWebsites
					# remove duplicate entries from the lists
					uniqueSW = list(OrderedDict.fromkeys(SucceededWebsites))
					uniqueNW = list(OrderedDict.fromkeys(NoSuccessWebsites))
					
					print("\n\tWebsites without proxies:\n\t---------------------------------------\n")
					for url in uniqueNW:
						Log("\t" + url,"red")
						
					print("\n\n\tWebsites with proxies:\n\t---------------------------------------\n")
					for url in uniqueSW:
						Log("\t" + url,"green")
						
					exportToFile = raw_input("\n\tExport succeeded websites to file? (y/N): ")
					if(exportToFile == "Y" or exportToFile == "y"):
						with open("output/Succeeded websites.txt","w+") as f:
							urlList = []
							# read all lines from the file (if it exists)
							for urlFromFile in f.readlines():
								urlList.append(urlFromFile)
							# append all succeeded websites to the list
							for sucUrl in uniqueSW:
								urlList.append(sucUrl)
							
							unique = list(OrderedDict.fromkeys(urlList))
							for url in unique:
								f.write(url + os.linesep)

						print("\n\tFile has been saved!\n\tYou can find it under 'output/Succeeded websites.txt'\n")

			# wait until we hit enter to continue because the call to Menu will clear the screen
		raw_input("\n\tHit enter to return to the main menu...")
	except KeyboardInterrupt:
		sys.exit()
	Menu()
	return
Example #9
def keyPreProcess(key):
    key = key.replace(" ", "")
    key = key.upper()
    # change the key to uppercase

    key = "".join([i for i in key if not i.isdigit()])
    keyArray = list(key)
    # turn the key into a list of characters
    # check that I and J do not both appear in the keyword

    if "I" in keyArray and "J" in keyArray:
        print "J & I both cannot coexist in the keyword"
        sys.exit(0)
    # deduplicate the key's characters while keeping their original order

    keyArray = list(OrderedDict.fromkeys(key))

    if type(key) is not str:
        out = "Please choose a word as a key next time"
        print out
        sys.exit(0)

    newKey = "".join([str(x) for x in keyArray])

    key = newKey
    out = "You entered key as " + key
    print out
    # up to this point the key is generated with no
    # duplication of character
    # the keyword is now collected
    # Creating the playfair matrix

    print "\n============Starting to generate the playfair matrix===========\n"

    # create a list from A to Z
    matr = list(string.ascii_uppercase)

    # join two lists
    newKeyArray = keyArray + matr
    newKey = "".join([str(x) for x in newKeyArray])  # join two lists
    keyArray = list(OrderedDict.fromkeys(newKey))  # ordering the characters without duplications
    keyArray.remove("J")  # removing J to consider I in the key only
    print "Playfair matrix as an array"
    print keyArray

    n = 5
    pfm = [keyArray[i : i + n] for i in range(0, len(keyArray), n)]  # 1d to 2D transformation for print
    # pythonic array print
    print "\n\n\n\n\nThe playfair keyword         \n\n"
    print ("\n".join(["".join(["{:4}".format(item) for item in row]) for row in pfm]))

    return keyArray
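
For example, keyPreProcess("MONARCHY") (no digits, no I/J conflict) builds the classic Playfair matrix, printed with the formatting above (spacing simplified here):

# M O N A R
# C H Y B D
# E F G I K
# L P Q S T
# U V W X Z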
Example #10
    def __init__(self, names, Type, logOrder, isList=False):
        # self.names = rospy.get_param(names);
        self.names = names
        if isList:
            # note: fromkeys stores a reference to this single list, so all
            # keys share the same list object; mutating it affects every key
            self.states = OrderedDict.fromkeys(self.names, [0.0 for elem in logOrder])
        else:
            self.states = OrderedDict.fromkeys(self.names, Type())

        self.Type = Type
        self.stateMux = Lock()
        self.logOrder = logOrder
        self.isList = isList
Example #11
    def __init__(self, graph):
        assert isinstance(graph, Graph)
        self.graph = graph
        self.count = 0
        self.marked = OrderedDict.fromkeys(self.graph.vertices, False)
        self.connections = OrderedDict.fromkeys(self.graph.vertices, None)

        for v in self.graph.vertices:
            if self.marked[v] is True:
                continue
            self.__dfs(v)
            self.count += 1
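
The same bookkeeping in self-contained form: fromkeys builds ordered visited/parent tables keyed by vertex. The Graph class is external, so a plain adjacency dict stands in here:

from collections import OrderedDict

vertices = ["a", "b", "c", "d"]
adjacent = {"a": ["b", "c"], "b": ["d"], "c": [], "d": []}

marked = OrderedDict.fromkeys(vertices, False)   # visited flags, vertex order kept
edge_to = OrderedDict.fromkeys(vertices, None)   # DFS tree parents

def dfs(v):
    marked[v] = True
    for w in adjacent[v]:
        if not marked[w]:
            edge_to[w] = v
            dfs(w)

dfs("a")
print(list(edge_to.items()))  # [('a', None), ('b', 'a'), ('c', 'a'), ('d', 'b')]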
Example #12
    def __init__(self, **kwargs):
        import os.path
        from collections import OrderedDict

        self._impl = None
        self._vertex_file_to_insertion_indices = None
        self._element_file_to_insertion_indices = None
        self._vertex_insertion_indices_to_file = []
        self._element_insertion_indices_to_file = []

        self._vertex_indices_to_file = None
        self._file_to_vertex_indices = OrderedDict()
        self._element_indices_to_file = None
        self._file_to_element_indices = OrderedDict()

        if "file_name" in kwargs:
            fname = kwargs["file_name"]
            extension = os.path.splitext(fname)[1].lower()

            if extension == ".msh":
                from bempp.api.file_interfaces import gmsh

                self._impl = gmsh.GmshInterface.read(fname)

            self._grid = self._create_grid()

            # Setup the mappers

            self._file_to_vertex_indices = OrderedDict.fromkeys(self._impl.vertices.keys(), -1)
            self._file_to_element_indices = OrderedDict.fromkeys(self._impl.elements.keys(), -1)

            self._vertex_indices_to_file = self._grid.leaf_view.entity_count(2) * [None]
            self._element_indices_to_file = self._grid.leaf_view.entity_count(0) * [None]

            index_set = self._grid.leaf_view.index_set()
            for elem in self._grid.leaf_view.entity_iterator(0):
                index = index_set.entity_index(elem)
                insertion_index = self._grid.element_insertion_index(elem)
                file_key = self._element_insertion_indices_to_file[insertion_index]
                self._element_indices_to_file[index] = file_key
                self._file_to_element_indices[file_key] = index

            for vertex in self._grid.leaf_view.entity_iterator(2):
                index = index_set.entity_index(vertex)
                insertion_index = self._grid.vertex_insertion_index(vertex)
                file_key = self._vertex_insertion_indices_to_file[insertion_index]
                self._vertex_indices_to_file[index] = file_key
                self._file_to_vertex_indices[file_key] = index
Example #13
def train_1(sessions):
    installed_apps = {}
    for session in sessions:
        for x in session:
            app = installed_apps.setdefault(x["application"], App(x["application"]))
            app.ocrs += 1.0

        if len(session) > 1:
            app_pkg_names = [x["application"] for x in session]
            for predecessor in list(OrderedDict.fromkeys(app_pkg_names)):
                successors = []
                indices = [i for i, x in enumerate(app_pkg_names) if x == predecessor]
                indices.append(len(session))
                for i in xrange(len(indices) - 1):
                    for j in xrange(indices[i] + 1, indices[i + 1]):
                        successor = session[j]["application"]
                        if successor not in successors:
                            app = installed_apps.setdefault(successor, App(successor))
                            app.pred_co_ocrs[predecessor] += 1.0
                            successors.append(successor)

    for pkg_name in installed_apps:
        successor = installed_apps[pkg_name]
        for p in successor.pred_co_ocrs:
            predecessor = installed_apps[p]
            successor.pred_influence[p] = compute_relatedness(
                successor.pred_co_ocrs[p], predecessor.ocrs, successor.ocrs, len(sessions), 5
            )
    return installed_apps
Example #14
 def process_file(
     mods=None, all_data=None, col_names=None, file_quant_cols=None, wp_median=None, df=None, df_collapsed=None
 ):
     for index, row in mods.iterrows():
         site = row["Site"].lower()
         protein = row[site_col]
         peptides = list(set(row["Peptide"].split(";")))
         for peptide in peptides:
             genes = all_data[all_data[pep_col] == peptide][gene_col].str.split(";").fillna("NA")
             gene = ";".join(list(OrderedDict.fromkeys([j for i in genes for j in i])))
             matches = all_data[all_data[pep_col] == peptide]
             df_col_index = (gene, protein, site)
             for quant_col in file_quant_cols:
                 med = ((np.log2(matches[quant_col]) if log_norm else matches[quant_col]) - wp_median).median()
                 for col_name in col_names[quant_col]:
                     df_index = (gene, protein, site, peptide)
                     if df_index in df and col_name in df[df_index]:
                         continue
                     try:
                         df[df_index][col_name] = med
                     except KeyError:
                         df[df_index] = {col_name: med}
                     try:
                         df_collapsed[df_col_index][col_name].add(med)
                     except KeyError:
                         if df_col_index in df_collapsed:
                             df_collapsed[df_col_index][col_name] = set([med])
                         else:
                             df_collapsed[df_col_index] = {col_name: set([med])}
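
The try/except KeyError pattern above is a compact way to grow nested dicts. In isolation (keys and values are illustrative):

df = {}
index, col, value = ("gene", "protein", "site", "peptide"), "quant_1", 0.5
try:
    df[index][col] = value       # works once the inner dict exists
except KeyError:
    df[index] = {col: value}     # first sighting of this index: create it

df.setdefault(index, {})[col] = value is the equivalent one-liner.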
Example #15
    def _construct_ordered_GET(self):
        """
        Construct a Global Edge Table (GET)

        The GET is an OrderedDict. Keys are scan line numbers,
        ordered from bbox.ymin to bbox.ymax, where bbox is the
        bounding box of the polygon.
        Values are lists of edges for which edge.ymin==scan_line_number.

        Returns
        -------
        GET: OrderedDict
            {scan_line: [edge1, edge2]}
        """
        # edges is a list of Edge objects which define a polygon
        # with these vertices
        edges = self.get_edges()
        GET = OrderedDict.fromkeys(self._scan_line_range)
        ymin = np.asarray([e._ymin for e in edges])
        for i in self._scan_line_range:
            ymin_ind = (ymin == i).nonzero()[0]
            # test by size, not .any(): edge index 0 is a valid match too
            if ymin_ind.size:
                GET[i] = [edges[ymin_ind[0]]]
                for j in ymin_ind[1:]:
                    GET[i].append(edges[j])
        return GET
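
A standalone sketch of the same grouping step, using a hypothetical minimal Edge stand-in for the project's real edge class:

from collections import OrderedDict
import numpy as np

class Edge(object):
    """Hypothetical stand-in with just the attribute the sketch needs."""
    def __init__(self, ymin, ymax):
        self._ymin, self._ymax = ymin, ymax

edges = [Edge(0, 3), Edge(0, 2), Edge(2, 3)]  # illustrative polygon edges
scan_line_range = range(0, 4)

GET = OrderedDict.fromkeys(scan_line_range)   # every scan line starts as None
ymin = np.asarray([e._ymin for e in edges])
for i in scan_line_range:
    ymin_ind = (ymin == i).nonzero()[0]
    if ymin_ind.size:                         # size, not .any(): index 0 counts
        GET[i] = [edges[j] for j in ymin_ind]
# scan line 0 holds the first two edges, line 2 the third; lines 1 and 3 stay None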
Example #16
def func_args_as_dict(func, args, kwargs):
    """
    Return given function's positional and key value arguments as an ordered
    dictionary.
    """
    arg_names = list(OrderedDict.fromkeys(itertools.chain(inspect.getargspec(func)[0], kwargs.keys())))
    return OrderedDict(list(six.moves.zip(arg_names, args)) + list(kwargs.items()))
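
A quick usage sketch, assuming the imports the function relies on (inspect, itertools, six, OrderedDict); the sample function is made up, and inspect.getargspec is the Python 2 era API (removed in Python 3.11):

def greet(greeting, name, punctuation="!"):
    return greeting + ", " + name + punctuation

print(func_args_as_dict(greet, ("Hello", "World"), {"punctuation": "?"}))
# OrderedDict([('greeting', 'Hello'), ('name', 'World'), ('punctuation', '?')])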
Example #17
File: queries.py Project: OSSOS/MOP
    def all_blocks(self):

        status = OrderedDict.fromkeys(parameters.BLOCKS.keys())
        status["13AE"] = ["discovery complete", "50", "24.05"]
        status["13AO"] = ["discovery complete", "36", "24.40"]
        status["13BL"] = ["discovery complete", "79", "24.48"]
        status["14BH"] = ["discovery running", "-", "-"]
        status["15AP"] = ["discovery running", "-", "-"]
        status["15AM"] = ["discovery running", "-", "-"]

        """Overview tal table is expecting:
        ID   observations   processing status   discoveries  m_r 40%
        """
        bks = []
        for block in status.iterkeys():
            bk = [block, self.num_block_images(block)]  # a default set in .fromkeys() would be shared across keys, not a fresh list
            if status[block] is not None:
                bk = bk + status[block]
            else:
                bk = bk + ["awaiting triplets", "-", "-"]
            bks.append(bk)

        retval = {"blocks": bks, "status": status}

        return retval
Example #18
        def get_next_form(recno, recno_index):

            file_offset = 0
            recno_counter = 0

            for line in from_source:
                bits = line.split(delimiter)
                bits = map(lambda x: x.strip().decode("utf-8"), bits)
                bits = filter(lambda x: x, bits)
                if bits:
                    recno_counter += 1

                    bits = OrderedDict.fromkeys(bits).keys()  # stable unique
                    bits_prefixed = PyDic.common_prefix(bits)

                    bits_str = (PyDic.INTERNAL_DELIMITER.join(bits_prefixed)).encode("utf-8") + "\n"

                    recno.write(bits_str)

                    if to_path is None:
                        recno_index.append(file_offset)
                    else:

                        recno_index.write(struct.pack(PyDic.RECNO_INDEX_FMT, file_offset))

                    if verbose:
                        print >>sys.stderr, "[", recno_counter, "]", bits[0]

                    for bit in bits:
                        yield bit.lower(), (recno_counter,)
                    file_offset += len(bits_str)

            raise StopIteration
Example #19
def update_inventory(bot, trigger):
    """Set an inventory of your knives."""
    stripped_inventory = []
    if trigger.group(2):
        stripped_inventory = trigger.group(2).strip()
    else:
        bot.reply("Give me a space delimited list of imgur url")
        return NOLIMIT
    inventory_links = stripped_inventory.split()
    response = ""

    # remove duplicate links
    inventory_links = list(od.fromkeys(inventory_links))
    # TODO the link lookup thing is broken because imgur
    if False:  # disabled until the imgur lookup is fixed
        for link in inventory_links:
            response = get(link)
            if response.headers:
                if not response.headers["status"] == "200 OK":
                    response += 'Invalid url "{url}". Returned status {status}'.format(
                        url=link, status=response.headers["status"]
                    )
            else:
                response += 'Invalid url "{}". '.format(link)
            # Sleep for 300 ms
            # otherwise we hit the endpoint too much
            time.sleep(0.3)
    if response:
        bot.reply(response)
        return NOLIMIT
    bot.db.set_nick_value(trigger.nick, "blade_inventory", inventory_links)
    links = format_links(inventory_links)
    bot.reply("your inventory has been set to {urls}".format(urls=links))
Example #20
    def init(self, args):
        self._projname = args.name
        self._projdir = unicode(os.path.abspath(os.path.join(args.directory, self._projname)), "utf-8")
        self._lang = args.language
        self._package = args.package
        self._tpname = args.template

        # new official ways to get the template and cocos paths
        self._templates_paths = self.get_templates_paths()
        self._cocosroot = self.get_cocos2d_path()

        # search for custom paths
        if args.engine_path is not None:
            self._cocosroot = os.path.abspath(args.engine_path)
            self._cocosroot = unicode(self._cocosroot, "utf-8")
            tp_path = os.path.join(self._cocosroot, "templates")
            if os.path.isdir(tp_path):
                self._templates_paths.append(tp_path)

        # remove duplicates keeping order
        o = OrderedDict.fromkeys(self._templates_paths)
        self._templates_paths = o.keys()

        self._other_opts = args
        self._mac_bundleid = args.mac_bundleid
        self._ios_bundleid = args.ios_bundleid

        self._templates = Templates(args.language, self._templates_paths, args.template)
        if self._templates.none_active():
            self._templates.select_one()
Example #21
def research(request, dept_code):
    dept_code1 = dept_code[:2].upper()
    context_dict = {}
    try:
        dept = Department.objects.get(dept_code=dept_code1)
        context_dict["Department"] = dept
    except:
        raise Http404
    try:
        supervisors = PhdResearch.objects.filter(dept=dept_code1).values("supervisor")

        sv_list = list()
        for i in supervisors:
            sv_list.append(i["supervisor"])
        sv_list = list(OrderedDict.fromkeys(sv_list))

        counter = list()
        num = "0"
        for i in sv_list:
            counter.append(num)
            num = str(int(num) + 1)

        z = zip(sv_list, counter)
        names = list()
        for i, j in z:
            print i, j
            s = "sv" + j
            context_dict[s] = PhdResearch.objects.filter(dept=dept_code1, supervisor=i)
            names.append(context_dict[s])

        full_list = izip_longest(sv_list, names)
        context_dict["full_list"] = full_list
    except:
        raise Http404
    html = dept_code[:2] + "/research.html"
    return render(request, html, context_dict)
Example #22
 def __init__(self, capture=None):
     self.captured = capture or StringIO()
     self.specs = list()
     self.current_spec = list()
     self.results = OrderedDict.fromkeys([SPECS, PASSED, FAILED, ERRORS, SKIPPED_SPECS, SKIPPED])
     self.finished = None
     self.started = time.time()
Example #23
def read_trades(trade_file):
    """
    Reads in the trade file and returns relevant info
    returns: start_date, end_date, symbols, trades
    @start_date: date object for start of trading
    @end_date: date object for end of trading
    @symbols: list of symbols being traded
    @trades: list of trades being executed
    """
    trades = []

    # Read in the csv file and create tuples for each trade
    # this way we can access each trade like: trade[0].symbol, etc
    with open(trade_file, "rb") as f:
        reader = csv.reader(f)
        Trade = namedtuple("Trade", ["date", "symbol", "command", "amount"])
        for row in reader:
            date = dt.datetime(int(row[0]), int(row[1]), int(row[2]), 16, 0, 0)
            trades.append(Trade(date, row[3], row[4], row[5]))

    # Now we want to sort the trades by date, just in case they are not in
    # correct order
    trades.sort(key=lambda tup: tup.date)

    # Grab the start and end date of the trades
    start_date = trades[0].date
    end_date = trades[-1].date

    # Grab all of the symbols from the orders
    symbols = [x.symbol for x in trades]
    symbols = OrderedDict.fromkeys(symbols).keys()

    return start_date, end_date, symbols, trades
Example #24
def country_localized_display_name(country_code):
    """
    Get the display name for a country code in the local language
    e.g. Россия for Russia, España for Spain, etc.

    For most countries there is a single official name. For countries
    with more than one official language, this will return a concatenated
    version separated by a slash e.g. Maroc / المغرب for Morocco.

    Note that all of the exceptions in road_sign_languages.tsv are also
    taken into account here so India for example uses the English name
    rather than concatenating all 27 toponyms.

    This method should be roughly consistent with OSM's display names.

    Usage:
        >>> country_official_name('jp')     # returns '日本'
        >>> country_official_name('be')     # returns 'België / Belgique / Belgien'
    """

    country_code = country_code.lower()
    if not country_official_names:
        init_country_names()
    return " / ".join(
        OrderedDict.fromkeys(n.replace("-", " ") for n in country_official_names[country_code].values()).keys()
    )
Example #25
def train_2(sessions):
    installed_apps = {}
    for session in sessions:
        for x in [x["application"] for x in session]:
            app = installed_apps.setdefault(x, App(x))
            app.ocrs += 1

        if len(session) > 1:
            app_pkg_names = [x["application"] for x in session]
            for predecessor in list(OrderedDict.fromkeys(app_pkg_names)):
                index = app_pkg_names.index(predecessor)
                for i in xrange(index + 1, len(session)):
                    successor = app_pkg_names[i]
                    app = installed_apps.setdefault(successor, App(successor))
                    app.pred_co_ocrs[predecessor] += 1.0

    for pkg_name in installed_apps:
        successor = installed_apps[pkg_name]
        for p in successor.pred_co_ocrs:
            predecessor = installed_apps[p]
            successor.pred_influence[p] = compute_relatedness(
                successor.pred_co_ocrs[p], predecessor.ocrs, successor.ocrs, len(sessions), 5
            )

    results = defaultdict(list)
    for session in sessions:
        apps = [x["application"] for x in session]
        counts = Counter(apps)
        for app in counts:
            results[app].append(counts[app])

    return installed_apps
Example #26
    def RunStep(self):
        self["full_revision_list"] = list(OrderedDict.fromkeys(self._options.revisions))
        port_revision_list = []
        for revision in self["full_revision_list"]:
            # Search for commits which matches the "Port rXXX" pattern.
            git_hashes = self.GitLog(
                reverse=True, format="%H", grep="Port r%d" % int(revision), branch="svn/bleeding_edge"
            )
            for git_hash in git_hashes.splitlines():
                svn_revision = self.GitSVNFindSVNRev(git_hash, "svn/bleeding_edge")
                if not svn_revision:  # pragma: no cover
                    self.Die("Cannot determine svn revision for %s" % git_hash)
                revision_title = self.GitLog(n=1, format="%s", git_hash=git_hash)

                # Is this revision included in the original revision list?
                if svn_revision in self["full_revision_list"]:
                    print ("Found port of r%s -> r%s (already included): %s" % (revision, svn_revision, revision_title))
                else:
                    print ("Found port of r%s -> r%s: %s" % (revision, svn_revision, revision_title))
                    port_revision_list.append(svn_revision)

        # Do we find any port?
        if len(port_revision_list) > 0:
            if self.Confirm("Automatically add corresponding ports (%s)?" % ", ".join(port_revision_list)):
                #: 'y': Add ports to revision list.
                self["full_revision_list"].extend(port_revision_list)
Example #27
def remove_duplicates():
    """
    deduplicate
      Removes duplicate lines from todo.txt.
    """
    file_path = get_file_dir()

    # Read the todo.txt file, make a list without duplicates
    with open(file_path, "r") as f:
        lines = (line.rstrip() for line in f)
        unique_lines = OrderedDict.fromkeys((line for line in lines if line))
    deduplicated_list = unique_lines.keys()

    # Count deleted lines
    count_original = len(current_tasks)
    count_deduplicated = len(deduplicated_list)
    total = count_original - count_deduplicated

    # Write the list back to the file
    write_tasks(deduplicated_list)

    if total == 0:
        print("TODO: No duplicate tasks found")
    else:
        print(total, "duplicate task(s) removed")
Example #28
    def RunStep(self):
        self["full_revision_list"] = list(OrderedDict.fromkeys(self._options.args))
        port_revision_list = []
        for revision in self["full_revision_list"]:
            # Search for commits which matches the "Port rXXX" pattern.
            args = "log svn/bleeding_edge --reverse " '--format=%%H --grep="Port r%d"' % int(revision)
            git_hashes = self.Git(args) or ""
            for git_hash in git_hashes.strip().splitlines():
                args = "svn find-rev %s svn/bleeding_edge" % git_hash
                svn_revision = self.Git(args).strip()
                if not svn_revision:
                    self.Die("Cannot determine svn revision for %s" % git_hash)
                revision_title = self.Git("log -1 --format=%%s %s" % git_hash)

                # Is this revision included in the original revision list?
                if svn_revision in self["full_revision_list"]:
                    print ("Found port of r%s -> r%s (already included): %s" % (revision, svn_revision, revision_title))
                else:
                    print ("Found port of r%s -> r%s: %s" % (revision, svn_revision, revision_title))
                    port_revision_list.append(svn_revision)

        # Do we find any port?
        if len(port_revision_list) > 0:
            if self.Confirm("Automatically add corresponding ports (%s)?" % ", ".join(port_revision_list)):
                #: 'y': Add ports to revision list.
                self["full_revision_list"].extend(port_revision_list)
Example #29
    def solve_deps(self, args, git_project):
        """ Called when use_build_deps is True

        * find the current build project as qibuild would do
        * solve the dependencies
        * find the git projects matching the dependencies

        """
        build_project = None
        try:
            build_project = self.build_parser.parse_no_project(args)[-1]
        except qibuild.parsers.CouldNotGuessProjectName:
            pass
        if not build_project:
            return [git_project]
        git_projects = list()  # Order matters
        deps_solver = qibuild.deps.DepsSolver(self.build_worktree)
        dep_types = qibuild.parsers.get_dep_types(args)
        deps_solver.dep_types = dep_types
        build_projects = deps_solver.get_dep_projects([build_project], dep_types)
        for build_project in build_projects:
            git_project = qisys.parsers.find_parent_project(self.git_worktree.git_projects, build_project.path)
            git_projects.append(git_project)
        # Idiom to deduplicate an iterable while preserving order
        return list(OrderedDict.fromkeys(git_projects))
Example #30
    def poll(self):
        """
        Get metrics from the http server's /jmx page, and transform them into normalized tuples

        @return: array of tuples ([u'Context', u'Array'], u'metricName', value)
        """
        json_arr = self.request()["beans"]
        kept = []
        for bean in json_arr:
            if (not bean["name"]) or (not "name=" in bean["name"]):
                continue
            # split the name string
            bean_name = bean["name"].split("name=")[1]
            context = re.split(",sub=|,q[0-9]+=", bean_name)
            # deduplicate, keeping the first occurrence of each part
            context = OrderedDict.fromkeys(context).keys()
            # lower case and replace spaces.
            context = [c.lower().replace(" ", "_") for c in context]
            # don't want to include the service or daemon twice
            context = [c for c in context if c != self.service and c != self.daemon]

            for key, value in bean.iteritems():
                if key in EXCLUDED_KEYS:
                    continue
                if not is_numeric(value):
                    continue
                kept.append((context, key, value))
        return kept
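
To make the context handling concrete, here is how the splitting and first-occurrence dedup behave on a made-up JMX bean name:

import re
from collections import OrderedDict

bean_name = "RegionServer,sub=Regions,q0=Regions"  # hypothetical bean name
context = re.split(",sub=|,q[0-9]+=", bean_name)
# ['RegionServer', 'Regions', 'Regions']
context = list(OrderedDict.fromkeys(context))      # keep first occurrences only
print(context)  # ['RegionServer', 'Regions']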