Exemplo n.º 1
0
 def __merge_menu(data, new_data):
     """Merge two menu lists, letting *new_data* override same-named items.

     Items in *data* whose "name" matches an item in *new_data* are dropped,
     the remainder is merged with *new_data*, and the result is ordered by
     each item's "weight" (default 999, i.e. unweighted items sink last).
     """
     contains = query(new_data).contains
     same_name = lambda a, b: a.get("name", "") == b.get("name", "")
     kept = query(data).where(
         lambda item: not contains(item, same_name)).to_list()
     merged = Config.merge(kept, new_data)
     return query(merged).order_by(
         lambda item: item.get("weight", 999)).to_list()
def order_grouping_by(selector: Callable, type: Order = Order.Asc):
    """
    subselect the group items within each grouping.

    Returns an ordering function that sorts groupings by the sum of
    *selector* over each group's items, ascending or descending per *type*.
    """
    def ascending(groups):
        return groups.order_by(lambda g: query(g).select(selector).sum())

    def descending(groups):
        return groups.order_by_descending(
            lambda g: query(g).select(selector).sum())

    return ascending if type == Order.Asc else descending
Exemplo n.º 3
0
 def group_join(self,
                inner_iterable,
                outer_key_selector=identity,
                inner_key_selector=identity,
                result_selector=lambda outer, grouping: grouping):
     """Delegate to asq's group_join with this sequence as the outer side."""
     outer = query(self)
     return outer.group_join(inner_iterable, outer_key_selector,
                             inner_key_selector, result_selector)
Exemplo n.º 4
0
 def join(self,
          inner_iterable,
          outer_key_selector=identity,
          inner_key_selector=identity,
          result_selector=lambda outer, inner: (outer, inner)):
     """Delegate to asq's join with this sequence as the outer side."""
     outer = query(self)
     return outer.join(inner_iterable, outer_key_selector,
                       inner_key_selector, result_selector)
Exemplo n.º 5
0
def get_path() -> str:
    """Interactively pick an .xlsx file from the current directory.

    Lists every entry whose name contains ".xlsx" and prompts the user
    for each one; returns the first confirmed path, or "" if none is
    accepted.
    """
    candidates = asq.query(os.listdir(os.getcwd())).where(
        lambda name: name.find(".xlsx") != -1).to_list()
    for candidate in candidates:
        answer = input(candidate + "Y/N")
        if answer.upper() == "Y":
            return candidate
    return ""
Exemplo n.º 6
0
    def get_content(self, file_path, params, lang=None, skip_ignore=False):
        """Render a templated content file and parse it into a content dict.

        The file is read as UTF-8, rendered through ``self.env`` (a template
        environment, presumably Jinja — confirm) with *params*, and matched
        against ``self.re_mark_doc``. On a match the "meta" group is parsed
        as YAML front matter and the "content" group is converted from
        markdown and post-processed by TagExtender; otherwise the raw text
        is returned as a plain-text document.

        Returns a dict with keys "meta", "content", "contents", or None
        when meta["ignore"] is truthy and *skip_ignore* is False.
        """
        with codecs.open(file_path, mode="r", encoding="utf-8") as input_file:
            text = input_file.read()
        # Render template placeholders before parsing document structure.
        __template = self.env.from_string(text)
        text = __template.render(params)
        m = self.re_mark_doc.match(text)
        if m:
            # YAML front matter drives the document metadata.
            meta = yaml.safe_load(m.groupdict().get("meta", ""))
            meta["type"] = "markdown"
            content = m.groupdict().get("content", "")
            contents = []
            if meta.get("ignore", False) and not skip_ignore:
                return None
            if content:
                extender = TagExtender(self.config, meta, self.env, params)
                # Split into sections first so each can be rendered alone,
                # in addition to the whole-document rendering below.
                contents = self.re_split.split(content)
                content = self.__markdown(content)
                content = extender.extend(content, lang)
                contents = query(contents).select(self.__markdown).select(
                    lambda x: extender.extend(x, lang)).to_list()

            return {"meta": meta, "content": content, "contents": contents}
        else:
            # No document marker: treat the whole text as plain content.
            return {
                "meta": {
                    "type": "text"
                },
                "content": text,
                "contents": [text]
            }
Exemplo n.º 7
0
def order_topics_by_insertion_length(q: Queryable):
    """
    Text Length.

    Keeps only items flagged ``is_topic`` and orders them by the summed
    length of ``x[1]`` across each item's ``inserted_diffs`` — presumably
    the inserted text of each diff pair.

    NOTE(review): relies on the project's apply/where/order_by combinators
    and the ``a_`` attribute selector; their exact semantics are assumed
    here — confirm against their definitions.
    """
    return apply([
        where(a_('is_topic')),
        order_by(query(a_('inserted_diffs')).sum(lambda x: len(x[1])))
    ])
Exemplo n.º 8
0
    def OperatorTools(self) -> link:
        """Return the Operator Tools menu link, preferring a displayed element.

        Several elements may share the id; the first one that is actually
        displayed wins (None if none is displayed).
        """
        candidates = self.webDriver.find_elements(By.ID, 'Menu_OperatorTools')
        visible = query(candidates).first_or_default(
            None, lambda e: e.is_displayed())
        return link(self.webDriver, None, visible)
Exemplo n.º 9
0
 def element_at(self, index: int):
     """Return the element at *index*, supporting negative indices.

     A negative index counts from the end (Python list semantics): the
     sequence is reversed and the index mapped so that -1 becomes the
     last element.

     Raises:
         IndexError: when *index* is out of range (chained from asq's
             OutOfRangeError so the original cause stays visible).
     """
     source = query(self)
     if index < 0:
         # Map -1 -> element 0 of the reversed sequence, -2 -> 1, ...
         source = source.reverse()
         index = -index - 1
     try:
         return source.element_at(index)
     except OutOfRangeError as error:
         # Present the standard Python exception type to callers,
         # carrying the offending index and the original cause.
         raise IndexError(index) from error
Exemplo n.º 10
0
 def __merge(current_item, data_array, current_lang=""):
     data_array = data_array or []
     if isinstance(current_item, list):
         res = query(current_item).select(lambda x:
                                          (current_lang, x)).to_list()
     elif isinstance(current_item, str):
         res = (current_lang, current_item)
     else:
         res = None
     return data_array if res is None else Config.merge(data_array, res)
Exemplo n.º 11
0
 def __collect_helper(config, language, path):
     """Collect static directory entries and join each onto *path*.

     Reads "static" (falling back to the default static/ entry when
     absent), then layers "static2" and "static3" on top.
     """
     entries = self.__get_sub_as_static_array("static", config, language,
                                              None)
     if entries is None:
         # No explicit static config: use the conventional default.
         entries = [("", "static/")]
     for extra_key in ("static2", "static3"):
         entries = self.__get_sub_as_static_array(extra_key, config,
                                                  language, entries)
     return query(entries).select(__lam_path_join(path)).to_list()
Exemplo n.º 12
0
 def GetShotTime(self):
     """Return the earliest known timestamp for this shot.

     Considers metadata, filename and filesystem dates; datetime.now()
     is always in the candidate list, so the minimum is well-defined
     even when every other source is None.
     """
     candidates = [
         self.datetime_meta_create, self.datetime_meta_modif,
         self.datetime_name, self.datetime_file_create,
         self.datetime_file_modif,
         datetime.now()
     ]
     # 'is not None' (identity) instead of '!= None' (equality): avoids
     # invoking any custom __eq__ and is the idiomatic None test.
     return query(candidates) \
         .where(lambda dt: dt is not None) \
         .min()
Exemplo n.º 13
0
 def DefineNewObjects(self, yolo_boxes: list, pShot: PipelineShot):
     """Assign fresh object ids to boxes that matched no previous box.

     A box with object_id None was not matched against the previous
     frame (happens when len(prev_boxes) < len(boxes)) and is treated
     as a newly appeared object: it gets a new id, is recorded in the
     shot metadata and its start point is drawn on the image.
     """
     meta = self.CreateMetadata(pShot)
     # 'is None' (identity) instead of '== None'; also fixed the
     # annotation: '[]' is a list instance, not a type.
     for box in asq.query(yolo_boxes) \
             .where(lambda b: b.object_id is None):
         box.object_id = self.GetNewObjectId()
         meta_box = {}
         meta_box['id'] = box.id
         meta_box['object_id'] = box.object_id
         meta['boxes'].append(meta_box)
         box.DrawStartPoint(pShot.Shot.GetImage())
Exemplo n.º 14
0
 def FromDir(self, folder: str):
     """Load every source image in *folder* as an indexed PipelineShot list.

     NOTE(review): rebinds ``self`` to a fresh DirectoryShotsProvider and
     ignores the instance it was called on — this behaves like a factory;
     consider a @staticmethod/@classmethod, confirm with callers first.
     """
     self = DirectoryShotsProvider()
     # Keep only recognized source images; wrap each with its full path.
     shots = asq.query(os.listdir(folder)) \
             .where(lambda f: self.IsSourceImage(f)) \
             .select(lambda f: CamShot(os.path.join(folder, f))) \
             .to_list()
     self.log.debug("Loaded {} shots from directory {}".format(
         len(shots), folder))
     for s in shots:
         s.LoadImage()
     # Index shots in directory-listing order.
     return [PipelineShot(s, i) for i, s in enumerate(shots)]
Exemplo n.º 15
0
    def GetNetworkConfig(self, network: str, computername: str, platform: str):
        """Return the first network config entry matching all three filters,
        or None.

        An entry matches when *network* is contained in its 'network'
        list and its 'computername'/'platform' are either unset (falsy,
        meaning "any") or equal to the given values.
        """
        def matches(entry):
            if network not in entry.get('network'):
                return False
            wanted_computer = entry.get('computername')
            if wanted_computer and wanted_computer != computername:
                return False
            wanted_platform = entry.get('platform')
            return not wanted_platform or wanted_platform == platform

        return query(self.networks) \
            .where(matches) \
            .first_or_default(None)
    def GetShots(self, pShots: list):
        """Merge this provider's shots into *pShots* and reindex the result.

        New shots come from GetShotsProtected(); the union is keyed by
        GetTime so shots at an already-present time are collapsed, then
        every shot's Index is renumbered sequentially.
        (Annotation fixed: '[]' is a list instance, not a type.)
        """
        self.log.info(f'<<<<<< SHOTS: ***{self.name}*** >>>>>>>>>>>>>>>>>>>>>>>>>>>')
        newPShots = list(self.GetShotsProtected(pShots))
        for s in newPShots:
            self.log.debug(f'   <+++ #{s.Index} {s.Shot.filename} @{s.Shot.GetDatetime():%H:%M:%S} (full: {s.Shot.fullname})')

        # Union de-duplicates by shot time (self.GetTime as the key).
        pShots = query(pShots).union(newPShots, self.GetTime).to_list()
        # Renumber sequentially after the merge.
        for i, s in enumerate(pShots):
            s.Index = i

        return pShots
Exemplo n.º 17
0
    def get_sm_events(self,
                      start: dt.datetime,
                      end: dt.datetime,
                      filters: List[Callable],
                      limit: int = -1) -> Queryable:
        """Fetch SM events in [start, end] and apply *filters* to the result.

        Queries the bucket's events endpoint, wraps each raw event as an
        SMEvent and returns the filtered Queryable.

        NOTE(review): the default limit is -1 but the guard below checks
        'is not None', so limit=-1 is always forwarded to the API —
        presumably -1 means "no limit" server-side; confirm, or make the
        default None to actually omit the parameter.
        """

        endpoint = f"buckets/{SM_BUCKET_NAME}/events"
        params: Dict[str, str] = dict()

        # Only forward parameters the caller supplied (None means omit).
        if limit is not None:
            params["limit"] = str(limit)
        if start is not None:
            params["start"] = start.isoformat()
        if end is not None:
            params["end"] = end.isoformat()
        aw_events = self._get(endpoint, params=params).json()
        sm_events = [SMEvent(**event) for event in aw_events]
        return apply(filters, query(sm_events))
Exemplo n.º 18
0
def main():
    """Crop every chest-xray image (train/val/test, NORMAL/PNEUMONIA)
    and save the result under the mirrored 'cropped' directory tree,
    reporting per-image success or failure."""
    source_root = Path("d:/ml/input/chest_xray/chest_xray")
    target_root = Path("d:/ml/input/chest_xray/cropped")

    copy_info = tuple(
        CopyInfo(source_root.joinpath(split), target_root.joinpath(split))
        for split in ("train", "val", "test")
    )

    for info in copy_info:
        print(info.src_path)

        for child_dir in (info.src_path.joinpath("NORMAL"),
                          info.src_path.joinpath("PNEUMONIA")):
            print(f"\t{child_dir.name}")
            images = query(child_dir.iterdir()).where(is_image)

            for index, image_path in enumerate(images):
                cropped = crop(image_path)

                dst = info.dst_path.joinpath(child_dir.name, image_path.name)
                dst.parent.mkdir(parents=True, exist_ok=True)

                # Best-effort save: report the failure cause, keep going.
                text: str = "(success)"
                try:
                    cropped.save(dst)
                except Exception as ex:
                    cause = str(ex)
                    text = f"(failed: {cause})"

                print(f"\t\t{index} -- {image_path.name} {text}", flush=True)
Exemplo n.º 19
0
    def ProcessItem(self, pShot: PipelineShot, ctx: dict):
        """Track YOLO boxes from the previous shot into this one.

        Matches this shot's boxes against the previous shot's, assigns ids
        to newly appeared objects, draws start points and motion lines on
        the shot image, and records per-box tracking metadata
        (id, object_id, distance, angle, and center when debugging).
        """
        super().ProcessItem(pShot, ctx)
        pShots = ctx['items']
        meta = self.CreateMetadata(pShot)
        meta['boxes'] = []
        shot = pShot.Shot
        yolo_boxes = list(self.GetYoloBoxes(pShot))

        # No previous shot means nothing to match against.
        prevPShot = self.GetPreviousShot(pShot, pShots)
        if not prevPShot:
            boxes_last = []
        else:
            boxes_last = list(self.GetYoloBoxes(prevPShot))

        self.MatchObjects(yolo_boxes, boxes_last)
        self.DefineNewObjects(yolo_boxes, pShot)

        for box in yolo_boxes:
            box.DrawStartPoint(shot.GetImage())

            # Previous-frame box carrying the same tracked object id.
            bestMatched: TrackingBox = query(boxes_last) \
                .first_or_default(None, lambda b: b.object_id == box.object_id)
            # 'is None' (identity) instead of '== None': avoids invoking
            # any custom __eq__ on TrackingBox.
            if bestMatched is None:
                self.log.debug(f" Best matchid box not found, draw ignore: Box: B{box.id}")
                continue
            box.DrawLine(shot.GetImage(), bestMatched)
            meta_box = {}
            meta_box['id'] = box.id
            meta_box['object_id'] = box.object_id
            meta_box['distance'] = int(box.Distance(bestMatched))
            meta_box['angle'] = int(box.angle(bestMatched))
            if self.isDebug:
                meta_box['center'] = box.GetCenter()
            meta['boxes'].append(meta_box)
Exemplo n.º 20
0
 def get_items(self, path):
     """List the children of *path* on OneDrive as onedrive items."""
     children = self._client.item(drive='me', path=path).children.get()
     return query(children).select(
         lambda child: to_onedrive_item(child, os.path.join(path, child.name)))
Exemplo n.º 21
0
 def test_asq_iterable(self):
     # Wrapping a plain list must not raise.
     source = [5, 4, 3, 2, 1]
     wrapped = query(source)
Exemplo n.º 22
0
 def __any(static, path_part, lang=''):
     """True if *static* holds a (language, path) pair for *lang* whose
     path ends with *path_part*."""
     def entry_matches(entry_lang, entry_path):
         return entry_lang == lang and entry_path.endswith(path_part)

     # Each element of *static* is spread into the two helper parameters.
     return query(static).any(lambda pair: entry_matches(*pair))
Exemplo n.º 23
0
 def test_asq_non_iterable(self):
     # query() must reject a non-iterable argument.
     with self.assertRaises(TypeError):
         query(5)
Exemplo n.º 24
0
 def HasEmptyString(self):
     """True when any option label is blank after stripping — and also,
     by convention here, when no options exist at all."""
     if not self.DoesOptionExist:
         return True
     labels = query(self.AllOptions).select(
         lambda option: option.text.strip())
     return labels.any(lambda label: label == '')
Exemplo n.º 25
0
 def Labels(self):
     """Stripped, non-empty option labels; [] when no options exist."""
     if not self.DoesOptionExist:
         return []
     stripped = query(self.AllOptions).select(
         lambda option: option.text.strip())
     return stripped.where(lambda label: label != '').to_list()
Exemplo n.º 26
0
 def Options(self):
     """Wrap each raw option element in a dropdownOption."""
     wrap = lambda element: dropdownOption(self.webDriver, None, element)
     return query(self.AllOptions).select(wrap)
Exemplo n.º 27
0
 def Values(self):
     """Non-empty option 'value' attributes; [] when no options exist."""
     if not self.DoesOptionExist:
         return []
     values = query(self.AllOptions).select(
         lambda option: option.get_attribute('value'))
     return values.where(lambda value: value != '').to_list()
Exemplo n.º 28
0
    #获取行程规划
    r = requests.get(f"http://api.map.baidu.com/direction/v2/transit?origin={lat},{lng}&destination={clat},{clng}&ak={ak}")
    if r.json()["status"] == 0:
        transit = r.json()["result"]
        transit["source"] = f"{lat},{lng}丨{clat},{clng}"
        transit["CompanyId"] = data["CompanyId"]
        if db.transit.count({"$and":[{"source":transit["source"]},{"CompanyId":data["CompanyId"]}]}) == 0:
            try:
                db.transit.insert(transit)
            except:
                print("company重复")



threads = []  # thread pool

# Group postings by company name (most recent RecruitId first), then
# de-duplicate by job name within each company before queuing fetches.
for groupdata in query(data).order_by_descending(lambda x:x["RecruitId"]).group_by(lambda x:x["CompanyName"]):
    for x in query(groupdata).distinct(lambda x:x["JobName"]):
        threads.append(threading.Thread(target=getDetail_xmrc,args=(x.get("RecruitId"),)))
        threads.append(threading.Thread(target=getCompany_xmrc,args=(x.get("CompanyId"),)))
        #getDetail_xmrc(x.get("RecruitId"))
        #time.sleep(random.random()*10)
        #getCompany_xmrc(x.get("CompanyId"))
        #time.sleep(random.random()*10)
# NOTE(review): start() immediately followed by join() runs one thread at
# a time, so this loop is effectively sequential — presumably deliberate
# throttling of the scraper (note the random sleep); confirm before
# parallelizing.
for t in threads:
    t.start()
    t.join()
    time.sleep(random.random())

conn.close()
Exemplo n.º 29
0
    def __init__(self, config_file):
        """Load site configuration from *config_file* and the chosen theme.

        Reads the user's YAML config, resolves the theme directory, merges
        the theme's theme.yaml defaults under the user settings, then
        derives per-language content/target root modes and the regexes
        used to detect multi-language file and folder names.

        Aborts via fatal() when no theme is set or theme.yaml is missing.
        """
        self.config_file = config_file
        self.path = os.path.abspath(os.path.dirname(config_file.name))
        self.config = yaml.safe_load(config_file)
        if not self.config.get("theme"):
            fatal(_("theme_not_set"))
        self.theme_path = join(self.path, "themes/", self.config["theme"])

        self.build = {}
        if not exists(join(self.theme_path, "theme.yaml")):
            fatal(_("theme_not_found"))

        with open(join(self.theme_path, "theme.yaml"), "r") as yaml_file:
            self.theme_config = yaml.safe_load(yaml_file)
            yaml_file.close()  # NOTE: redundant — the with block closes it

        # Normalize the theme config down to the known keys with defaults.
        self.theme_config = {
            "data": self.theme_config.get("data", {}),
            "languages": self.theme_config.get("languages", {}),
            "menu": self.theme_config.get("menu", {}),
            "title": self.theme_config.get("title", None),
            "static": self.theme_config.get("static", "static/"),
            "morph": self.theme_config.get("morph", {}),
            "static2": self.theme_config.get("static2", None),
            "static3": self.theme_config.get("static3", None),
        }
        # User config wins over theme defaults.
        self.data = self.merge(self.theme_config, self.config)
        self.default_language = self.data.get("defaultLanguage", "")
        # check modes
        # Content-root mode: any language with its own "content" root.
        self.is_different_content_root = query(
            self.data.get("languages",
                          {}).items()).any(lambda x: "content" in x[1])
        if self.is_different_content_root:
            log.info("content different root mode")
            for wrong_language in query(self.data.get("languages", {}).items())\
                    .where(lambda x: "content" not in x[1])\
                    .select(lambda x: x[0]).to_list():
                log.warning(
                    "not set language.%s.content for content different root mode"
                    % wrong_language)

        # NOTE(review): re_keys joins language keys with '|' but is later
        # interpolated into a character class "[%s]" — for multi-character
        # codes (e.g. "en") this matches single characters, not the codes;
        # presumably only single-letter keys are expected. Confirm.
        re_keys = query(
            self.get_all_languages_keys()).aggregate(lambda a, b: a + "|" + b)

        # Target-root mode: any language with its own "target" root.
        self.is_different_target_root = query(
            self.data.get("languages",
                          {}).items()).any(lambda x: "target" in x[1])
        if self.is_different_target_root:
            log.info("target different root mode")
            for wrong_language in query(self.data.get("languages", {}).items())\
                    .where(lambda x: "target" not in x[1])\
                    .select(lambda x: x[0]).to_list():
                log.warning(
                    "not set language.%s.target for target different root mode"
                    % wrong_language)

        if len(re_keys) == 0:
            # No language keys: filenames carry no language suffix.
            self.re_content_multi_language_folder = re.compile("(?P<name>.*)$")
            self.re_content_multi_language_content = re.compile(
                "(?P<name>.*)\\.(?P<ext>\\w+)$")
        else:
            # Match "name.<lang>" folders and "name.<lang>.<ext>" files.
            self.re_content_multi_language_folder = re.compile(
                "(?P<name>.*)\\.(?P<lang>[%s])$" % re_keys)
            self.re_content_multi_language_content = re.compile(
                "(?P<name>.*)\\.(?P<lang>[%s])\\.(?P<ext>\\w+)$" % re_keys)
0
def extract_symmetry_from_vertex_and_edge_lists(vertices, edges):
    """
    Args:
        vertices: A sequence of integer vertex labels which must be
            in order around a circular embedding of the graph (the
            geometric sense of rotation is unimportant).

        edges: A sequence of 2-tuples each of which represents an
            undirected edge in the graph between two integer
            vertex labels. The sequence is not modified.

    Returns:
        A sequence of 2-tuples, where the two elements of each
        pair are themselves sequences of integers.

        The integer elements of the first sequence in each pair are
        sorted integer vertex labels. For a symmetric embedding, the
        cardinality of each of these sequences will be equal
        to the order of the rotational symmetry (e.g. when the
        first sequence of each pair contains 7 elements, the
        graph embedding has 7-fold symmetry).

        The integer elements of the second sequence in each pair are
        sorted offsets around the circular embedding from each of
        the vertices in the first sequence to each of their neighbours.
        For regular graphs, the cardinality of this sequence will be
        equal to the degree of the graph. (e.g. when the second
        sequence contains 10 offsets, the graph is of regular
        degree 10). The integer offsets are modulo the number of
        vertices in the graph, so are always positive, and in the
        'forwards' direction around the circular embedding.

        Each of the offsets in the second sequence of each pair
        represents an edge from each of the vertices in the first
        sequence. So for respective sequence lengths of 7 and 10,
        70 edges are described by each pair.

        The length of the outer sequence (i.e. the number of pairs)
        will be equal to the number of vertices in the graph, divided
        by the order of the rotational symmetry. (e.g. for a 56 vertex
        graph, with 7-fold rotational symmetry, a sequence of 8 pairs
        will be returned).

        Each edge will be recorded in the returned data structure twice
        to make it easier to see other symmetries and to avoid deciding
        which would be the canonical direction. (e.g. For a 56 vertex
        graph with regular degree 10, and 7-fold rotational symmetry,
        the returned structure will describe 8*10*7=560 connections
        between vertices, when in fact the graph has only 280 edges.)
    """
    from collections import defaultdict

    # Record every edge in both directions to help us find symmetries.
    # Work on a copy: the original version extended *edges* in place,
    # mutating the caller's list.
    directed_edges = list(edges)
    directed_edges.extend((b, a) for a, b in edges)

    # (The previous hard-coded `assert len(vertices) == 56` is gone:
    # the algorithm works for any vertex count, as documented.)
    num_vertices = len(vertices)

    # Replace each absolute to-vertex with its forward offset (modulo
    # the vertex count) around the circular embedding.
    offset_edges = [(src, (dst - src) % num_vertices)
                    for src, dst in directed_edges]

    # Group offsets by their from-vertex: vertex -> sorted offset tuple.
    offsets_by_source = defaultdict(list)
    for src, offset in offset_edges:
        offsets_by_source[src].append(offset)

    # Group from-vertices sharing an identical offset signature.
    sources_by_signature = defaultdict(list)
    for src, offsets in offsets_by_source.items():
        sources_by_signature[tuple(sorted(offsets))].append(src)

    # Emit (sorted sources, offset signature) pairs ordered by sources.
    return sorted(
        ((tuple(sorted(sources)), signature)
         for signature, sources in sources_by_signature.items()),
        key=lambda pair: pair[0])