Exemplo n.º 1
0
def validate_book(session, flush_context, instances):
    """Flush hook: collect every object touched by this flush and validate it.

    Presumably a SQLAlchemy ``before_flush``-style listener (the
    ``(session, flush_context, instances)`` signature matches) -- TODO confirm.
    Objects are validated from most local to most global so that a Split is
    checked before its Transaction, and a Transaction before its Account.
    """
    # Ordered set of objects to validate (OrderedDict keys, values unused).
    to_check = OrderedDict()
    for state, objects in {"dirty": session.dirty,
                           "new": session.new,
                           "deleted": session.deleted}.items():
        for obj in objects:
            for candidate in obj.object_to_validate(state):
                to_check[candidate] = None

    # Drop a None entry if any object ever yielded one.
    to_check.pop(None, None)

    # Local-to-global ordering: Split before Transaction before Account;
    # anything else comes last.
    from . import Account, Transaction, Split

    rank = defaultdict(lambda: 20, {Account: 10, Transaction: 5, Split: 3})
    for obj in sorted(to_check, key=lambda o: rank[type(o)]):
        obj.validate()
Exemplo n.º 2
0
def get_merge_window(csv_chapter):
    """
    compares refs which appear in English and Hebrew to help narrow down the search window
    :param CSVChapter csv_chapter:
    :return: window_start, window_end, lang
    """
    en_refs, he_refs = OrderedDict(), OrderedDict()
    en_segments, he_segments = csv_chapter.english_segments, csv_chapter.hebrew_segments

    # Map each citation found in the English text to the segment numbers it
    # appears in (a ref can occur in several segments).
    for seg_num, segment in enumerate(en_segments):
        refs = [r.normal() for r in library.get_refs_in_string(segment, 'en', citing_only=True)]
        for ref in refs:
            en_refs.setdefault(ref, list()).append(seg_num)

    for seg_num, segment in enumerate(he_segments):
        refs = [r.normal() for r in library.get_refs_in_string(segment, 'he', citing_only=True)]
        for ref in refs:
            if ref in en_refs:  # we only want refs that appear in both languages
                he_refs.setdefault(ref, list()).append(seg_num)

    # clear out en refs that did not show up in hebrew
    # (iterate a snapshot of the keys: deleting from a dict while iterating
    # its live key view raises RuntimeError on Python 3)
    for ref in list(en_refs.keys()):
        if ref not in he_refs:
            del en_refs[ref]
    assert len(en_refs) == len(he_refs)

    # make sure we have the same number of appearances of each ref
    for ref in en_refs.keys():
        en_refs[ref], he_refs[ref] = standardize_lists(en_refs[ref], he_refs[ref])
    # Flatten to (ref, english_segment) pairs ordered by segment number.
    en_refs = [(ref, segment) for ref, segs in en_refs.items() for segment in segs]
    en_refs.sort(key=lambda x: x[1])

    last_good_segment = 0
    for ref, en_seg in en_refs:
        # Closest Hebrew occurrence of the same ref.
        he_seg = min(he_refs[ref], key=lambda x: abs(x - en_seg))
        if en_seg == he_seg:
            last_good_segment = en_seg
        elif abs(en_seg - he_seg) == 1:
            # Off by exactly one: the language that is ahead closes the window.
            if en_seg > he_seg:
                window_close, lang = en_seg, 'en'
            else:
                window_close, lang = he_seg, 'he'
            break
        else:
            raise AssertionError("Chapter {}: Jump too big".format(csv_chapter.number))
    else:
        # Refs never diverged: decide from the overall segment-count difference.
        diff = abs(len(en_segments) - len(he_segments))
        if diff == 0:
            # print() with a single argument is equivalent on Python 2 and 3.
            print("Chapter {}: No Merge needed".format(csv_chapter.number))
            last_good_segment, window_close, lang = len(en_segments), len(en_segments), 'en'
        elif diff > 1:
            raise AssertionError("Chapter {}: Jump too big".format(csv_chapter.number))
        else:
            if len(en_segments) > len(he_segments):
                window_close = len(en_segments) - 1  # the actual index of the last segment
                lang = 'en'
            else:
                window_close = len(he_segments) - 1
                lang = 'he'
    return last_good_segment, window_close, lang
Exemplo n.º 3
0
    def sortEntry(self, item):
        """Return *item* with deterministic ordering applied.

        A dict becomes an OrderedDict sorted by key; a list is sorted in
        place by each element's ``'id'`` and its elements are then
        recursively sorted.  Anything else -- or anything whose sorting
        fails -- is returned unchanged.

        NOTE(review): unlike the sibling ``sort_entry`` function, dict
        values are NOT recursed into here; behavior preserved as-is.
        """
        newitem = item
        try:
            # ``type(...) is dict`` keeps the original exact-type semantics
            # (an OrderedDict input is deliberately left alone).
            if type(newitem) is dict:
                newitem = OrderedDict(sorted(newitem.items(), key=lambda t: t[0]))
            elif type(newitem) is list:
                newitem.sort(key=lambda obj: obj['id'])
                for index, entry in enumerate(newitem):
                    newitem[index] = self.sortEntry(entry)
        except Exception:
            # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
            # propagate; any sorting failure (unorderable keys, element
            # missing 'id') simply leaves the item as it currently is.
            pass

        return newitem
Exemplo n.º 4
0
def cluster(res):
    """Group item indices by cluster label.

    ``res`` is a ``(num_cluster, cluster_labels)`` pair where
    ``cluster_labels[i]`` is the label of item ``i``.  Returns the
    per-cluster index lists (ascending within each cluster, singleton
    clusters dropped), ordered largest cluster first.
    """
    num_cluster, labels = res[0], res[1]
    grouped = OrderedDict()
    for idx, label in enumerate(labels):
        grouped.setdefault(label, []).append(idx)
    print("------------==============--------------")
    # Labels are assumed to be 0..num_cluster-1; missing labels raise KeyError.
    buckets = [sorted(grouped[label]) for label in range(0, num_cluster)]
    result = [bucket for bucket in buckets if len(bucket) > 1]
    result.sort(key=len, reverse=True)
    return result
Exemplo n.º 5
0
def sort_entry(item):
    """Recursively normalise the ordering of *item*.

    A dict becomes an OrderedDict sorted by key with each value recursed;
    a list is sorted in place by each element's ``'id'`` with each element
    recursed.  Anything else -- or anything whose sorting fails -- is
    returned unchanged (the failure is logged).
    """
    newitem = item
    try:
        # ``type(...) is dict`` keeps the original exact-type check
        # (an OrderedDict input is deliberately not re-sorted).
        if type(newitem) is dict:
            newitem = OrderedDict(sorted(newitem.items(), key=lambda t: t[0]))
            for index in newitem:
                newitem[index] = sort_entry(newitem[index])
        elif type(newitem) is list:
            newitem.sort(key=lambda obj: obj['id'])
            for index, _ in enumerate(newitem):
                newitem[index] = sort_entry(newitem[index])
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # propagate; other failures are logged and the item returned as-is.
        logger.error('Sort failed for item %s' % type(item))

    return newitem
Exemplo n.º 6
0
    def bind_args(self, bind_args: OrderedDict):
        """Bind invocation arguments to this function's declared parameters.

        ``bind_args`` maps argument keys to invocation arguments; integer
        keys denote positional arguments (see the ``isinstance(key, int)``
        filter below), other keys presumably name keyword arguments --
        TODO confirm against the caller.

        Raises ``InvokeArgCountError`` when too few arguments are supplied
        and ``UnusedParamError`` when some supplied arguments match no
        parameter.  Returns a verified ``FuncBind``.
        """
        # Every declared parameter without a default value must be supplied.
        num_args_no_default = sum(
            int(not arg.default_val) for arg in self.func_args)
        if len(bind_args) < num_args_no_default:
            raise InvokeArgCountError(
                f"Function {self.name} requires at least {num_args_no_default} args, "
                f"but was called with {len(bind_args)}")

        # Let each declared parameter claim its argument; ``matched`` maps
        # the consumed bind_args keys to match records.
        matched = OrderedDict()
        for arg in self.func_args:
            arg.bind_to_args(bind_args, matched)

        # Anything the parameters did not consume is an error.
        unbound = [str(key) for key in bind_args.keys() if key not in matched]
        if unbound:
            names = ', '.join(unbound)
            raise UnusedParamError(
                f"Function {self.name} failed to bind params: {names}")

        # Presumably each match record is a (func_arg, invoke_arg,
        # use_default) triple, given the unpacking below -- TODO confirm
        # against FuncArg.bind_to_args.
        positional_args = [
            match for key, match in matched.items() if isinstance(key, int)
        ]

        # NOTE(review): ``arg.index`` below reads the *leftover* loop
        # variable from the ``for arg in self.func_args`` loop above (the
        # comprehension binds only func_arg/invoke_arg/use_default), so
        # every FuncBindArg gets the same index and the sort below becomes
        # a no-op.  This looks like it should be ``func_arg.index`` --
        # confirm before changing.
        bind_args = [
            FuncBindArg(index=arg.index,
                        invoke_arg=invoke_arg,
                        bind_to_arg=func_arg,
                        use_default=use_default)
            for func_arg, invoke_arg, use_default in positional_args
        ]

        bind_args.sort(key=lambda arg: arg.index)

        binding = FuncBind(func_def=self,
                           ret_type=self.solve_ret_type(),
                           bind_args=bind_args)

        # Final consistency check on the constructed binding.
        binding.verify_predictates()

        return binding
Exemplo n.º 7
0
def generate_vocabs(texts):
    """Build word/index vocabularies from *texts*.

    Each text may be a pre-tokenised list or a raw string (tokenised with
    ``text_to_word_sequence``).  Words are ranked by frequency (ties keep
    first-seen order); indices start at 1 and ``OOV_TOKEN`` is appended if
    absent.  Returns ``(word2idx, idx2word, max_seq_len)``.
    """
    counts = OrderedDict()
    longest = 0
    for text in texts:
        tokens = text if isinstance(text, list) else text_to_word_sequence(text)
        longest = max(longest, len(tokens))
        for token in tokens:
            # Updating an existing key keeps its position in the OrderedDict,
            # preserving first-seen order for the stable sort below.
            counts[token] = counts.get(token, 0) + 1

    ranked = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
    word2idx = {word: pos for pos, (word, _) in enumerate(ranked, start=1)}
    if word2idx.get(OOV_TOKEN) is None:
        word2idx[OOV_TOKEN] = len(word2idx) + 1
    idx2word = {idx: word for word, idx in word2idx.items()}
    return word2idx, idx2word, longest
Exemplo n.º 8
0
def standardize_data(data):
    """Standardize photometric data by converting to a structured numpy array
    with standard column names (if necessary) and sorting entries in order of
    increasing time.

    Parameters
    ----------
    data : `~astropy.table.Table` or `~numpy.ndarray` or `dict`

    Returns
    -------
    standardized_data : `~numpy.ndarray`
    """

    # Deprecation notice: not part of the public API (removal planned for 2.0).
    warn_once('standardize_data', '1.5', '2.0',
              'This function not intended for public use; open an issue at '
              'https://github.com/sncosmo/sncosmo/issues if you need this '
              'functionality.')

    # Tables are handled through their structured-array view.
    if isinstance(data, Table):
        data = np.asarray(data)

    if isinstance(data, np.ndarray):
        colnames = data.dtype.names

        # Check if the data already complies with what we want
        # (correct column names & ordered by date)
        if (set(colnames) == set(PHOTDATA_ALIASES.keys()) and
                np.all(np.ediff1d(data['time']) >= 0.)):
            return data

    elif isinstance(data, dict):
        colnames = data.keys()

    else:
        raise ValueError('Unrecognized data type')

    # Create mapping from lowercased column names to originals
    lower_to_orig = dict([(colname.lower(), colname) for colname in colnames])

    # Set of lowercase column names
    lower_colnames = set(lower_to_orig.keys())

    # For each standard column, require exactly one matching input column
    # (each PHOTDATA_ALIASES value is a set of accepted lowercase names).
    orig_colnames_to_use = []
    for aliases in PHOTDATA_ALIASES.values():
        i = lower_colnames & aliases
        if len(i) != 1:
            raise ValueError('Data must include exactly one column from {0} '
                             '(case independent)'.format(', '.join(aliases)))
        orig_colnames_to_use.append(lower_to_orig[i.pop()])

    if isinstance(data, np.ndarray):
        # Select the matched columns and rename them to the standard names.
        new_data = data[orig_colnames_to_use].copy()
        new_data.dtype.names = list(PHOTDATA_ALIASES.keys())

    else:
        # dict input: rebuild under the standard keys, then convert to a
        # structured array.
        new_data = OrderedDict()
        for newkey, oldkey in zip(PHOTDATA_ALIASES.keys(),
                                  orig_colnames_to_use):
            new_data[newkey] = data[oldkey]

        new_data = dict_to_array(new_data)

    # Sort by time, if necessary.
    if not np.all(np.ediff1d(new_data['time']) >= 0.):
        new_data.sort(order=['time'])

    return new_data
Exemplo n.º 9
0
    def get_nav_menu(self):
        """Build the navigation menu: the custom site menus followed by one
        title-sorted entry per app, each listing its registered (non-hidden)
        model admins.

        NOTE(review): Python 2 code -- relies on the ``unicode`` builtin and
        sorts ``nav_menu.values()`` in place as a list.
        """
        site_menu = list(self.get_site_menu() or [])
        had_urls = []

        # Collect every url already present in the custom site menu tree so
        # auto-generated model entries do not duplicate them.
        def get_url(menu, had_urls):
            if "url" in menu:
                had_urls.append(menu["url"])
            if "menus" in menu:
                for m in menu["menus"]:
                    get_url(m, had_urls)

        get_url({"menus": site_menu}, had_urls)

        nav_menu = OrderedDict()

        for model, model_admin in self.admin_site._registry.items():
            if getattr(model_admin, "hidden_menu", False):
                continue
            app_label = model._meta.app_label
            app_icon = None
            model_dict = {
                "title": unicode(capfirst(model._meta.verbose_name_plural)),
                "url": self.get_model_url(model, "changelist"),
                "icon": self.get_model_icon(model),
                "perm": self.get_model_perm(model, "view"),
                "order": model_admin.order,
            }
            # Skip models whose changelist url already appears in the site menu.
            if model_dict["url"] in had_urls:
                continue

            app_key = "app:%s" % app_label
            if app_key in nav_menu:
                nav_menu[app_key]["menus"].append(model_dict)
            else:
                # Find app title
                app_title = unicode(app_label.title())
                if app_label.lower() in self.apps_label_title:
                    app_title = self.apps_label_title[app_label.lower()]
                else:
                    # Fall back to a ``verbose_name``/``app_title`` attribute
                    # defined on the model's parent module, if present.
                    mods = model.__module__.split(".")
                    if len(mods) > 1:
                        mod = ".".join(mods[0:-1])
                        if mod in sys.modules:
                            mod = sys.modules[mod]
                            if "verbose_name" in dir(mod):
                                app_title = getattr(mod, "verbose_name")
                            elif "app_title" in dir(mod):
                                app_title = getattr(mod, "app_title")
                # find app icon
                if app_label.lower() in self.apps_icons:
                    app_icon = self.apps_icons[app_label.lower()]

                nav_menu[app_key] = {"title": app_title, "menus": [model_dict]}

            # The app's icon: an explicit per-app icon wins; otherwise the
            # first model icon that differs from the default is kept.
            app_menu = nav_menu[app_key]
            if app_icon:
                app_menu["first_icon"] = app_icon
            elif ("first_icon" not in app_menu or app_menu["first_icon"] == self.default_model_icon) and model_dict.get(
                "icon"
            ):
                app_menu["first_icon"] = model_dict["icon"]

            if "first_url" not in app_menu and model_dict.get("url"):
                app_menu["first_url"] = model_dict["url"]

        # Order models within each app, then the app entries by title.
        for menu in nav_menu.values():
            menu["menus"].sort(key=sortkeypicker(["order", "title"]))

        nav_menu = nav_menu.values()
        nav_menu.sort(key=lambda x: x["title"])

        site_menu.extend(nav_menu)

        return site_menu
Exemplo n.º 10
0
    def execute(self):
        """Poll this answer for new upvoters, commenters and collectors and
        hand any new activity to the manager; re-queues itself for the next
        run before doing any work."""
        # Re-queue up front so that a stall in the code below cannot keep
        # this task out of the queue.
        if self.continue_task:
            answer_task_queue.append(self)
        else:
            return

        new_upvoters = deque()
        new_commenters = OrderedDict()
        new_collectors = []
        # Pull fresh answer state.
        self.answer.refresh()

        if self.answer.deleted:
            self._delete_answer()
            return

        # Note: put older event in lower index

        # Add upvoters; anonymous users are not recorded.
        if self.answer.upvote_num > self.upvote_num:
            self.upvote_num = self.answer.upvote_num
            for upvoter in self.answer.upvoters:
                if upvoter is ANONYMOUS:
                    continue
                # Stop at the first already-known upvoter: everything past it
                # was handled in a previous run.
                if upvoter.id in self.manager.upvoters:
                    break
                else:
                    new_upvoters.appendleft({
                        'uid': upvoter.id,
                        'time': self.get_upvote_time(upvoter, self.answer)
                    })
            if new_upvoters:
                self.last_update_time = new_upvoters[-1]['time']

        if not self._check_answer_activation():
            return  # do NOT delete the answer!!

        # Add commenters; anonymous users are not recorded.
        # One person may post several comments, so we must also check that it
        # is not a commenter we already know.  Note that a single batch of new
        # comments can itself contain several comments by the same person, and
        # we need to keep the earliest one: the dict-keyed-by-author logic
        # below lets the earlier comment by the same commenter replace the
        # newer one.
        if self.answer.comment_num > self.comment_num:
            self.comment_num = self.answer.comment_num
            for comment in self.answer.latest_comments:
                if comment.author is ANONYMOUS:
                    continue
                if comment.author.id in self.manager.commenters:
                    if comment.creation_time <= self.manager.lastest_comment_time:
                        break
                else:
                    new_commenters[comment.author.id] = {
                        'uid': comment.author.id,
                        'time': comment.creation_time,
                        'cid': comment.cid
                    }
            if new_commenters:
                new_commenters = list(new_commenters.values())
                new_commenters.sort(key=lambda x: x['time'])

        # Add collectors.
        # Collections are not returned in time order, so we have to scan
        # them all.
        if self.answer.collect_num > self.collect_num:
            self.collect_num = self.answer.collect_num
            for collection in self.answer.collections:
                if collection.owner.id not in self.manager.collectors:
                    new_collectors.append({
                        'uid': collection.owner.id,
                        'time': self.get_collect_time(self.answer, collection),
                        'cid': collection.id
                    })
            new_collectors.sort(key=lambda x: x['time'])

        self.manager.sync_affected_users(new_upvoters=new_upvoters,
                                         new_commenters=new_commenters,
                                         new_collectors=new_collectors)
Exemplo n.º 11
0
def component(request, organization, project, component_name):
    """Show one component from a project.

    Renders ``component.html`` with the component's control implementations
    grouped by control family, the component's evidence list, the full
    control catalog (for adding new implementations), and the source files
    holding control implementation text.  Returns a plain error string when
    the organization/project or the component cannot be loaded.
    """

    # Load the project.
    try:
        project = load_project(organization, project)
    except ValueError:
        return "Organization `{}` project `{}` in URL not found.".format(
            organization, project)

    # Load the component.
    try:
        component = opencontrol.load_project_component(project, component_name)
    except ValueError:
        return "Component `{}` in URL not found in project.".format(
            component_name)

    # Each control's metadata, such as control names and control family names,
    # is loaded from standards. Load the standards first.
    standards = opencontrol.load_project_standards(project)

    # Load the component's controls.
    controlimpls = list(
        opencontrol.load_project_component_controls(component, standards))

    # Group the controls by control family, and sort the families and the controls within them.
    # Iterate over the controls....
    from collections import OrderedDict
    control_families = OrderedDict()
    for controlimpl in controlimpls:
        # If this is the first time we're seeing this control family, make a new
        # bucket for the control family.
        fam_id = (controlimpl["standard"]["id"], controlimpl["family"]["id"])
        if fam_id not in control_families:
            control_families[fam_id] = {
                "id":
                controlimpl["family"]["id"],
                "name":
                controlimpl["family"]["name"],
                "abbrev":
                controlimpl["family"]["abbrev"],
                "sort_key": (controlimpl["standard"]["name"],
                             controlimpl["family"]["sort_key"]),
                "standard":
                controlimpl["standard"],
                "controls": [],
            }

        # Put this control into the bucket for its family.
        control_families[fam_id]["controls"].append(controlimpl)

    # Sort the families and then the controls within them.
    control_families = list(control_families.values())
    control_families.sort(key=lambda controlfamily: controlfamily["sort_key"])
    for control_family in control_families:
        control_family["controls"].sort(
            key=lambda controlimpl: controlimpl["sort_key"])

    # For editing controls, we offer a list of evidence to attach to each control.
    evidence = list(opencontrol.load_project_component_evidence(component))

    # Make a sorted list of controls --- the control catalog --- that the user can
    # draw from when adding new control implementations to the component.
    control_catalog = []
    for standard in standards.values():
        for control in standard["controls"].values():
            control = dict(control)  # clone
            control['standard'] = {
                "id": standard["id"],
                "name": standard["name"],
            }
            # Resolve the control's family id to its family record (may be
            # None when the standard does not define that family).
            control['family'] = standard['families'].get(control['family'])
            control_catalog.append(control)
    control_catalog.sort(key=lambda control: control['sort_key'])

    # Also make a sorted list of source files containing control implementation text.
    # In OpenControl, all controls are in component.yaml. But we support breaking the
    # controls out into separate files, and when adding a new control the user can
    # choose which file to put it in. In case no controls are in the component.yaml
    # file, ensure it is in the list, and make sure it comes first.
    import os.path
    source_files = set()
    source_files.add(os.path.join(component['path'], 'component.yaml'))
    for controlimpl in controlimpls:
        source_files.add(controlimpl['source_file'])
    source_files = sorted(source_files,
                          key=lambda s: (not s.endswith("component.yaml"), s))

    # Done.
    return render_template(
        request,
        'component.html',
        project=project,
        component=component,
        control_families=control_families,
        evidence=evidence,
        control_catalog=
        control_catalog,  # used for creating a new control in the component
        source_files=
        source_files,  # used for creating a new control in the component
        implementation_status_css_classes=implementation_status_css_classes,
        stats=compute_control_implementation_statistics(controlimpls),
    )
Exemplo n.º 12
0
    def get_nav_menu(self):
        """Build the navigation menu: the custom site menus followed by one
        title-sorted entry per app, each listing its registered (non-hidden)
        model admins sorted by ``order`` then ``title``."""
        site_menu = list(self.get_site_menu() or [])
        # print(site_menu,'1111111')  # empty list at this point
        had_urls = []

        # Collect every url already present in the custom site menu tree so
        # auto-generated model entries do not duplicate them.
        def get_url(menu, had_urls):
            if 'url' in menu:
                had_urls.append(menu['url'])
            if 'menus' in menu:
                for m in menu['menus']:
                    get_url(m, had_urls)

        get_url({'menus': site_menu}, had_urls)

        nav_menu = OrderedDict()

        for model, model_admin in self.admin_site._registry.items():
            if getattr(model_admin, 'hidden_menu', False):
                continue
            app_label = model._meta.app_label
            app_icon = None
            model_dict = {
                'title': smart_text(capfirst(model._meta.verbose_name_plural)),
                'url': self.get_model_url(model, "changelist"),
                'icon': self.get_model_icon(model),
                'perm': self.get_model_perm(model, 'view'),
                'order': model_admin.order,
            }
            # Skip models whose changelist url already appears in the site menu.
            if model_dict['url'] in had_urls:
                continue

            app_key = "app:%s" % app_label
            if app_key in nav_menu:
                nav_menu[app_key]['menus'].append(model_dict)
            else:
                # Find app title
                app_title = smart_text(app_label.title())
                if app_label.lower() in self.apps_label_title:
                    app_title = self.apps_label_title[app_label.lower()]
                else:
                    app_title = smart_text(
                        apps.get_app_config(app_label).verbose_name)
                # find app icon
                if app_label.lower() in self.apps_icons:
                    app_icon = self.apps_icons[app_label.lower()]

                nav_menu[app_key] = {
                    'title': app_title,
                    'menus': [model_dict],
                }

            # The app's icon: an explicit per-app icon wins; otherwise the
            # first model icon that differs from the default is kept.
            app_menu = nav_menu[app_key]
            if app_icon:
                app_menu['first_icon'] = app_icon
            elif ('first_icon' not in app_menu or app_menu['first_icon']
                  == self.default_model_icon) and model_dict.get('icon'):
                app_menu['first_icon'] = model_dict['icon']

            if 'first_url' not in app_menu and model_dict.get('url'):
                app_menu['first_url'] = model_dict['url']

        # Order models within each app, then the app entries by title.
        for menu in nav_menu.values():
            menu['menus'].sort(key=sortkeypicker(['order', 'title']))

        nav_menu = list(nav_menu.values())
        nav_menu.sort(key=lambda x: x['title'])
        # print(nav_menu,'nnnnnnnnn')  # nav seems to hold our own registrations
        # NOTE(review): the commented-out block below is dead experimental
        # code for merging "resource" menus into "hall" menus; consider
        # removing it.
        # try:
        #     # custom HD/SD menus
        #     from hall.apps import HallConfig
        #     hall_title = HallConfig.verbose_name
        #     from resource_.apps import ResourceConfig
        #     resource_title = ResourceConfig.verbose_name
        #     tmp_menus = []
        #     for index,item_dict in enumerate(nav_menu):
        #         if item_dict["title"] == resource_title:
        #             tmp_menus = item_dict['menus']
        #             del nav_menu[index]
        #     for item_dict in nav_menu:
        #         if item_dict["title"] == hall_title:
        #             item_dict['menus'].extend(tmp_menus)
        # except:
        #     pass
        # try:
        #     from hallSQ.apps import HallsqConfig
        #     hallSQ_title = HallsqConfig.verbose_name
        #     from resourceSQ.apps import ResourcesqConfig
        #     resourceSQ_title = ResourcesqConfig.verbose_name
        #     tmp_menus = []
        #     for index, item_dict in enumerate(nav_menu):
        #         if item_dict["title"] == resourceSQ_title:
        #             tmp_menus = item_dict['menus']
        #             del nav_menu[index]
        #     for item_dict in nav_menu:
        #         if item_dict["title"] == hallSQ_title:
        #             item_dict['menus'].extend(tmp_menus)
        # except:
        #     pass
        # try:
        #     from hall.apps import HallConfig
        #     hall_title = HallConfig.verbose_name
        #     from resource_.apps import ResourceConfig
        #     resource_title = ResourceConfig.verbose_name
        #     tmp_menus = []
        #     for item_dict in nav_menu:
        #         if item_dict["title"] == resource_title:
        #             tmp_menus = item_dict['menus']
        #     for item_dict in nav_menu:
        #         if item_dict["title"] == hall_title:
        #             item_dict['menus'].appends(tmp_menus)
        #     print(tmp_menus,'tttttttt')
        #
        # except:
        #     pass
        site_menu.extend(nav_menu)
        # print(site_menu)
        return site_menu
Exemplo n.º 13
0
    def get_nav_menu(self):
        """Return the site navigation menu.

        Starts from the custom site menu, then appends one entry per app
        (sorted by title) listing that app's registered, non-hidden model
        admins (each app's models sorted by ``order`` then ``title``).
        """
        site_menu = list(self.get_site_menu() or [])

        # URLs already present anywhere in the custom menu tree; model
        # entries duplicating one of these are skipped below.
        known_urls = []

        def collect_urls(node):
            if 'url' in node:
                known_urls.append(node['url'])
            for child in node.get('menus', ()):
                collect_urls(child)

        collect_urls({'menus': site_menu})

        nav_menu = OrderedDict()

        for model, model_admin in self.admin_site._registry.items():
            if getattr(model_admin, 'hidden_menu', False):
                continue
            app_label = model._meta.app_label
            lower_label = app_label.lower()
            app_icon = None
            model_dict = {
                'title': smart_text(capfirst(model._meta.verbose_name_plural)),
                'url': self.get_model_url(model, "changelist"),
                'icon': self.get_model_icon(model),
                'perm': self.get_model_perm(model, 'view'),
                'order': model_admin.order,
            }
            if model_dict['url'] in known_urls:
                continue

            app_key = "app:%s" % app_label
            if app_key not in nav_menu:
                # First model seen for this app: resolve its title and icon.
                if lower_label in self.apps_label_title:
                    app_title = self.apps_label_title[lower_label]
                else:
                    app_title = smart_text(
                        apps.get_app_config(app_label).verbose_name)
                if lower_label in self.apps_icons:
                    app_icon = self.apps_icons[lower_label]
                nav_menu[app_key] = {'title': app_title, 'menus': []}

            app_menu = nav_menu[app_key]
            app_menu['menus'].append(model_dict)

            # Explicit per-app icon wins; otherwise keep the first model icon
            # that differs from the default.
            if app_icon:
                app_menu['first_icon'] = app_icon
            elif model_dict.get('icon') and (
                    'first_icon' not in app_menu
                    or app_menu['first_icon'] == self.default_model_icon):
                app_menu['first_icon'] = model_dict['icon']

            if 'first_url' not in app_menu and model_dict.get('url'):
                app_menu['first_url'] = model_dict['url']

        for app_entry in nav_menu.values():
            app_entry['menus'].sort(key=sortkeypicker(['order', 'title']))

        site_menu.extend(sorted(nav_menu.values(), key=lambda e: e['title']))

        return site_menu
Exemplo n.º 14
0
    def get_nav_menu(self):
        """Compose the navigation menu: the hand-written site menu followed
        by an auto-generated, title-sorted section per app containing its
        registered (non-hidden) model admins."""
        site_menu = list(self.get_site_menu() or [])

        existing_urls = []

        # Walk the custom menu tree and remember every url it already links.
        def remember_urls(menu):
            if 'url' in menu:
                existing_urls.append(menu['url'])
            if 'menus' in menu:
                for sub in menu['menus']:
                    remember_urls(sub)
        remember_urls({'menus': site_menu})

        apps_menu = OrderedDict()

        for model, model_admin in self.admin_site._registry.items():
            # Model admins may opt out of the menu entirely.
            if getattr(model_admin, 'hidden_menu', False):
                continue

            label = model._meta.app_label
            entry = {
                'title': smart_text(capfirst(model._meta.verbose_name_plural)),
                'url': self.get_model_url(model, "changelist"),
                'icon': self.get_model_icon(model),
                'perm': self.get_model_perm(model, 'view'),
                'order': model_admin.order,
            }
            # Already reachable through the hand-written site menu.
            if entry['url'] in existing_urls:
                continue

            key = "app:%s" % label
            icon_override = None
            if key in apps_menu:
                apps_menu[key]['menus'].append(entry)
            else:
                # App title: explicit mapping first, then the AppConfig name.
                try:
                    title = self.apps_label_title[label.lower()]
                except KeyError:
                    title = smart_text(apps.get_app_config(label).verbose_name)
                if label.lower() in self.apps_icons:
                    icon_override = self.apps_icons[label.lower()]
                apps_menu[key] = {
                    'title': title,
                    'menus': [entry],
                }

            bucket = apps_menu[key]
            if icon_override:
                bucket['first_icon'] = icon_override
            elif ('first_icon' not in bucket
                  or bucket['first_icon'] == self.default_model_icon) and entry.get('icon'):
                bucket['first_icon'] = entry['icon']

            if 'first_url' not in bucket and entry.get('url'):
                bucket['first_url'] = entry['url']

        for bucket in apps_menu.values():
            bucket['menus'].sort(key=sortkeypicker(['order', 'title']))

        generated = list(apps_menu.values())
        generated.sort(key=lambda b: b['title'])

        site_menu.extend(generated)

        return site_menu
Exemplo n.º 15
0
def getvars(*args,
            glob: bool = True,
            ws: [dict, type(None)] = None,
            as_dict: bool = False,
            sort: bool = False,
            sortkey: object = None,
            reverse: bool = False) -> object:
    """Collects a subset of variables from a workspace or a dictionary.

    Returns a (possibly sorted) list of the variables if found, or an empty
    list; when as_dict is True, an OrderedDict is returned instead.

    Var-positional parameters:
    ==========================
    *args: selection criterion. This may be:
        1) a string or a sequence of strings, each containing either a
            shell-type "glob" expression, or a regular expression (a
            "regexp", see python's re module for help about regexps).

            For example, to select variables with names beginning with
            "data", the selection string may be either

            "data*" (a shell-type glob) or "^data*" (a regexp string)

            Whether the selection string is interpreted as a glob or a
            regexp depends on the value of the "glob" parameter (see below).

        2) a type, or an iterable (list, tuple) of types

            This allows to select all ws variables of the specified type(s).

    Named parameters:
    =================
    glob: bool, default is True.
        When True, the selection strings in args are treated as UNIX
        shell-type globs; otherwise, they are treated as regular expression
        strings.

    ws: a dictionary or None (default).
        When a dict, its keys must all be strings, and it represents the
        namespace where the variables are searched. This can be:
            a) a global namespace as returned by globals(),
            b) a local namespace as returned by locals(), or vars(),
            c) an object's namespace as returned by vars(object)
                (technically the __dict__ attribute of the object).

        When None, the function searches inside the user namespace. This is
        a reference to the console kernel's namespace and is the same as the
        "workspace" attribute of Scipyen's main window (referenced as the
        "mainWindow" variable in the console namespace).

    as_dict: bool, default False.
        When True, return an OrderedDict mapping variable names to objects;
        when False (the default), return a list of objects.

    sort: bool, default is False.
        Sort the variables according to their name (by default) or by
        sortkey, when one is given.

    sortkey: None (default), or an object that is valid as a sort key for
        list sorting (see sorted() or list.sort()).

    reverse: bool, default is False.
        When sort is True, the data is sorted in reverse order. Otherwise,
        this is ignored.

    Returns:
    ========
    a list or an OrderedDict (see as_dict).

    Examples:
    =========
    ret = getvars(some_type, ws=globals())
        All variables in the user namespace that are instances of some_type.

    ret = getvars(list_or_tuple_of_type_objects, ws=globals())
        All variables that are instances of any of the given types.

    ret = getvars(regexp, glob=False, ws=globals())
    ret = getvars(glob_pattern, glob=True, ws=globals())
        All variables whose names match the pattern.

    NOTE: The function calls lsvars(...) to select the variables.

    NOTE: The function was designed to complement the %who, %who_ls and
        %whos IPython linemagics, which conspicuously lack the facility to
        filter their output according to variable names or types. It is NOT
        thread safe -- if the contents of the "ws" workspace are
        concurrently modified by another thread, it may raise an exception.

    See also: lsvars(), sorted()
    """
    if ws is None:
        # Fall back to Scipyen's console (user) namespace: walk the outer
        # frames until one whose globals hold "mainWindow" (hack to find the
        # "global" namespace accessed from within the IPython console).
        frames_list = inspect.getouterframes(inspect.currentframe())
        for (n, f) in enumerate(frames_list):
            if "mainWindow" in f[0].f_globals.keys():
                ws = f[0].f_globals["mainWindow"].workspace
                break

    var_names = lsvars(*args, glob=glob, ws=ws)

    if as_dict:
        items = [(name, ws[name]) for name in var_names]

        if sort:
            # BUG FIX: the previous code skipped sorting entirely when
            # sortkey was None and ignored "reverse"; default now sorts by
            # variable name, as documented.
            key = sortkey if sortkey is not None else (lambda item: item[0])
            items.sort(key=key, reverse=reverse)

        return OrderedDict(items)

    if sort and sortkey is None:
        # Default sort: by variable name (honouring "reverse").
        var_names = sorted(var_names, reverse=reverse)

    ret = [ws[name] for name in var_names]

    if sort and sortkey is not None:
        ret.sort(key=sortkey, reverse=reverse)

    return ret
Exemplo n.º 16
0
    def get_nav_menu(self):
        """Build the full navigation menu.

        Starts from the user-defined site menu (get_site_menu()), then
        appends one entry per registered app, each listing its visible
        model changelists. Models whose changelist URL already appears in
        the site menu are skipped. Returns the combined list of menu dicts.
        """
        site_menu = list(self.get_site_menu() or [])
        known_urls = []

        def collect_urls(menu, urls):
            # Recursively record every "url" already present in the
            # user-defined site menu, so duplicates are skipped below.
            if "url" in menu:
                urls.append(menu["url"])
            for sub_menu in menu.get("menus", ()):
                collect_urls(sub_menu, urls)

        collect_urls({"menus": site_menu}, known_urls)

        nav_menu = OrderedDict()

        for model, model_admin in self.admin_site._registry.items():
            if getattr(model_admin, "hidden_menu", False):
                continue
            app_label = model._meta.app_label
            app_icon = None
            model_dict = {
                "title": smart_text(capfirst(model._meta.verbose_name_plural)),
                "url": self.get_model_url(model, "changelist"),
                "icon": self.get_model_icon(model),
                "perm": self.get_model_perm(model, "view"),
                "order": model_admin.order,
            }
            if model_dict["url"] in known_urls:
                continue

            app_key = "app:%s" % app_label
            if app_key in nav_menu:
                nav_menu[app_key]["menus"].append(model_dict)
            else:
                # App title: explicit override wins, otherwise the app
                # config's verbose_name. (A dead initial assignment of
                # app_label.title() was removed here -- it was always
                # overwritten.)
                if app_label.lower() in self.apps_label_title:
                    app_title = self.apps_label_title[app_label.lower()]
                else:
                    app_title = smart_text(
                        apps.get_app_config(app_label).verbose_name)
                # Find app icon (optional override).
                if app_label.lower() in self.apps_icons:
                    app_icon = self.apps_icons[app_label.lower()]

                nav_menu[app_key] = {
                    "title": app_title,
                    "menus": [model_dict],
                }

            app_menu = nav_menu[app_key]
            if app_icon:
                app_menu["first_icon"] = app_icon
            elif ("first_icon" not in app_menu or app_menu["first_icon"]
                  == self.default_model_icon) and model_dict.get("icon"):
                # Use the first non-default model icon as the app icon.
                app_menu["first_icon"] = model_dict["icon"]

            if "first_url" not in app_menu and model_dict.get("url"):
                app_menu["first_url"] = model_dict["url"]

        # Order models within each app, then apps by title.
        for menu in nav_menu.values():
            menu["menus"].sort(key=sortkeypicker(["order", "title"]))

        app_entries = list(nav_menu.values())
        app_entries.sort(key=lambda entry: entry["title"])

        site_menu.extend(app_entries)

        return site_menu
Exemplo n.º 17
0
def getCombinedResults(request, _id):
    """Django view: rank stored images against a query image by combining
    two descriptors.

    Computes similarity scores for the query image with both the
    region-based shape descriptor (RBSD, Zernike moments) and the color
    layout descriptor (CLD). Images appearing in both result sets get the
    average of their two scores, capped at 100. Returns a JsonResponse with
    the top 200 combined, CLD-only and RBSD-only results, or an
    {'error': ...} payload on failure.
    """
    try:
        image_instance = QueryImage.objects.get(_id=_id)
        media_path = os.path.join(settings.BASE_DIR, 'media')
        image_path = os.path.join(media_path, str(image_instance.file))

        # Region-based shape descriptor similarity (Zernike moments).
        rbsd = RBSDescriptor()
        img_array = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
        img_array = rbsd.image_preprocessing(img_array)
        q_moment = rbsd.zernike_moments(img_array)
        sim_rbsd = rbsd.similarity(q_moment)
        sim_rbsd.sort(key=lambda x: x['similarity'], reverse=True)

        # Color layout descriptor similarity.
        cld = CLDescriptor()
        img_array = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
        descriptor = cld.compute(img_array).reshape(1, -1)

        sim_cld = get_similarity(descriptor)
        sim_cld.sort(key=lambda x: x['similarity'], reverse=True)

        # Merge the two result sets, keyed by image name.
        sim = OrderedDict()

        for item in sim_rbsd:
            sim[item['name']] = item

        for item in sim_cld:
            if item['name'] in sim:
                # Image found by both descriptors: average the two scores,
                # clamping to the 0-100 scale.
                avg = np.average([sim[item['name']]['similarity'],
                                  item['similarity']])
                if avg > 100:
                    avg = float(100)
                sim[item['name']]['similarity'] = avg

        sim = list(sim.values())
        sim.sort(key=lambda x: x['similarity'], reverse=True)

        response = JsonResponse({
            'result': sim[:200],
            'cld': sim_cld[:200],
            'rbsd': sim_rbsd[:200],
            'features': ['Combined CLD & RBSD', 'CLD', 'RBSD']
        })

        # TODO: server-side pagination -- persist the full similarity list
        # via QueryImageSimilarity and page through it with the
        # 'first'/'page' query parameters (a sketch of this lived here as
        # commented-out code; see version-control history).
    except Exception:
        # BUG FIX: traceback.print_exc() prints to stderr and returns None,
        # so the previous code logged and returned the string "None".
        # format_exc() returns the traceback text.
        error_text = traceback.format_exc()
        print(error_text)
        response = JsonResponse({
            'error': error_text
        })
    finally:
        # NOTE(review): returning from "finally" suppresses any exception
        # raised above, including one raised inside the except handler --
        # confirm this best-effort behaviour is intentional.
        return response
Exemplo n.º 18
0
class Structure:
	"""Just one of these. This holds all information (Posts, Joints, Notches)
		for an entire structure"""
	# NOTE: Python 2 / IronPython (RhinoScript) code -- uses print statements
	# and the Rhino geometry API; do not run under Python 3 as-is.
	
	def __init__(self):
		"""Initialize a Structure"""
		
		# Posts keyed by integer id (kept in sorted/insertion order).
		self.posts = OrderedDict()
		# Joint objects, created by makePockets().
		self.joints = []
		
		# Number of axes/posts collected so far.
		self.dim = 0
		# Highest post id seen; sizes the connection matrix in makePockets().
		self.maxId = 0
		
	###########
	#Structure Class Functions
	
	def info(self):
		"""Displays a text summary of this Structure."""
		
		print "Structure: " + \
		" Posts: " + str(len(self.posts)) + \
		" Joints: " + str(len(self.joints)) + \
		"\n----"
	
	def display(self):
		"""Create objects in viewport to display information about this Structure
		
		Creates: nothing!
		
		"""
	
	def selectAxes(self):
		"""Allow the selection of lines representing post axes.
			Adds objects to structure as unordered list of lines
		"""
		
		# Prompt the user to pick axis curves in the Rhino viewport; [1] is
		# the object list of the (result, objects) tuple.
		axes = Rhino.Input.RhinoGet.GetMultipleObjects("Select Post Axes.", True, 
			Rhino.DocObjects.ObjectType.AnyObject)[1]
		
		self.axes = {}
		
		for axis in axes:
			try: 
				# Post ids come from the Rhino object names.
				id = int(axis.Object().Name)
			except ValueError:
				raise NameError("Currently, un-numbered axes are not supported!")
				# NOTE(review): everything below in this handler is
				# unreachable -- the raise above exits first.
				return True
				#unNumbered = True
				#self.unnumberedAxes.append(
				#	Rhino.Geometry.Line(axis.PointAtStart, axis.PointAtEnd))
				
			axis = axis.Object().CurveGeometry
			
			self.axes[id] = Rhino.Geometry.Line(axis.PointAtStart, axis.PointAtEnd)
			
			self.maxId = max(self.maxId, id)
			self.dim += 1		
		
		#sort axes by id
		self.axes = OrderedDict(sorted(self.axes.items(), key=lambda i: i[0]))
		
		return False
		
	def orderAxes(self):
		"""Assign labels to axes, ordering them along a vector.
			Orders self.axes and optionally adds names to Rhino objects.
		"""
		print "Define sort direction."
		result = Rhino.Input.RhinoGet.GetLine()
		if result[0] == Rhino.Commands.Result.Success:
			sortLine = result[1]
		else:
			raise NameError("Unable to get line for sort axis.")
		
		def sortFunction(axis):
			# Distance from the axis midpoint to a plane through the start
			# of the sort line, oriented along the sort direction.
			plane = Rhino.Geometry.Plane(sortLine.From, 
				Rhino.Geometry.Vector3d(sortLine.To - sortLine.From))
			return plane.DistanceTo(axis.PointAt(.5))
		
		# NOTE(review): self.axes is a dict/OrderedDict, which has no
		# .sort() method -- this line raises AttributeError. Likely
		# intended: self.axes = OrderedDict(sorted(self.axes.items(),
		#   key=lambda kv: sortFunction(kv[1])))
		self.axes.sort(key=sortFunction)
	
	def axesToPosts(self):
		"""turn all axes in self.axes into posts"""
		
		for key in self.axes:
			if self.axes[key] != None:
				p = Post(axis=self.axes[key], width=common.settings.main['postWidth'], 
					height=common.settings.main['postWidth'], id=key)
				self.addPost(p)
	
	def findPairs(self):
		"""Find all pairs of Posts in Structure which might intersect
		
		Returns: list of lists, each with two posts
		"""
		# NOTE(review): writes into self.connections, which is only
		# initialized in makePockets() -- call this via makePockets(), not
		# directly.
		
		pairs = []
		# Python 2: dict.keys() returns a list, indexed by position below.
		keys = self.axes.keys()
		
		#loop through indeces of each Post
		for a in range(0, self.dim):
			#loop through all remaining Posts (higher indices)
			for b in range(a+1, self.dim):
				#only accept pairs within specified distance
				if rs.Distance(*common.findClosestPoints(self.posts[keys[a]].axis, 
					self.posts[keys[b]].axis)) < 2:
					pairs.append((keys[a], keys[b]))
					# Record the direct connection in both directions; each
					# path is stored without its starting point.
					self.connections[keys[a]][keys[b]] = [[keys[b]]]
					self.connections[keys[b]][keys[a]] = [[keys[a]]]
		
		return pairs
	
	def makePockets(self, pocketClass):
		"""Make all Joints and Pockets necessary for current Structure
			Decide gender of pockets for joints which matter
			thanks to blokhead for the script, originally in perl
				(http://www.perlmonks.org/?node_id=522270)
		"""
		
		#initialize connection matrix
		# (maxId+1)^2 adjacency matrix; entry [i][j] is a list of paths
		# from i to j. findPairs() fills in the direct connections.
		self.connections = [[[[]] for i in range(self.maxId+1)] 
			for j in range(self.maxId+1)]
		
		#get all potential Joint pairs
		pairs = self.findPairs()
		
		###
		#assign genders to pockets and make joints
		
		#compute the 3rd power of the adjacency matrix with our modified multiplication
		current = deepcopy(self.connections)
		
		"""modified matrix multiplication. instead of multiplication, we
			combine paths from i->k and k->j to get paths from i->j (this is why
			we include the endpoint in the path, but not the starting point).
			then instead of addition, we union all these paths from i->j
		"""
		
		# First "multiplication": paths of length 2 (weeding out A-B-A).
		result1 = [[None for i in range(self.maxId+1)] for j in range(self.maxId+1)]
		for row in range(self.maxId+1):
			for column in range(self.maxId+1):
				new_paths = []
				for item in range(self.maxId+1): #connect new paths to all old paths
					for path in current[row][item]:
						for pathEnd in self.connections[item][column]:
							if pathEnd and pathEnd[0] != row: #weed out A-B-A paths
								new_paths.append(path + pathEnd)
				result1[row][column] = new_paths
		
		# Second "multiplication": paths of length 3 (weeding out A-B-C-B).
		paths = [[None for i in range(self.maxId+1)] for j in range(self.maxId+1)]
		for row in range(self.maxId+1):
			for column in range(self.maxId+1):
				new_paths = []
				for item in range(self.maxId+1): #connect new paths to all old paths
					for path in result1[row][item]:
						for pathEnd in self.connections[item][column]:
							if pathEnd and len(path) == 2 and pathEnd[0] != path[-2]: #weed out A-B-C-B paths
								new_paths.append(path + pathEnd)
				paths[row][column] = new_paths
		
		#self.printPaths(paths)
		#keep track of desired pocket genders
		genders = {}
		
		# Diagonal entries paths[post][post] of length 3 are triangles
		# ("minor figures"); assign consistent pocket genders around each.
		for post in range(self.maxId+1):
			for path in paths[post][post]:
				#loop through each entry in matrix which represents a circular path of length three
				#check for partially connected rings
				dup = 0
				
				if len(path) == 3: #this post is part of a minor figure
					for i in range(3): #loop through 3 joints in figure
						p0 = path[i]
						p1 = path[(i+1) % 3]
						
						#gender relationship is same for two joints on figure,
						# different for the third (i == 0)
						gender = (i != 0)
						
						if p0 > p1: #pair ids are not in order
							p0,p1 = p1,p0
							gender = not gender
						if (p0,p1) in genders: #already decided this ring
							dup = 1
						else:
							if dup == 1: #bad. two minor figures are connected but not identical
								print "Connected rings at joint ({0}, {1})".format(p0,p1)
							genders[(p0,p1)] = gender
		#create all joints
		fringe = []
		for pair in pairs:
			if pair in genders:
				gender = genders[pair]
			else: #joint not in a minor figure
				fringe.append(pair)
				#assign default gender relationship
				gender = 0
			
			if gender:
				#reverse pair order to invert male/female relationship
				pair = pair[::-1]
			#create pockets for this joint
			self.joints.append(Joint(self.posts[pair[0]], self.posts[pair[1]], len(self.joints),
				pocketClass=pocketClass))
		
		print "Joints not in a complete minor figure: \n", fringe
	
	def printPaths(self, connections):
		"""the i,j entry of the matrix is a list of all the paths from i to j, but
			without "i," at the beginning, so we must add it
		"""
		
		for i,row in enumerate(connections):
			for j,paths in enumerate(row):
				out = '('
				for path in paths:
					out += str(path) + ", "
				out = out[:-2] + ')'
				print "{0},{1}: {2}".format(i,j,out)
	
	def layOut(self, postObjects=None, pocketObjects=None):
		"""Reorient posts with pocket info to world coordinates
		
		Creates: Recreates pocket geometry at origin for all posts in structure
		"""
		
		# Running X offset so successive posts are laid out side by side.
		offset = 0
		
		#set defaults
		if postObjects == None:
			postObjects = ['axis', 'label', 'profile']
		if pocketObjects == None:
			pocketObjects = ['toolpath', 'holes']
		
		for key in self.posts:
			post = self.posts[key]
			
			if post.isConnected:
				transform = copy(post.globalToSelf)
				#add offset into transformation
				transform.M13 += offset
				
				#start list of objects to be transformed with basic post geometry
				guids = post.display(postObjects)
				
				for pocket in post.pockets:
					#transform geometry for each pocket
					#objects.append(sc.doc.Objects.AddSurface(pocket.face))
					guids.extend(pocket.display(pocketObjects))
				# Drop anything that is not a Guid (reversed so deletion by
				# index stays valid).
				for i in reversed(range(len(guids))):
					if type(guids[i]) != System.Guid:
						print "Removed one item of type: {0}\n".format(type(guids[i]))
						del guids[i]
				
				rs.TransformObjects(guids, transform)
				
				offset += 8*common.settings.main['globalScale']
		
	def writeGcode(self):
		"""Organize writing gcode for all posts to files"""
		
		for key in self.posts:
			post = self.posts[key]
			
			gcode = common.Gcode()
			
			f = open('gcode/{0}.nc'.format(post.printId()), 'w')
			
			post.makeGcode(gcode=gcode)
			
			# Wrap the program in the conventional '%' start/end markers.
			f.write("%\n")
			f.write(gcode.text)
			f.write("\n%")
			
			f.close()
		
	def addPost(self, post):
		"""Add a Post to this Structure and give it an id if necessary"""
		
		if post.id == None:
			raise NameError("Posts without named ids aren't currently supported!")
			#post.id = self.dim
			#self.dim += 1

		self.posts[post.id] = post
				
# End Structure Class #
Exemplo n.º 19
0
def standardize_data(data):
    """Standardize photometric data by converting to a structured numpy array
    with standard column names (if necessary) and sorting entries in order of
    increasing time.

    Parameters
    ----------
    data : `~astropy.table.Table` or `~numpy.ndarray` or `dict`

    Returns
    -------
    standardized_data : `~numpy.ndarray`
    """

    warn_once(
        'standardize_data', '1.5', '2.0',
        'This function not intended for public use; open an issue at '
        'https://github.com/sncosmo/sncosmo/issues if you need this '
        'functionality.')

    # Tables are handled via their structured-array view.
    if isinstance(data, Table):
        data = np.asarray(data)

    if isinstance(data, np.ndarray):
        colnames = data.dtype.names
        # Fast path: already has exactly the canonical column names and is
        # time-ordered -- nothing to do.
        if (set(colnames) == set(PHOTDATA_ALIASES.keys())
                and np.all(np.ediff1d(data['time']) >= 0.)):
            return data
    elif isinstance(data, dict):
        colnames = data.keys()
    else:
        raise ValueError('Unrecognized data type')

    # Map each lowercased column name back to its original spelling.
    original_by_lower = {name.lower(): name for name in colnames}
    lower_names = set(original_by_lower)

    # For each standard column, find exactly one matching input column
    # (case-insensitively) among its accepted aliases.
    selected_columns = []
    for aliases in PHOTDATA_ALIASES.values():
        matches = lower_names & aliases
        if len(matches) != 1:
            raise ValueError('Data must include exactly one column from {0} '
                             '(case independent)'.format(', '.join(aliases)))
        selected_columns.append(original_by_lower[matches.pop()])

    if isinstance(data, np.ndarray):
        # Select the matched columns, then rename to the canonical names.
        new_data = data[selected_columns].copy()
        new_data.dtype.names = list(PHOTDATA_ALIASES.keys())
    else:
        # dict input: rebuild under canonical names, then convert to a
        # structured array.
        new_data = OrderedDict(
            (standard_name, data[original_name])
            for standard_name, original_name in zip(PHOTDATA_ALIASES.keys(),
                                                    selected_columns))
        new_data = dict_to_array(new_data)

    # Sort by time, if necessary.
    if not np.all(np.ediff1d(new_data['time']) >= 0.):
        new_data.sort(order=['time'])

    return new_data
Exemplo n.º 20
0
Arquivo: pyxdf.py Projeto: dojeda/xdf
def load_xdf(filename,
             on_chunk=None,
             synchronize_clocks=True,
             handle_clock_resets=True,
             dejitter_timestamps=True,
             jitter_break_threshold_seconds=1,
             jitter_break_threshold_samples=500,
             clock_reset_threshold_seconds=5,
             clock_reset_threshold_stds=5,
             clock_reset_threshold_offset_seconds=1,
             clock_reset_threshold_offset_stds=10,
             winsor_threshold=0.0001,
             sort_streams=True,
             headers_only=False,
             xml_parser='default'):
    """Import an XDF file.

    This is an importer for multi-stream XDF (Extensible Data Format)
    recordings. All information covered by the XDF 1.0 specification is
    imported, plus any additional meta-data associated with streams or with
    the container file itself.

    See https://github.com/sccn/xdf/ for more information on XDF.

    The function supports several further features, such as robust time
    synchronization, support for breaks in the data, as well as some other
    defects.

    Args:
        filename : name of the file to import (*.xdf or *.xdfz)

        synchronize_clocks : Whether to enable clock synchronization based on
          ClockOffset chunks. (default: true)

        dejitter_timestamps : Whether to perform jitter removal for regularly
          sampled streams. (default: true)

        on_chunk : Function that is called for each chunk of data as it is
           being retrieved from the file; the function is allowed to modify
           the data (for example, sub-sample it). The four input arguments
           are 1) the matrix of [#channels x #samples] values (either numeric
           or 2d cell array of strings), 2) the vector of unprocessed local
           time stamps ( one per sample), 3) the info struct for the stream (
           same as the .info field in the final output, buth without the
           .effective_srate sub-field), and 4) the scalar stream number (
           1-based integers). The three return values are 1) the (optionally
           modified) data, 2) the (optionally modified) time stamps, and 3)
           the (optionally modified) header (default: []).

        Parameters for advanced failure recovery in clock synchronization:

        handle_clock_resets : Whether the importer should check for potential
          resets of the clock of a stream (e.g. computer restart during
          recording, or hot-swap). Only useful if the recording system
          supports recording under such circumstances. (default: true)

        clock_reset_threshold_stds : A clock reset must be accompanied by a
          ClockOffset chunk being delayed by at least this many standard
          deviations from the distribution. (default: 5)

        clock_reset_threshold_seconds : A clock reset must be accompanied by a
          ClockOffset chunk being delayed by at least this many seconds. (
          default: 5)

        clock_reset_threshold_offset_stds : A clock reset must be accompanied
          by a ClockOffset difference that lies at least this many standard
          deviations from the distribution. (default: 10)

        clock_reset_threshold_offset_seconds : A clock reset must be
          accompanied by a ClockOffset difference that is at least this many
          seconds away from the median. (default: 1)

        winsor_threshold : A threshold above which jitters the clock offsets
          will be treated robustly (i.e., like outliers), in seconds
          (default: 0.0001)

        headers_only: Read only the file and stream header. No stream data will
          be decoded. (default: False)

        sort_streams: Sort the streams by their names, as present in their
          stream headers. (default: True)

        Parameters for jitter removal in the presence of data breaks:

        jitter_break_threshold_seconds : An interruption in a regularly-sampled
          stream of at least this many seconds will be considered as a
          potential break (if also the jitter_break_threshold_samples is
          crossed) and multiple segments will be returned. (default: 1)

        jitter_break_threshold_samples : An interruption in a regularly-sampled
          stream of at least this many samples will be considered as a
          potential break (if also the jitter_break_threshold_samples is
          crossed) and multiple segments will be returned. (default: 500)

    Returns:
        streams : list of dicts, one for each stream; the dicts
                  have the following content:
                 ['time_series'] entry: contains the stream's time series
                   [#Channels x #Samples] this matrix is of the type declared in
                   ['info']['channel_format']
                 ['time_stamps'] entry: contains the time stamps for each sample
                   (synced across streams)

                 ['info'] field: contains the meta-data of the stream
                   (all values are strings)
                   ['name']: name of the stream
                   ['type']: content-type of the stream ('EEG','Events', ...)
                   ['channel_format']: value format ('int8', 'int16', 'int32',
                     'int64', 'float32', 'double64', 'string')
                   ['nominal_srate']: nominal sampling rate of the stream
                     (as declared by the device); zero for streams with
                     irregular sampling rate
                   ['effective_srate']: effective (measured) sampling rate of
                     the stream, if regular (otherwise omitted)
                   ['desc']: dict with any domain-specific meta-data declared
                     for the stream; see www.xdf.org for the declared
                     specifications

        fileheader : dict with file header contents in the "info" field

    Examples:
        load the streams contained in a given XDF file
        >>> streams, fileheader = load_xdf('C:\Recordings\myrecording.xdf')

    License:
        This file is covered by the BSD license.

        Copyright (c) 2015-2018, Syntrogi Inc. dba Intheon

        Redistribution and use in source and binary forms, with or without
        modification, are permitted provided that the following conditions are
        met:

            * Redistributions of source code must retain the above copyright
              notice, this list of conditions and the following disclaimer.
            * Redistributions in binary form must reproduce the above copyright
              notice, this list of conditions and the following disclaimer in
              the documentation and/or other materials provided with the
              distribution

        THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
        "AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
        LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
        A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
        OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
        SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
        LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
        DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
        THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (
        INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
        OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

    """
    class StreamData:
        """Temporary per-stream data accumulated while reading the file."""
        def __init__(self, xml):
            """Init a new StreamData object from a stream header.

            xml: parsed stream header; values are lists of strings, hence
                the [0] indexing below.
            """
            # struct format character for each numeric channel format
            fmt2char = {
                'int8': 'b',
                'int16': 'h',
                'int32': 'i',
                'int64': 'q',
                'float32': 'f',
                'double64': 'd'
            }
            # bytes per value for each numeric channel format
            fmt2nbytes = {
                'int8': 1,
                'int16': 2,
                'int32': 4,
                'int64': 8,
                'float32': 4,
                'double64': 8
            }
            # number of channels
            self.nchns = int(xml['info']['channel_count'][0])
            # nominal sampling rate in Hz
            self.srate = round(float(xml['info']['nominal_srate'][0]))
            # format string (int8, int16, int32, float32, double64, string)
            self.fmt = xml['info']['channel_format'][0]
            # list of time-stamp chunks (each an ndarray, in seconds)
            self.time_stamps = []
            # list of time-series chunks (each an ndarray or list of lists)
            self.time_series = []
            # list of clock offset measurement times (in seconds)
            self.clock_times = []
            # list of clock offset measurement values (in seconds)
            self.clock_values = []
            # last observed time stamp, for delta decompression
            self.last_timestamp = 0.0
            # nominal sampling interval, in seconds, for delta decompression
            # (0.0 marks an irregularly sampled stream)
            self.tdiff = 1.0 / self.srate if self.srate > 0 else 0.0
            # effective sampling rate
            self.effective_srate = 0
            # pre-calc some parsing parameters for efficiency
            if self.fmt != 'string':
                # number of bytes to read from stream to handle one sample
                self.samplebytes = self.nchns * fmt2nbytes[self.fmt]
                # format string to pass to struct.unpack() to handle one sample
                self.structfmt = '<%s%s' % (self.nchns, fmt2char[self.fmt])
            # NOTE(review): for 'string' streams, samplebytes/structfmt are
            # left undefined -- presumably string samples are decoded
            # value-by-value elsewhere; confirm before relying on them.

    logger.info('Importing XDF file %s...' % filename)
    if not os.path.exists(filename):
        raise Exception('file %s does not exist.' % filename)

    if xml_parser == 'default':
        xml_load_func = _xml2dict
    elif xml_parser == 'parker':
        xml_load_func = _load_xml
    else:
        raise ValueError('Invalid xml_parser parameter')

    # dict of returned streams, in order of apparance, indexed by stream id
    streams = OrderedDict()
    # dict of per-stream temporary data (StreamData), indexed by stream id
    temp = {}
    # XML content of the file header chunk
    fileheader = None
    # number of bytes in the file for fault tolerance
    filesize = os.path.getsize(filename)

    # read file contents ([SomeText] below refers to items in the XDF Spec)
    with gzip.GzipFile(filename, 'rb') if filename.endswith('.xdfz') else open(
            filename, 'rb') as f:

        # read [MagicCode]
        if f.read(4) != b'XDF:':
            raise Exception('not a valid XDF file: %s' % filename)

        # for each chunk...
        StreamId = None
        while True:

            # noinspection PyBroadException
            try:
                # read [NumLengthBytes], [Length]
                chunklen = _read_varlen_int(f)
            except Exception:
                if f.tell() < filesize - 1024:
                    logger.warning(
                        'got zero-length chunk, scanning forward to '
                        'next boundary chunk.')
                    _scan_forward(f)
                    continue
                else:
                    logger.info('  reached end of file.')
                    break

            # read [Tag]
            tag = struct.unpack('<H', f.read(2))[0]
            log_str = ' Read tag: {} at {} bytes, length={}'.format(
                tag, f.tell(), chunklen)
            if tag in [2, 3, 4, 6]:
                StreamId = struct.unpack('<I', f.read(4))[0]
                log_str += ', StreamId={}'.format(StreamId)

            logger.debug(log_str)

            # Quick read of header only: when the chunk if not a header, move
            # the file pointer to the beginning of the next chunk
            if headers_only and tag not in (1, 2):
                offset = 2  # We already read 2 bytes for the tag
                if tag in (2, 3, 4, 6):
                    # In these cases, we already read 4 bytes!
                    offset += 4
                # Move n=chunklen-offset bytes forward, relative to current position (whence=1)
                f.seek(chunklen - offset, 1)
                continue

            # read the chunk's [Content]...
            if tag == 1:
                # read [FileHeader] chunk
                xml_string = f.read(chunklen - 2)
                fileheader = xml_load_func(ET.fromstring(xml_string))
            elif tag == 2:
                # read [StreamHeader] chunk...
                # read [Content]
                xml_string = f.read(chunklen - 6)
                decoded_string = xml_string.decode('utf-8', 'replace')
                hdr = xml_load_func(ET.fromstring(decoded_string))
                old_hdr = _xml2dict(ET.fromstring(decoded_string))
                streams[StreamId] = hdr
                logger.debug('  found stream ' + old_hdr['info']['name'][0])
                # initialize per-stream temp data
                temp[StreamId] = StreamData(old_hdr)
            elif tag == 3:
                # read [Samples] chunk...
                # noinspection PyBroadException
                try:
                    # read [NumSampleBytes], [NumSamples]
                    nsamples = _read_varlen_int(f)
                    # allocate space
                    stamps = np.zeros((nsamples, ))
                    if temp[StreamId].fmt == 'string':
                        # read a sample comprised of strings
                        values = [[None] * temp[StreamId].nchns
                                  for _ in range(nsamples)]
                        # for each sample...
                        for k in range(nsamples):
                            # read or deduce time stamp
                            if struct.unpack('B', f.read(1))[0]:
                                stamps[k] = struct.unpack('<d', f.read(8))[0]
                            else:
                                stamps[k] = (temp[StreamId].last_timestamp +
                                             temp[StreamId].tdiff)
                            temp[StreamId].last_timestamp = stamps[k]
                            # read the values
                            for ch in range(temp[StreamId].nchns):
                                raw = f.read(_read_varlen_int(f))
                                values[k][ch] = raw.decode(errors='replace')
                    else:
                        # read a sample comprised of numeric values
                        values = np.zeros((nsamples, temp[StreamId].nchns))
                        # for each sample...
                        for k in range(nsamples):
                            # read or deduce time stamp
                            if struct.unpack('B', f.read(1))[0]:
                                stamps[k] = struct.unpack('<d', f.read(8))[0]
                            else:
                                stamps[k] = (temp[StreamId].last_timestamp +
                                             temp[StreamId].tdiff)
                            temp[StreamId].last_timestamp = stamps[k]
                            # read the values
                            raw = f.read(temp[StreamId].samplebytes)
                            values[k, :] = struct.unpack(
                                temp[StreamId].structfmt, raw)
                    logger.debug('  reading [%s,%s]' %
                                 (temp[StreamId].nchns, nsamples))
                    # optionally send through the on_chunk function
                    if on_chunk is not None:
                        values, stamps, streams[StreamId] = on_chunk(
                            values, stamps, streams[StreamId], StreamId)
                    # append to the time series...
                    temp[StreamId].time_series.append(values)
                    temp[StreamId].time_stamps.append(stamps)
                except Exception as e:
                    # an error occurred (perhaps a chopped-off file): emit a
                    # warning and scan forward to the next recognized chunk
                    logger.error(
                        'found likely XDF file corruption (%s: %s), '
                        'scanning forward to next boundary chunk.',
                        type(e).__name__,
                        e,
                        exc_info=True)
                    _scan_forward(f)
            elif tag == 6:
                # read [StreamFooter] chunk
                xml_string = f.read(chunklen - 6)
                streams[StreamId]['footer'] = xml_load_func(
                    ET.fromstring(xml_string))
            elif tag == 4:
                # read [ClockOffset] chunk
                try:
                    temp[StreamId].clock_times.append(
                        struct.unpack('<d', f.read(8))[0])
                    temp[StreamId].clock_values.append(
                        struct.unpack('<d', f.read(8))[0])
                except Exception as e:
                    logger.error(
                        'found likely XDF file corruption (%s: %s), '
                        'scanning forward to next boundary chunk.',
                        type(e).__name__,
                        e,
                        exc_info=True)
                    # If reading clock_times succeeded, but not clock_values, drop that last
                    # observation
                    if StreamId in temp and len(
                            temp[StreamId].clock_times) > len(
                                temp[StreamId].clock_values):
                        temp[StreamId].clock_times.pop()
                    _scan_forward(f)
            else:
                # skip other chunk types (Boundary, ...)
                f.read(chunklen - 2)

    # Concatenate the signal across chunks
    for stream in temp.values():
        if stream.time_stamps:
            # stream with non-empty list of chunks
            stream.time_stamps = np.concatenate(stream.time_stamps)
            if stream.fmt == 'string':
                stream.time_series = list(itertools.chain(*stream.time_series))
            else:
                stream.time_series = np.concatenate(stream.time_series)
        else:
            # stream without any chunks
            stream.time_stamps = np.zeros((0, ))
            if stream.fmt == 'string':
                stream.time_series = []
            else:
                stream.time_series = np.zeros((stream.nchns, 0))

    # perform (fault-tolerant) clock synchronization if requested
    if synchronize_clocks:
        logger.info('  performing clock synchronization...')
        temp = _clock_sync(temp, handle_clock_resets,
                           clock_reset_threshold_stds,
                           clock_reset_threshold_seconds,
                           clock_reset_threshold_offset_stds,
                           clock_reset_threshold_offset_seconds,
                           winsor_threshold)

    # perform jitter removal if requested
    if dejitter_timestamps:
        logger.info('  performing jitter removal...')
        temp = _jitter_removal(
            temp,
            jitter_break_threshold_seconds,
            jitter_break_threshold_samples,
        )
    else:
        for stream in temp.values():
            if stream.time_stamps.shape[0] > 2:
                duration = stream.time_stamps[-1] - stream.time_stamps[0]
                stream.effective_srate = len(stream.time_stamps) / duration

    for k in streams.keys():
        stream = streams[k]
        tmp = temp[k]
        stream['info']['effective_srate'] = tmp.effective_srate
        stream['time_series'] = tmp.time_series
        stream['time_stamps'] = tmp.time_stamps

    streams = [s for s in streams.values()]
    if sort_streams:
        streams.sort(key=lambda s: s['info']['name'])

    return streams, fileheader
Exemplo n.º 21
0
    def get_nav_menu(self):
        """
        Return the site navigation menu. If `get_site_menu` returns a
        non-None result, that result becomes the first part of the menu and
        changelist entries for any Models not already covered are appended.
        If `get_site_menu` returns None, a two-level menu is generated
        automatically from the registered Apps and Models.

        :rtype: same format as the `get_site_menu` return value
        """
        site_menu = list(self.get_site_menu() or [])
        had_urls = []

        # Collect the URLs of every existing menu item; auto-generated
        # entries whose URL is already present are skipped later.
        def get_url(menu, had_urls):
            if 'url' in menu:
                had_urls.append(menu['url'])
            if 'menus' in menu:
                for m in menu['menus']:
                    get_url(m, had_urls)

        get_url({'menus': site_menu}, had_urls)

        nav_menu = OrderedDict()  # ordered dict keeps the generated menu order stable

        for model, model_admin in self.admin_site._registry.items():
            if getattr(model_admin, 'hidden_menu', False):
                continue
            app_label = model._meta.app_label
            app_icon = None
            # one menu entry per registered model changelist
            model_dict = {
                'title': smart_text(capfirst(model._meta.verbose_name_plural)),
                'url': self.get_model_url(model, "changelist"),
                'icon': self.get_model_icon(model),
                'perm': self.get_model_perm(model, 'view'),
                'order': model_admin.order,
            }
            if model_dict['url'] in had_urls:
                # skip this model if its URL already appears in an earlier menu item
                continue

            app_key = "app:%s" % app_label
            if app_key in nav_menu:
                nav_menu[app_key]['menus'].append(model_dict)
            else:
                # Find app title: explicit override first, then the
                # AppConfig verbose_name
                app_title = smart_text(app_label.title())
                if app_label.lower() in self.apps_label_title:
                    app_title = self.apps_label_title[app_label.lower()]
                else:
                    app_title = smart_text(
                        apps.get_app_config(app_label).verbose_name)
                # find app icon
                if app_label.lower() in self.apps_icons:
                    app_icon = self.apps_icons[app_label.lower()]

                nav_menu[app_key] = {
                    'title': app_title,
                    'menus': [model_dict],
                }

            # remember the first usable icon/url per app for rendering
            app_menu = nav_menu[app_key]
            if app_icon:
                app_menu['first_icon'] = app_icon
            elif ('first_icon' not in app_menu or app_menu['first_icon']
                  == self.default_model_icon) and model_dict.get('icon'):
                app_menu['first_icon'] = model_dict['icon']

            if 'first_url' not in app_menu and model_dict.get('url'):
                app_menu['first_url'] = model_dict['url']

        # sort models within each app, then the apps themselves by title
        for menu in nav_menu.values():
            menu['menus'].sort(key=sortkeypicker(['order', 'title']))

        nav_menu = list(nav_menu.values())
        nav_menu.sort(key=lambda x: x['title'])

        site_menu.extend(nav_menu)

        return site_menu
Exemplo n.º 22
0
class AnsibleCompleter(Completer):
    """prompt_toolkit completer for an interactive Ansible shell.

    Completes root commands, module names and arguments, playbook paths,
    host/group target patterns, and `list`/`set` sub-commands.

    NOTE(review): this code uses Python 2 idioms (`dict.iteritems()`,
    `str.decode(...)` on plain strings) — confirm the target interpreter
    before porting to Python 3.
    """

    def __init__(self, inventory, root_commands, list_commands,
                 config_definitions, config):
        self.inventory = inventory
        self.root_commands = root_commands
        self.list_commands = list_commands
        self.config_definitions = config_definitions
        self.config = config

        # cache host and group names from the inventory once
        self.hosts = [x.name for x in self.inventory.list_hosts()]
        self.groups = self.inventory.list_groups()

        self.modules = self.list_modules()

    def get_module_meta(self, module):
        """Return the parsed documentation dict for *module*, or None."""
        in_path = module_loader.find_plugin(module)

        # PowerShell modules (.ps1) carry no Python docstring to parse
        if in_path and not in_path.endswith('.ps1'):
            module_vars, _, _, _ = plugin_docs.get_docstring(
                in_path, fragment_loader)
        else:
            module_vars = None
        return module_vars

    def list_modules(self):
        """Collect the names of all modules reachable via the loader paths."""
        modules = set()

        module_paths = module_loader._get_paths()
        for path in module_paths:
            if path is not None:
                modules.update(self._find_modules_in_path(path))

        return modules

    def _find_modules_in_path(self, path):
        """Yield module names found under *path*, recursing into subdirs."""
        if os.path.isdir(path):
            for module in os.listdir(path):
                fullpath = os.path.join(path, module)
                if module.startswith('.'):
                    continue
                elif os.path.isdir(fullpath):
                    # FIX: test the full path (previously a cwd-relative name
                    # was tested, so this branch never matched) and yield the
                    # recursive results (previously the generator returned by
                    # the recursive call was discarded and the directory name
                    # itself fell through and was yielded as a module name).
                    for sub in self._find_modules_in_path(fullpath):
                        yield sub
                    continue
                elif module.startswith('__'):
                    continue
                elif any(module.endswith(x) for x in C.BLACKLIST_EXTS):
                    continue
                elif module in C.IGNORE_FILES:
                    continue
                elif module.startswith('_'):
                    # deprecated modules keep a leading underscore on disk
                    if os.path.islink(fullpath):  # avoids aliases
                        continue
                    module = module.replace('_', '', 1)

                module = os.path.splitext(module)[0]  # removes the extension
                yield module

    def _module_completion(self):
        """Retrieve module names and args for the module command completion"""
        if len(self.word_list) >= 3:
            # a module name was already typed: complete its option names
            module = self.word_list[1]
            meta = self.get_module_meta(module)
            if meta:
                options = meta['options']
                matched_options = self._match_input(self.cur_word, options)
                self.completions = [x + "=" for x in matched_options]
        else:
            self.completions = self._match_input(self.cur_word, self.modules)

    def _playbook_completion(self):
        """Retrieve file paths for the playbook command completion"""
        files_list = []
        # folders that hold variables/roles, not runnable playbooks
        exclude_folders = ["group_vars", "host_vars", "roles"]
        for folder in self.config.configurations["playbook_folders"]:
            if os.path.isdir(folder):
                for root, dirs, files in os.walk(folder):
                    if os.path.basename(root) not in exclude_folders:
                        files_list += [
                            os.path.join(root, file) for file in files
                            if (file.endswith(".yml") or file.endswith(".yaml")
                                ) and (
                                    self.cur_word in os.path.join(root, file))
                        ]
        self.completions = files_list

    def _target_completion(self):
        """Retrieve informations from the inventory for target completion"""
        # split a pattern like "web:&db:!staging" into (prefix, name, sep)
        pattern = r'([&|!|^]*)([^:!&]*)([:]*)'
        matches = re.findall(pattern, self.cur_word)

        if matches[len(matches) - 2][2] == ':':
            real_cur_word = ''
        else:
            real_cur_word = matches[len(matches) - 2][1]

        string_before_cur_word = ''.join(
            [''.join(x) for x in matches if x[2] == ':'])

        if matches[len(matches) - 2][2] != ':':
            string_before_cur_word += matches[len(matches) - 2][0]

        # offer hosts and groups not already used in the pattern
        self.completions = [
            string_before_cur_word + x for x in self.hosts + self.groups if
            x.startswith(real_cur_word) and x not in [y[1] for y in matches]
        ]

    def _list_completion(self):
        """Retrieve elements for the list command completion"""
        self.completions = OrderedDict(
            (key, value) for key, value in self.list_commands.iteritems()
            if key.startswith(self.cur_word))
        update = OrderedDict((x, "A group from inventory") for x in self.groups
                             if x.startswith(self.cur_word))
        self.completions.update(update)

    def _set_completion(self):
        """Retrieve configuration variables for the set command completion"""
        self.completions = OrderedDict(
            (key, value["description"])
            for key, value in self.config_definitions.iteritems()
            if key.startswith(self.cur_word))

    def _match_input(self, input, struct):
        """Filter *struct* (dict keys, list or set items) by prefix *input*.

        NOTE(review): any other *struct* type leaves `result` unbound and
        raises UnboundLocalError — callers only pass dicts/lists/sets.
        """
        if isinstance(struct, dict):
            result = OrderedDict((key, value)
                                 for key, value in struct.iteritems()
                                 if key.startswith(input))
        elif isinstance(struct, list) or isinstance(struct, set):
            result = [x for x in struct if x.startswith(input)]
        return result

    def get_completions(self, document, complete_event):
        """Yield prompt_toolkit Completion objects for the current input."""
        self.cur_text = document.text_before_cursor
        self.cur_word = document.get_word_before_cursor(WORD=True)
        self.word_list = self.cur_text.split(' ')
        complete_playbook = "playbook_folders" in self.config.configurations
        self.completions = []

        if len(self.word_list) == 1:
            # first word: complete the root command itself
            self.completions = self._match_input(self.cur_word,
                                                 self.root_commands)
        else:
            if self.word_list[0] == "module":
                self._module_completion()
            elif self.word_list[0] == "playbook" and complete_playbook:
                self._playbook_completion()
            elif len(self.word_list) == 2:
                if self.word_list[0] == "target":
                    self._target_completion()
                elif self.word_list[0] == "list":
                    self._list_completion()
                elif self.word_list[0] == "set":
                    self._set_completion()

        if isinstance(self.completions, list):
            self.completions.sort()

        for word in self.completions:
            if isinstance(self.completions, dict):
                # dict values carry the description shown next to the match
                meta = self.completions[word].decode('utf-8')
            else:
                meta = None

            yield Completion(word.decode('utf-8'),
                             -len(self.cur_word),
                             display_meta=meta)
Exemplo n.º 23
0
# Sort the characters of the externally supplied string `s` into four
# buckets — lowercase, uppercase, odd digits, even digits — then print
# them concatenated in that order, each bucket sorted ascending.
lc = []  # lowercase letters (fix: was never initialized -> NameError)
uc = []  # uppercase letters (fix: was never initialized -> NameError)
od = []  # odd digits
ed = []  # even digits
for ch in s:
    if ch.isdigit():
        # classify digits by parity
        if int(ch) % 2 == 0:
            ed.append(ch)
        else:
            od.append(ch)
    elif ch.islower():
        lc.append(ch)
    elif ch.isupper():
        uc.append(ch)
lc.sort()
uc.sort()
od.sort()
ed.sort()
# join is linear; repeated "+=" concatenation is quadratic
out = "".join(lc + uc + od + ed)
print(out)

### Python Functionals

## Ex 1 - Map and Lambda
Exemplo n.º 24
0
# Example 7
# heappop always removes and returns the current smallest element.
# NOTE(review): `a` and heappop/heappush/nsmallest come from earlier,
# off-screen examples in this file.
print(heappop(a), heappop(a), heappop(a), heappop(a))


# Example 8
# Rebuild a small heap; the heap invariant keeps the minimum at index 0.
a = []
heappush(a, 5)
heappush(a, 3)
heappush(a, 7)
heappush(a, 4)
assert a[0] == nsmallest(1, a)[0] == 3


# Example 9
# Sorting a list that is a heap produces a sorted list (still a valid heap).
print('Before:', a)
a.sort()
print('After: ', a)


# Example 10
# list.index performs a linear O(n) scan.
x = list(range(10**6))
i = x.index(991234)
print(i)


# Example 11
# bisect_left performs an O(log n) binary search on the sorted list.
from bisect import bisect_left
i = bisect_left(x, 991234)
print(i)

Exemplo n.º 25
0
    # NOTE(review): `a` is an OrderedDict started above this excerpt —
    # its creation and the enclosing function signature are off-screen.
    a['bar'] = 2
    b = OrderedDict()
    b['foo'] = 'red'
    b['bar'] = 'blue'

    # iterate both ordered dicts in lockstep, in insertion order
    for value1, value2 in zip(a.values(), b.values()):
        print(value1, value2)

    # defaultdict: missing keys default to int() == 0
    stats = defaultdict(int)
    stats['my_counter'] += 1

    # heap: heappush keeps the minimum element at index 0
    a = []
    heappush(a, 5)
    heappush(a, 3)
    heappush(a, 7)
    heappush(a, 4)
    # print(heappop(a), heappop(a), heappop(a), heappop(a))
    assert a[0] == nsmallest(1, a)[0] == 3
    print('Before:', a)
    a.sort()
    print('After:', a)

    # bisect: O(log n) binary search vs list.index's O(n) linear scan
    x = list(range(10**6))
    i = x.index(991234)
    print(i)
    i1 = bisect.bisect_left(x, 991234)
    print(i1)
Exemplo n.º 26
0
from collections import OrderedDict

# NOTE(review): `res` comes from a clustering call earlier in the file —
# presumably res[0] is the number of clusters and res[1] the per-document
# labels; confirm against the producing code.
cluster_labels = res[1]
num_cluster = res[0]
res_cluster = OrderedDict()

# group document indices by cluster label, preserving first-seen order
for i in range(0, len(cluster_labels)):
    if cluster_labels[i] in res_cluster:
        res_cluster[cluster_labels[i]].append(i)
    else:
        res_cluster[cluster_labels[i]] = [i]
print("------------==============--------------")
# keep clusters 0..num_cluster-1, drop singletons, sort each cluster's
# members ascending, then order clusters largest-first
res_cluster = [res_cluster[i] for i in range(0, num_cluster)]
res_cluster = [sorted(r) for r in res_cluster if len(r) > 1]
res_cluster.sort(key=len, reverse=True)
print("Number of cluster: ", len(res_cluster))
print("Number of clustered documents: ",
      len([j for i in res_cluster for j in i]))
# NOTE(review): `documents` is defined earlier in the file — verify
print("Number of noise documents: ",
      len(documents) - len([j for i in res_cluster for j in i]))
print("-----------==========----------")
# print(len(res_cluster[0]))
number = len(res_cluster)
checks = []
for i in range(0, number):
    # print('Cluster' + ' ' + str(i) + ' ' + 'has :' + str(len(res_cluster[i])) )
    # print(len(res_cluster[i]))
    check = 'Cluster' + ' ' + str(i) + ' ' + 'has :' + str(len(
        res_cluster[i])) + '\n'
    # print(check)