def main_menu(self, logger, *args, **kwargs):
        "Get initial papers for modelling."
        while True:
            n_included = np.sum(self.y[self.train_idx] == 1)
            n_excluded = np.sum(self.y[self.train_idx] == 0)
            action = questionary.select(
                'What do you want to do next?',
                choices=[
                    "Find papers by keywords", "Find papers by ID",
                    questionary.Separator(),
                    f"Continue review ({n_included} included, "
                    f"{n_excluded} excluded)", "Export",
                    questionary.Separator(), "Stop"
                ]).ask()

            if action is None or action.startswith("Stop"):
                stop = questionary.confirm("Are you sure you want to stop?",
                                           default=False).ask()
                if stop:
                    raise KeyboardInterrupt
            elif action.endswith("by keywords"):
                self._papers_from_finder(logger)
            elif action.endswith("by ID"):
                self._papers_from_id(logger)
            elif action.startswith("Export"):
                self._export()
            elif action.startswith("Continue review"):
                try:
                    self._do_review(logger, *args, **kwargs)
                except KeyboardInterrupt:
                    pass

    def _get_labels_paper(self, index, stat_str=None, ask_stop=False):
        """Ask the user for a label for a particular paper.

        Arguments
        ---------
        index: int
            Paper ID in the dataset.
        stat_str: str
            Display this (statistic) string under the paper.
        ask_stop: bool
            Ask for confirmation when stopping.
        """
        # CLI paper format
        self.as_data.print_record(index)
        if stat_str is not None:
            print(stat_str + "\n")

        action = questionary.select(
            'Include or Exclude?',
            choices=[
                'Exclude', 'Include',
                questionary.Separator(), 'Back to main menu'
            ],
            default='Exclude',
        ).ask()

        if action == "Include":
            label = 1
        elif action == "Exclude":
            label = 0
        else:
            label = None

        return label
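A small standalone sketch (not part of the class above) of the cancel handling
these menus rely on: in questionary, .ask() catches KeyboardInterrupt and
returns None instead of raising, which is why the code checks for None before
using an answer.

import questionary

label = questionary.select(
    "Include or Exclude?",
    choices=["Exclude", "Include"],
).ask()

if label is None:
    # The prompt was cancelled (e.g. Ctrl+C), so no label was chosen.
    print("No answer given.")
else:
    print("Chose:", label)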
Example #3
    def prompt_group(self, group_id, group_obj):
        """
        Prompt for edits to a group of parameters (subschema in 'definitions')

        Args:
          group_id: Parameter ID (string)
          group_obj: JSON Schema keys (dict)

        Returns:
          Dict of param_id:val answers
        """
        while_break = False
        answers = {}
        while not while_break:
            question = {
                "type": "list",
                "name": group_id,
                "message": group_obj.get("title", group_id),
                "choices": ["Continue >>",
                            questionary.Separator()],
            }

            for param_id, param in group_obj["properties"].items():
                if not param.get("hidden", False) or self.show_hidden:
                    q_title = param_id
                    if param_id in answers:
                        q_title += "  [{}]".format(answers[param_id])
                    elif "default" in param:
                        q_title += "  [{}]".format(param["default"])
                    question["choices"].append(
                        questionary.Choice(title=q_title, value=param_id))

            # Skip if all questions hidden
            if len(question["choices"]) == 2:
                return {}

            self.print_param_header(group_id, group_obj)
            answer = questionary.unsafe_prompt([question],
                                               style=nfcore_question_style)
            if answer[group_id] == "Continue >>":
                while_break = True
                # Check if there are any required parameters that don't have answers
                for p_required in group_obj.get("required", []):
                    req_default = self.schema_obj.input_params.get(
                        p_required, "")
                    req_answer = answers.get(p_required, "")
                    if req_default == "" and req_answer == "":
                        log.error("'--{}' is required.".format(p_required))
                        while_break = False
            else:
                param_id = answer[group_id]
                is_required = param_id in group_obj.get("required", [])
                answers.update(
                    self.prompt_param(param_id,
                                      group_obj["properties"][param_id],
                                      is_required, answers))

        return answers
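A standalone sketch of the dict-based prompt API that prompt_group drives,
assuming plain questionary without the nf-core styling: questionary.prompt()
swallows KeyboardInterrupt, while unsafe_prompt() (used above) re-raises it so
the caller can decide how to handle a cancelled prompt.

import questionary

question = {
    "type": "list",
    "name": "group",
    "message": "Pick a parameter to edit",
    "choices": ["Continue >>", questionary.Separator(), "alpha", "beta"],
}

try:
    answer = questionary.unsafe_prompt([question])
    print(answer["group"])
except KeyboardInterrupt:
    print("Cancelled.")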
def review_oracle(dataset, *args, log_file=None, **kwargs):
    """CLI to the interactive mode."""
    if log_file is None:
        while True:
            log_file = questionary.text(
                'Please provide a file to store '
                'the results of your review:',
                validate=lambda val: splitext(val)[1] in LOGGER_EXTENSIONS,
            ).ask()
            if log_file is None:
                return
            if len(log_file) == 0:
                force_continue = questionary.confirm(
                    'Are you sure you want to continue without saving?',
                    default=False
                ).ask()
                if force_continue:
                    log_file = None
                    break
            else:
                if os.path.isfile(log_file):
                    action = questionary.select(
                        f'File {log_file} exists, what do you want'
                        ' to do?',
                        default='Exit',
                        choices=[
                            f'Continue review from {log_file}',
                            f'Delete review in {log_file} and start a new'
                            ' review',
                            'Choose another file name.',
                            questionary.Separator(),
                            'Exit'
                        ]
                    ).ask()
                    if action == "Exit" or action is None:
                        return
                    if action.startswith("Continue"):
                        break
                    if action.startswith("Choose another"):
                        continue
                    if action.startswith("Delete"):
                        delete = questionary.confirm(
                            f'Are you sure you want to delete '
                            f'{log_file}?',
                            default=False,
                        ).ask()
                        if delete:
                            os.remove(log_file)
                            break
                        else:
                            continue

                break
    try:
        review(dataset, *args, mode='oracle', log_file=log_file, **kwargs)
    except KeyboardInterrupt:
        print('\nClosing down the automated systematic review.')
Example #5
def main(dry_run):
    action = q.select(
        "What do you want to do?",
        choices=[
            {
                "name": "init boilerplate",
                "value": INIT_BOILERPLATE
            },
            {
                "name": "create a new challenge",
                "value": NEW_CHALLENGE
            },
        ],
    ).ask()

    init_boilerplate = q.select(
        "Which boilerplate do you want to use?",
        choices=[
            q.Separator("---API---"),
            *APIS,
            q.Separator("---langs---"),
            *LANGS,
            q.Separator("---SPA---"),
            *SPAS,
        ],
    ).ask()

    api_choices = [q.Choice(x, checked=True)
                   for x in APIS] if init_boilerplate in APIS else APIS
    lang_choices = [q.Choice(x, checked=True)
                    for x in LANGS] if init_boilerplate in LANGS else LANGS
    spa_choices = [q.Choice(x, checked=True)
                   for x in SPAS] if init_boilerplate in SPAS else SPAS

    while action == NEW_CHALLENGE:
        allowed_boilerplates = q.checkbox(
            "Which boilerplates are candidates allowed to use?",
            choices=[
                q.Separator("---API---"),
                *api_choices,
                q.Separator("---langs---"),
                *lang_choices,
                q.Separator("---SPA---"),
                *spa_choices,
            ],
        ).ask()

        confirm_none = q.confirm(
            "No allowed boilerplates. Are you sure?",
            default=False,
        ).skip_if(len(allowed_boilerplates)).ask()

        if len(allowed_boilerplates) or confirm_none:
            break

    validate_bp_dir(init_boilerplate)
    sync_bp_dir(init_boilerplate, dry_run)
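The confirm step above only appears when nothing was selected, thanks to
skip_if. A minimal standalone sketch of that pattern, with a hypothetical
selection list: skip_if(condition, default) skips the prompt when the
condition is truthy and makes ask() return the given default instead.

import questionary as q

selected = ["api-flask"]  # hypothetical checkbox result

confirmed = q.confirm(
    "Nothing selected. Are you sure?",
    default=False,
).skip_if(len(selected) > 0, default=True).ask()

print(confirmed)  # True here, because the prompt was skipped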
    def _papers_from_finder(self, logger):
        "Find papers using a fuzzy finder in the available records."
        keywords = questionary.text(
            'Find papers using keywords/authors/title:').ask()

        if keywords is None:
            return

        paper_idx = self.as_data.fuzzy_find(keywords, exclude=self.train_idx)

        # Get the (possibly) relevant papers.
        choices = []
        for idx in paper_idx:
            choices.append(self.as_data.preview_record(idx))
        choices.extend([questionary.Separator(), "Return"])

        # Stay in the same menu until no more options are left
        while len(choices) > 2:
            new_choice = questionary.select(
                'Choose a paper to review:',
                choices=choices,
            ).ask()

            if new_choice == "Return" or new_choice is None:
                return
            choice_idx = choices.index(new_choice)
            idx = paper_idx[choice_idx]

            # Get the label for the selected paper.
            label = self._get_labels_paper(idx, ask_stop=False)
            if label is not None:
                self.classify([idx], [label], logger, method="initial")

                # Remove the selected choice from the list.
                del choices[choice_idx]
                del paper_idx[choice_idx]
        return
Example #7
def setup_fab_data(clay_bullets):
    """Check for placed bullets in JSON.

    Parameters
    ----------
    clay_bullets : list of :class:`compas_rcf.fabrication.clay_objs.ClayBullet`
        Original list of ClayBullets.

    Returns
    -------
    list of :class:`compas_rcf.fabrication.clay_objs.ClayBullet`
        Curated list of ClayBullets
    """
    maybe_placed = [
        bullet for bullet in clay_bullets if bullet.placed is not None
    ]

    if len(maybe_placed) < 1:
        return clay_bullets

    last_placed = max(maybe_placed, key=attrgetter("bullet_id"))
    last_placed_index = clay_bullets.index(last_placed)

    log.info("Last bullet placed was {:03}/{:03} with id {}.".format(
        last_placed_index, len(clay_bullets), last_placed.bullet_id))

    skip_options = questionary.select(
        "Some or all bullets seem to have been placed already.",
        [
            "Skip all bullets marked as placed in JSON file.",
            "Place all anyway.",
            questionary.Separator(),
            "Place some of the bullets.",
        ],
    ).ask()

    if skip_options == "Skip all bullet marked as placed in JSON file.":
        to_place = [
            bullet for bullet in clay_bullets if bullet not in maybe_placed
        ]
    if skip_options == "Place all anyways.":
        to_place = clay_bullets[:]
    if skip_options == "Place some of the bullets.":
        skip_method = questionary.select(
            "Select method:",
            ["Place last N bullets again.", "Pick bullets to place again."],
        ).ask()
        if skip_method == "Place last N bullets again.":
            n_place_again = questionary.text(
                "Number of bullets from last to place again?",
                "1",
                lambda val: val.isdigit() and -1 < int(val) <
                last_placed_index,
            ).ask()
            to_place = clay_bullets[last_placed_index - int(n_place_again) +
                                    1:]
            log.info(
                "Placing last {} bullets again. First bullet will be id {}.".
                format(
                    n_place_again,
                    to_place[0].bullet_id,
                ))
        else:
            to_place_selection = questionary.checkbox(
                "Select bullets:",
                [
                    "{:03} (id {}), marked placed: {}".format(
                        i, bullet.bullet_id, bullet.placed is not None)
                    for i, bullet in enumerate(clay_bullets)
                ],
            ).ask()
            indices = [int(bullet.split()[0]) for bullet in to_place_selection]
            to_place = [clay_bullets[i] for i in indices]

    return to_place
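The text prompt above passes its default and validator positionally. The same
call written with keyword arguments, as a minimal standalone sketch (the bound
checked here is illustrative): validate may be a plain callable that returns
True or False, and the prompt refuses to submit until it returns True.

import questionary

n_again = questionary.text(
    "Number of bullets from last to place again?",
    default="1",
    validate=lambda val: val.isdigit() and int(val) > 0,
).ask()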
Example #8
    def prompt_group(self, group_id, group_obj):
        """
        Prompt for edits to a group of parameters (subschema in 'definitions')

        Args:
          group_id: Parameter ID (string)
          group_obj: JSON Schema keys (dict)

        Returns:
          Dict of param_id:val answers
        """
        while_break = False
        answers = {}
        error_msgs = []
        while not while_break:

            if len(error_msgs) == 0:
                self.print_param_header(group_id, group_obj, True)

            question = {
                "type": "list",
                "name": group_id,
                "qmark": "",
                "message": "",
                "instruction": " ",
                "choices": ["Continue >>",
                            questionary.Separator()],
            }

            # Show error messages if we have any
            for msg in error_msgs:
                question["choices"].append(
                    questionary.Choice(
                        [("bg:ansiblack fg:ansired bold", " error "),
                         ("fg:ansired", f" - {msg}")],
                        disabled=True))
            error_msgs = []

            for param_id, param in group_obj["properties"].items():
                if not param.get("hidden", False) or self.show_hidden:
                    q_title = [("", "{}  ".format(param_id))]
                    # If already filled in, show value
                    if param_id in answers and answers.get(
                            param_id) != param.get("default"):
                        q_title.append(("class:choice-default-changed",
                                        "[{}]".format(answers[param_id])))
                    # If the schema has a default, show default
                    elif "default" in param:
                        q_title.append(("class:choice-default",
                                        "[{}]".format(param["default"])))
                    # Show that it's required if not filled in and no default
                    elif param_id in group_obj.get("required", []):
                        q_title.append(("class:choice-required", "(required)"))
                    question["choices"].append(
                        questionary.Choice(title=q_title, value=param_id))

            # Skip if all questions hidden
            if len(question["choices"]) == 2:
                return {}

            answer = questionary.unsafe_prompt(
                [question], style=nf_core.utils.nfcore_question_style)
            if answer[group_id] == "Continue >>":
                while_break = True
                # Check if there are any required parameters that don't have answers
                for p_required in group_obj.get("required", []):
                    req_default = self.schema_obj.input_params.get(
                        p_required, "")
                    req_answer = answers.get(p_required, "")
                    if req_default == "" and req_answer == "":
                        error_msgs.append(f"`{p_required}` is required")
                        while_break = False
            else:
                param_id = answer[group_id]
                is_required = param_id in group_obj.get("required", [])
                answers = self.prompt_param(param_id,
                                            group_obj["properties"][param_id],
                                            is_required, answers)

        return answers
Example #9
def main():
    # with open('test.yml', 'r') as stream:
    #     questions = load(stream, Loader=Loader)
    # print(questions)

    questions = [{
        'type': 'text',
        'name': 'firstname',
        'message': "Specify your first name:",
    }, {
        'type': 'text',
        'name': 'lastname',
        'message': "Specify your last name",
    }, {
        'type': 'text',
        'name': 'email',
        'message': "What's your email address",
        'default': '*****@*****.**'
    }, {
        'type': 'select',
        'name': 'package_manager',
        'message': "Which package manager would you like to use",
        'choices': ['pipenv', 'poetry', 'conda'],
        'default': 'pipenv'
    }, {
        'type': 'text',
        'name': 'shell',
        'message': "Specify the shell you use",
        'default': '/bin/bash'
    }, {
        'type': 'text',
        'name': 'project_name',
        'message': "Specify the name of your project",
    }, {
        'type': 'text',
        'name': 'project_slug',
        'message': "Specify your project's slug name",
        'default': lambda x: x['project_name'].lower().replace(' ', '-')
    }, {
        'type': 'text',
        'name': 'project_description',
        'message': "Give a short description of your project"
    }, {
        'type': 'text',
        'name': 'project_homepage',
        'message': "Specify the homepage of your project",
        'validate': validate_url
    }, {
        'type': 'text',
        'name': 'keywords',
        'message': "Specify up to five keywords for your project"
    }, {
        'type': 'select',
        'name': 'license',
        'message': "Select a license for your project",
        'choices': ["MIT", "Apache-2.0", "BSD-3-Clause"],
        'default': 'MIT'
    }, {
        'type': 'autocomplete',
        'name': 'python_version',
        'message': "Specify your python version",
        'choices': get_python_versions()
    }, {
        'type': 'text',
        'name': 'package_version',
        'message': "Specify your package version",
        'default': "0.1.0",
        'validate': validate_package_version
    }, {
        'type': 'checkbox',
        'name': 'services',
        'message': 'Which additional services would you like to use',
        'choices':
        ["Git-SCM", "Docker", "JupyterLab", "Continuous Integration (CI)"]
    }]

    git_questions = [
        {
            'type': 'select',
            'name': 'scm_server',
            'message': "Which source control server would you like to use",
            'choices': ["GitHub", "Gitlab",
                        questionary.Separator(), "other"],
        },
        {
            'type': 'text',
            'name': 'scm_server_url',
            'message': "Specify the URL of your git server",
            'when': lambda x: x['scm_server'] == 'other'
        },
        {
            'type': 'text',
            'name': 'git_username',
            'message': "Specify your GitHub username",
            'when': lambda x: x['scm_server'] == 'GitHub'
        },
        {
            'type': 'text',
            'name': 'git_username',
            'message': "Specify your Gitlab username",
            'when': lambda x: x['scm_server'] == 'Gitlab'
        },
        {
            'type': 'password',
            'name': 'scm_token',
            'message': "Specify an access token for your chosen git server"
        },
    ]

    docker_questions = [{
        'type': 'select',
        'name': 'docker_registry',
        'message': "Which docker registry would you like to use",
        'choices': ["DockerHub", questionary.Separator(), "other"],
    }, {
        'type': 'text',
        'name': 'docker_registry_url',
        'message': "Specify the URL of your docker registry",
        'when': lambda x: x['docker_registry'] == 'other'
    }, {
        'type': 'password',
        'name': 'docker_token',
        'message': "Specify an access token for your registry",
    }]

    ci_questions = [
        {
            'type': 'select',
            'name': 'ci_tool',
            'message': "Which CI tool would you like to use",
            'choices':
            ["Travis CI", "Gitlab CI",
             questionary.Separator(), "None"],
        },
    ]

    answers = questionary.prompt(questions)
    git_answers = None
    docker_answers = None
    ci_answers = None

    if 'Git-SCM' in answers["services"]:
        git_answers = questionary.prompt(git_questions)
    if 'Docker' in answers["services"]:
        docker_answers = questionary.prompt(docker_questions)
    if 'Continuous Integration (CI)' in answers["services"]:
        ci_answers = questionary.prompt(ci_questions)

    print(answers)
    print(git_answers)
    print(docker_answers)
    print(ci_answers)
Example #10
def config_cmd(path, config):
    projects = config.get("projects", [])
    whitelist = config.get("whitelist", [])

    choices = []
    for dir_name, subdir_list, file_list in os.walk(path, followlinks=False):
        project_types = get_project_types(dir_name)
        if project_types:
            choices.append(questionary.Separator(f"== Path: {dir_name} =="))
            for project_type in project_types:
                choices.append({
                    "name": f"{project_type.value} project",
                    "value": {
                        "directory": dir_name,
                        "type": project_type.value
                    },
                })

    default_licenses = [
        "MIT",
        "Apache-2.0",
        "BSD",
        "GPL-2.0",
        "GPL-3.0",
        "LGPL-3.0",
        "VSPL",
        "MPL-2.0",
        "FreeBSD",
        "Zlib",
        "AFL",
        "X11",
        "JSON",
    ]

    questions = [
        {
            "type": "confirm",
            "message":
            "Do you want to modify project paths to be watched by this tool?",
            "name": "change_projects",
            "default": False,
            "when": lambda x: len(projects) > 0,
        },
        {
            "type": "checkbox",
            "qmark": "📦",
            "message": "Select projects to watch",
            "name": "projects",
            "choices": choices,
            "when": lambda x: len(projects) == 0 or x.get("change_projects"),
        },
        {
            "type":
            "checkbox",
            "qmark":
            "📋",
            "message":
            "Select licenses which are OK to be used for your project",
            "name":
            "whitelist",
            "choices": [{
                "name": license,
                "checked": license in whitelist
            } for license in default_licenses],
        },
    ]
    answers = questionary.prompt(questions)

    if "projects" in answers:
        config["projects"] = answers["projects"]
    if "whitelist" in answers:
        config["whitelist"] = answers["whitelist"]

    if write_config(path, config):
        print("Successfully generated .license-sh.json file")