Пример #1
0
    def build_schema(self, pipeline_dir, no_prompts, web_only, url):
        """Interactively build a new pipeline schema for a pipeline.

        Args:
            pipeline_dir: Path of the pipeline to build a schema for.
            no_prompts: When truthy, disable all interactive prompts.
            web_only: When truthy, skip the local schema update/save steps.
            url: Optional override for the web schema builder URL.

        Returns:
            False if building, validating or launching the web builder
            failed; otherwise None (implicit).
        """

        # Promote one-off call arguments to instance state used further down
        if no_prompts:
            self.no_prompts = True
        if web_only:
            self.web_only = True
        if url:
            self.web_schema_build_url = url

        # Get pipeline schema filename
        try:
            self.get_schema_path(pipeline_dir, local_only=True)
        except AssertionError:
            # No schema on disk - build a fresh one from the workflow params
            log.info("No existing schema found - creating a new one from the nf-core template")
            self.get_wf_params()
            self.make_skeleton_schema()
            self.remove_schema_notfound_configs()
            self.add_schema_found_configs()
            try:
                self.validate_schema()
            except AssertionError as e:
                log.error("[red]Something went wrong when building a new schema:[/] {}".format(e))
                log.info("Please ask for help on the nf-core Slack")
                return False
        else:
            # Schema found - load and validate
            try:
                self.load_lint_schema()
            except AssertionError as e:
                log.error("Existing pipeline schema found, but it is invalid: {}".format(self.schema_filename))
                log.info("Please fix or delete this file, then try again.")
                return False

        # Sync the schema with the current workflow params and save it
        if not self.web_only:
            self.get_wf_params()
            self.remove_schema_notfound_configs()
            self.add_schema_found_configs()
            self.save_schema()

        # If running interactively, send to the web for customisation
        if not self.no_prompts:
            if Confirm.ask(":rocket:  Launch web builder for customisation and editing?"):
                try:
                    self.launch_web_builder()
                except AssertionError as e:
                    log.error(e.args[0])
                    # Extra help for people running offline
                    if "Could not connect" in e.args[0]:
                        log.info(
                            "If you're working offline, now copy your schema ({}) and paste at https://nf-co.re/pipeline_schema_builder".format(
                                self.schema_filename
                            )
                        )
                        log.info("When you're finished, you can paste the edited schema back into the same file")
                    # NOTE(review): attribute name differs from 'web_schema_build_url'
                    # set above - presumably populated by launch_web_builder(); confirm.
                    if self.web_schema_build_web_url:
                        log.info(
                            "To save your work, open {}\n"
                            "Click the blue 'Finished' button, copy the schema and paste into this file: {}".format(
                                self.web_schema_build_web_url, self.schema_filename
                            )
                        )
                    return False
Пример #2
0
    config['LANGUAGE'] = LANGUAGE

    basarili(LANG['SUCCESS_CONFIG'])
    bilgi(LANG['OPENING_DYNO'])

    try:
        app.process_formation()["worker"].scale(1)
    except:
        hata(LANG['ERROR_DYNO'])
        exit(1)

    basarili(LANG['OPENED_DYNO'])
    basarili(LANG['SUCCESS_DEPLOY'])
    tamamlandi(time() - baslangic)

    Sonra = Confirm.ask(f"[bold yellow]{LANG['AFTERDEPLOY']}[/]", default=True)
    if Sonra == True:
        BotLog = False
        Cevap = ""
        while not Cevap == "4":
            if Cevap == "1":
                bilgi(LANG['OPENING_BOTLOG'])

                KanalId = loop.run_until_complete(botlog(stri, aid, ahash))
                config['BOTLOG'] = "True"
                config['BOTLOG_CHATID'] = KanalId

                basarili(LANG['OPENED_BOTLOG'])
                BotLog = True
            elif Cevap == "3":
                if BotLog:
Пример #3
0
def sci_dl_init_config():
    """
    initialize sci-dl configuration

    Interactively prompts for every configuration value (SciHub base URL,
    retry count, proxy settings, log file, output directory and debug
    mode), validates each answer in a loop, then writes the result to
    CONFIG_FILE.

    Returns:
        int: 0 on success.

    Raises:
        click.UsageError: if a SciDlError or any unexpected error occurs.
    """
    try:
        console = Console()
        # base_url: loop until the answer parses as a valid URL
        while True:
            base_url = Prompt.ask(_('SciHub base url'),
                                  default=DEFAULT_BASE_URL)
            if validators.url(base_url):
                break
            console.log(_('Invalid base_url %s') % base_url)

        # retries: must fall inside [MIN_RETRIES, MAX_RETRIES]
        while True:
            retries = IntPrompt.ask(_('Number of failure download retries'),
                                    default=5)
            if MIN_RETRIES <= retries <= MAX_RETRIES:
                break
            console.log(
                _('invalid number of failure download retries %s, '
                  'must between %s and %s') %
                (retries, MIN_RETRIES, MAX_RETRIES))

        # use_proxy: proxy fields keep their defaults unless proxying is enabled
        use_proxy = Confirm.ask(_('Do you want to use a proxy?'), default=True)
        proxy_protocol = DEFAULT_PROXY_PROTOCOL
        proxy_user = DEFAULT_PROXY_USER
        proxy_password = DEFAULT_PROXY_PASSWORD
        proxy_host = DEFAULT_PROXY_HOST
        proxy_port = DEFAULT_PROXY_PORT
        if use_proxy:
            # proxy_protocol
            proxy_protocol = Prompt.ask(_('Protocol of your proxy'),
                                        choices=PROXY_PROTOCOLS,
                                        default=DEFAULT_PROXY_PROTOCOL)

            # proxy_user
            proxy_user = Prompt.ask(
                _('User of your proxy, leave blank if not need'),
                default=DEFAULT_PROXY_USER)

            # proxy_password (hidden input)
            proxy_password = Prompt.ask(
                _('Password of your proxy, leave blank if not need'),
                password=True,
                default=DEFAULT_PROXY_PASSWORD,
            )

            # proxy_host: accept a domain name, an IPv4 or an IPv6 address.
            # BUGFIX: the second address check previously called
            # validators.ipv4() twice, so IPv6 proxy hosts were rejected.
            while True:
                proxy_host = Prompt.ask(_('Host of your proxy'),
                                        default=DEFAULT_PROXY_HOST)
                if validators.domain(proxy_host) or validators.ipv4(
                        proxy_host) or validators.ipv6(proxy_host):
                    break
                console.log(_('Invalid host %s') % proxy_host)

            # proxy port: any valid TCP port number
            while True:
                proxy_port = IntPrompt.ask(_('Port of your proxy'),
                                           default=DEFAULT_PROXY_PORT)
                if 1 <= proxy_port <= 65535:
                    break
                console.log(
                    _('Invalid port %s, should between 1 and 65535') %
                    proxy_port)

        # log file: create its parent directory if it does not exist yet
        while True:
            log_file = Prompt.ask(_('Log file'), default=DEFAULT_LOG_FILE)
            try:
                log_directory = dirname(log_file)
                if not exists(log_directory):
                    makedirs(log_directory)
                break
            except Exception:
                console.log(_('Invalid log file %s') % log_file)

        # output directory: must already exist
        while True:
            outdir = Prompt.ask(_('Where you want to save PDF file'),
                                default=expanduser('~'))
            if exists(outdir):
                break
            console.log(_('Invalid directory %s') % outdir)

        # whether to enable debug mode
        debug_mode = Confirm.ask(_('Enable DEBUG mode?'), default=False)
        config = Config({
            'base_url': base_url,
            'retries': retries,
            'use_proxy': use_proxy,
            'proxy_protocol': proxy_protocol,
            'proxy_user': proxy_user,
            'proxy_password': proxy_password,
            'proxy_host': proxy_host,
            'proxy_port': proxy_port,
            'log_file': log_file,
            'outdir': outdir,
            'debug_mode': debug_mode
        })
        config.write(CONFIG_FILE)
        console.log(
            _('Configurations saved, you can edit "%s" if needed.') %
            CONFIG_FILE)
    except SciDlError as e:
        logger.exception(e)
        raise click.UsageError(e)
    except Exception as e:
        # Unknown failure: log the traceback, show a generic message
        logger.exception(e)
        raise click.UsageError(UNKNOWN_ERROR_MSG)
    return 0
Пример #4
0
def ipcores(ictx, aXilSimLibsPath, aToScript, aToStdout):
    '''
    Generate the vivado libraries and cores required to simulate the current design.

    Args:
        ictx: Command context carrying project, simulator and Vivado info.
        aXilSimLibsPath: Base path of the Xilinx simulation libraries.
        aToScript: If set, dump Vivado commands to this script path instead of running them.
        aToStdout: If set, dump Vivado commands to stdout instead of running them.
    '''
    lSessionId = 'ipcores'
    lIPCoresModelsimIni = 'modelsim.ipcores.ini'

    # Writing to a script or stdout implies a dry run (no Vivado session)
    lDryRun = aToScript or aToStdout
    lScriptPath = aToScript if not aToStdout else None

    # Use compiler executable to detect Modelsim's flavour
    lSimVariant, lSimVersion = ictx.siminfo
    lSimulator = lSimVariant.lower()

    if lSimulator in ['questasim']:
        lSimulator = 'questa'

    # For questa and modelsim the simulator name is the variant name in lowercase
    cprint(f"[blue]{lSimVariant}[/blue] detected")
    cprint(f'Using simulator: {lSimVariant} {lSimVersion}', style='green')

    # Guess the current vivado version from environment
    if ictx.vivadoinfo is None:
        # BUGFIX: corrected 'veivado' typo in the user-facing error message
        raise click.ClickException(
            "Missing Vivado environment. Please source the vivado environment and try again"
        )

    lVivadoVariant, lVivadoVersion = ictx.vivadoinfo
    cprint(f"Using Vivado version: {lVivadoVersion}", style='green')

    # -------------------------------------------------------------------------
    # Store the target path in the ictx, for it to be retrieved by Vivado
    # i.e. .xilinx_sim_libs/2017.4/modelsim_106.c
    lSimlibPath = simlibPath(ictx, aXilSimLibsPath)

    cprint(f"Using Xilinx simulation library path: [blue]{lSimlibPath}[/blue]")

    if not exists(lSimlibPath):
        cprint(
            f"WARNING: Xilinx simulation libraries not found. Likely this is a problem.\nPlease execute {getClickRootName()} sim setup-simlibs to generate them.",
            style='yellow')
        if not Confirm.ask("Do you want to continue anyway?"):
            return
    # -------------------------------------------------------------------------

    lDepFileParser = ictx.depParser

    # Ensure that no parsing errors are present
    ensureNoParsingErrors(ictx.currentproj.name, lDepFileParser)

    # Ensure that all dependencies are resolved
    ensureNoMissingFiles(ictx.currentproj.name, lDepFileParser)

    lIPCores = findIPSrcs(lDepFileParser.commands["src"])

    if not lIPCores:
        cprint("WARNING: No ipcore files detected in this project",
               style='yellow')
        return
    else:
        cprint("List of ipcores in project")
        for lIPCore in lIPCores:
            cprint(f"- [blue]{lIPCore}[/blue]")
    # -------------------------------------------------------------------------

    # For questa and modelsim the simulator name is the variant name in lowercase
    lIPCoreSimMaker = IPCoresSimGenerator(ictx.currentproj, lSimlibPath,
                                          lSimulator, kIPExportDir,
                                          kIPVivadoProjName)

    cprint("Generating ipcore simulation code", style='blue')

    try:
        with (
                # Pipe commands to Vivado console
                xilinx.VivadoSession(sid=lSessionId)
                if not lDryRun else SmartOpen(lScriptPath)) as lVivadoConsole:

            lIPCoreSimMaker.write(
                lVivadoConsole,
                lDepFileParser.settings,
                lDepFileParser.packages,
                lDepFileParser.commands,
                lDepFileParser.libs,
            )
    except xilinx.VivadoConsoleError as lExc:
        logVivadoConsoleError(lExc)
        raise click.Abort()
    except RuntimeError as lExc:
        cprint(
            f"Error caught while generating Vivado TCL commands: {lExc}",
            style='red',
        )
        raise click.Abort()

    # Copy the generated modelsim ini file locally, with a new name
    shutil.copy(join(lSimlibPath, 'modelsim.ini'),
                join(os.getcwd(), lIPCoresModelsimIni))
    cprint(
        f"Imported modelsim.ini from {lSimlibPath} to {lIPCoresModelsimIni}",
        style='blue',
    )

    # Prepare the area where to compile the simulation
    lIPSimDir = join(kIPExportDir, lSimulator)
    # Create the target directory for the code simulation
    mkdir(join(lIPSimDir, '{0}_lib'.format(lSimulator), 'work'))
    # and copy the simlibrary config file into it
    shutil.copy(join(lSimlibPath, 'modelsim.ini'), lIPSimDir)

    # Compile
    cprint("Compiling ipcores simulation", style='blue')

    with mentor.ModelSimBatch(echo=aToStdout, dryrun=lDryRun,
                              cwd=lIPSimDir) as lSim:
        lSim('do compile.do')

    # ----------------------------------------------------------
    # Collect the list of libraries generated by ipcores to add them to
    # modelsim.ini
    lVivadoYear = [int(v) for v in lVivadoVersion.split('.')]

    if lVivadoYear[0] >= 2017:
        # Vivado 2017 requires an additional folder on the simulation path
        lCoreSimDir = abspath(
            join(kIPExportDir, lSimulator, '{0}_lib'.format(lSimulator),
                 'msim'))
    else:
        # Vivado 2016<
        lCoreSimDir = abspath(join(kIPExportDir, lSimulator, 'msim'))

    if not exists(lCoreSimDir):
        raise click.ClickException("Simlib directory not found")

    lSimLibs = next(os.walk(lCoreSimDir))[1]
    cprint(f"Detected simulation libraries: [blue]{lSimLibs}[/blue]")

    # add newly generated libraries to modelsim.ini
    cprint('Adding generated simulation libraries to modelsim.ini')
    from configparser import RawConfigParser

    lIniParser = RawConfigParser()
    lIniParser.read(lIPCoresModelsimIni, DEFAULT_ENCODING)
    for lSimLib in lSimLibs:
        cprint(f" - {lSimLib}")
        lIniParser.set('Library', lSimLib, join(lCoreSimDir, lSimLib))

    lLibSearchPaths = (lIniParser.get(
        'vsim', 'librarysearchpath').split() if lIniParser.has_option(
            'vsim', 'librarysearchpath') else [])

    lLibSearchPaths += lSimLibs

    # Deduplicate while preserving first-seen order (dicts keep insertion order)
    lNoDups = list(dict.fromkeys(lLibSearchPaths))

    lIniParser.set('vsim', 'librarysearchpath', ' '.join(lNoDups))

    # Make a backup copy of modelsim.ini (generated by ipcores)
    with SmartOpen('modelsim.ini') as newIni:
        lIniParser.write(newIni.target)
Пример #5
0
    def new(self) -> None:
        """Create a new executor folder interactively.

        Collects the executor name, target folder and optional metadata
        (from ``self.args`` when given, otherwise via prompts), renders the
        bundled 'executor-template' files into the target folder, and
        prints a summary table plus next-step instructions.
        """

        from rich import box, print
        from rich.panel import Panel
        from rich.progress import track
        from rich.prompt import Confirm, Prompt
        from rich.syntax import Syntax
        from rich.table import Table

        console = get_rich_console()

        print(
            Panel.fit(
                '''
[bold green]Executor[/bold green] is how Jina processes [bold]Document[/bold].

This guide helps you to create your own Executor in 30 seconds.''',
                title='Create New Executor',
            ))

        # CLI args take precedence over interactive prompts throughout
        exec_name = (self.args.name if self.args.name else Prompt.ask(
            ':grey_question: What is the [bold]name[/bold] of your executor?\n'
            '[dim]CamelCase is required[/dim]',
            default=f'MyExecutor{random.randint(0, 100)}',
        ))

        exec_path = (self.args.path if self.args.path else Prompt.ask(
            ':grey_question: [bold]Which folder[/bold] to store your executor?',
            default=os.path.join(os.getcwd(), exec_name),
        ))
        # '{{}}' sentinel defaults: mustache_repl below drops any template
        # line still containing '{{' or '}}', so unset fields disappear.
        exec_description = '{{}}'
        exec_keywords = '{{}}'
        exec_url = '{{}}'

        is_dockerfile = False

        # Optional advanced configuration: description, keywords, URL, Dockerfile
        if self.args.advance_configuration or Confirm.ask(
                '[green]That\'s all we need to create an Executor![/green]\n'
                ':grey_question: Or do you want to proceed to advanced configuration',
                default=False,
        ):
            exec_description = (
                self.args.description if self.args.description else
                (Prompt.ask(
                    ':grey_question: Please give a [bold]short description[/bold] of your executor?\n'
                    f'[dim]Example: {exec_name} embeds images into 128-dim vectors using ResNet.[/dim]'
                )))

            exec_keywords = (self.args.keywords if self.args.keywords else (
                Prompt.ask(
                    ':grey_question: Please give some [bold]keywords[/bold] to help people search your executor [dim](separated by comma)[/dim]\n'
                    f'[dim]Example: image cv embedding encoding resnet[/dim]'))
                             )

            exec_url = (self.args.url if self.args.url else (Prompt.ask(
                ':grey_question: What is the [bold]URL[/bold] for GitHub repo?\n'
                f'[dim]Example: https://github.com/yourname/my-executor[/dim]')
                                                             ))

            print(
                Panel.fit(
                    '''
[bold]Dockerfile[/bold] describes how this executor will be built. It is useful when
your executor has non-trivial dependencies or must be run under certain environment.

- If the [bold]Dockerfile[/bold] is missing, Jina automatically generates one for you.
- If you provide one, then Jina will respect the given [bold]Dockerfile[/bold].''',
                    title='[Optional] [bold]Dockerfile[/bold]',
                    width=80,
                ))

            is_dockerfile = self.args.add_dockerfile or Confirm.ask(
                ':grey_question: Do you need to write your own [bold]Dockerfile[/bold] instead of the auto-generated one?',
                default=False,
            )
            print('[green]That\'s all we need to create an Executor![/green]')

        def mustache_repl(srcs):
            # Render each template file into exec_path: substitute the known
            # {{...}} placeholders, then drop any line that still contains
            # mustache braces (i.e. fields the user left unset).
            for src in track(srcs,
                             description=f'Creating {exec_name}...',
                             total=len(srcs)):
                with open(
                        os.path.join(__resources_path__, 'executor-template',
                                     src)) as fp, open(
                                         os.path.join(exec_path, src),
                                         'w') as fpw:
                    f = (fp.read().replace('{{exec_name}}', exec_name).replace(
                        '{{exec_description}}', exec_description).replace(
                            '{{exec_keywords}}',
                            str(exec_keywords.split(','))).replace(
                                '{{exec_url}}', exec_url))

                    f = [
                        v + '\n' for v in f.split('\n')
                        if not ('{{' in v or '}}' in v)
                    ]
                    fpw.writelines(f)

        Path(exec_path).mkdir(parents=True, exist_ok=True)
        pkg_files = [
            'executor.py',
            'manifest.yml',
            'README.md',
            'requirements.txt',
            'config.yml',
        ]

        if is_dockerfile:
            pkg_files.append('Dockerfile')

        mustache_repl(pkg_files)

        # Build the folder-structure explanation table shown to the user
        table = Table(box=box.SIMPLE)
        table.add_column('Filename', style='cyan', no_wrap=True)
        table.add_column('Description', no_wrap=True)

        # adding the columns in order of `ls` output
        table.add_row(
            'config.yml',
            'The YAML config file of the Executor. You can define [bold]__init__[/bold] arguments using [bold]with[/bold] keyword.',
        )

        table.add_row(
            '',
            Panel(
                Syntax(
                    f'''
jtype: {exec_name}
with:
    foo: 1
    bar: hello
metas:
    py_modules:
        - executor.py
                ''',
                    'yaml',
                    theme='monokai',
                    line_numbers=True,
                    word_wrap=True,
                ),
                title='config.yml',
                width=50,
                expand=False,
            ),
        )

        if is_dockerfile:
            table.add_row(
                'Dockerfile',
                'The Dockerfile describes how this executor will be built.',
            )

        table.add_row('executor.py', 'The main logic file of the Executor.')
        table.add_row(
            'manifest.yml',
            'Metadata for the Executor, for better appeal on Jina Hub.',
        )

        # Nested table documenting the manifest.yml fields
        manifest_fields_table = Table(box=box.SIMPLE)
        manifest_fields_table.add_column('Field', style='cyan', no_wrap=True)
        manifest_fields_table.add_column('Description', no_wrap=True)
        manifest_fields_table.add_row('name',
                                      'Human-readable title of the Executor')
        manifest_fields_table.add_row(
            'description', 'Human-readable description of the Executor')
        manifest_fields_table.add_row(
            'url',
            'URL to find more information on the Executor (e.g. GitHub repo URL)',
        )
        manifest_fields_table.add_row(
            'keywords', 'Keywords that help user find the Executor')

        table.add_row('', manifest_fields_table)
        table.add_row('README.md', 'A usage guide of the Executor.')
        table.add_row('requirements.txt',
                      'The Python dependencies of the Executor.')

        # "Next steps" panels: inspect, understand, share
        final_table = Table(box=None)

        final_table.add_row(
            'Congrats! You have successfully created an Executor! Here are the next steps:'
        )

        p0 = Panel(
            Syntax(
                f'cd {exec_path}\nls',
                'console',
                theme='monokai',
                line_numbers=True,
                word_wrap=True,
            ),
            title='1. Check out the generated Executor',
            width=120,
            expand=False,
        )

        p1 = Panel(
            table,
            title='2. Understand folder structure',
            width=120,
            expand=False,
        )

        p2 = Panel(
            Syntax(
                f'jina hub push {exec_path}',
                'console',
                theme='monokai',
                line_numbers=True,
                word_wrap=True,
            ),
            title='3. Share it to Jina Hub',
            width=120,
            expand=False,
        )

        final_table.add_row(p0)
        final_table.add_row(p1)
        final_table.add_row(p2)

        p = Panel(
            final_table,
            title=':tada: Next steps',
            width=130,
            expand=False,
        )
        console.print(p)
Пример #6
0
async def on_ready():
    """Interactive flow run once the client logs in: list guilds, pick a
    source and a sink guild, then copy the selected emojis across.
    """
    try:
        console.log(f'Successfully logged in as [green bold]{client.user}[/]')
        guild_count = len(client.guilds)
        with console.status('[bold green]Fetching guild list...') as status:
            sleep(1.5)
            guild_list = Table(title="[magenta bold]Guild List")
            guild_list.add_column('SRL_ID',
                                  justify='right',
                                  style='bold green',
                                  no_wrap=True)
            guild_list.add_column('Guild Name', style='dim')
            guild_list.add_column('Guild_ID', style='bold cyan', no_wrap=True)
            # enumerate() replaces the manual counter of the original
            for i, guild in enumerate(client.guilds):
                guild_list.add_row(f'{i}', guild.name, f'{guild.id}')
            console.log('Fetched [bold green]guild list')
            console.print(guild_list)

        # Source guild: re-prompt until the index is in range
        while True:
            source = IntPrompt.ask(
                f'[[cyan]?[/]]Enter [cyan bold]SRL_ID[/] of the guild [yellow bold]from[/] which to steal emojis [magenta bold](0-{guild_count - 1})[/]'
            )
            if 0 <= source < guild_count:
                break
            console.print(
                f'[red]SRL_ID must be between 0 and {guild_count - 1}[/]')

        # Sink guild: same validation
        while True:
            sink = IntPrompt.ask(
                f'[[cyan]?[/]]Enter [cyan bold]SRL_ID[/] of the guild [yellow bold]to[/] which copy the emojis [magenta bold](0-{guild_count - 1})[/]'
            )
            if 0 <= sink < guild_count:
                break
            console.print(
                f'[red]SRL_ID must be between 0 and {guild_count - 1}[/]')

        source_guild = client.guilds[source]
        sink_guild = client.guilds[sink]
        if not sink_guild.me.guild_permissions.manage_emojis:
            console.print(
                f'[[bold red]ERROR[/]][red]You do not have permissions to manage emojis of guild \'{sink_guild.name}\''
            )
            await exit_code()

        with console.status(
                f'[bold green]Fetching emoji list for guild {source_guild.name}...'
        ) as status:
            sleep(1)
            emoji_list = Table(title="[magenta bold]Emoji List")
            emoji_list.add_column('SRL_ID',
                                  justify='right',
                                  style='bold green',
                                  no_wrap=True)
            emoji_list.add_column('Emoji Name', style='dim')
            emoji_list.add_column('Emoji_ID', style='cyan', no_wrap=True)
            emoji_list.add_column('Animated?')
            for i, emoji in enumerate(source_guild.emojis):
                emoji_list.add_row(f'{i}', emoji.name, f'{emoji.id}',
                                   'Yes' if emoji.animated else 'No')

            console.log(
                f'Fetched [bold green]emoji list[/] for [dim]{source_guild.name}'
            )
            console.print(emoji_list)

        free_slots = sink_guild.emoji_limit - len(sink_guild.emojis)
        if free_slots == 0:
            console.print(
                f'[[bold red]ERROR[/]][red]Guild {sink_guild.name} has no free emoji slot!'
            )
            await exit_code()

        console.print(
            f'Guild [bold green]{sink_guild.name}[/] has [bold green]{free_slots}[/] free emoji slots.'
        )
        # BUGFIX: corrected 'coma-separated' typo in the prompt below
        values = Prompt.ask(
            '[[cyan]?[/]]Enter [bold yellow]comma-separated[/] values of [cyan bold]SRL_ID[/] of the emojis to steal [dim](TIP: Type all to steal all emojis)[/]',
            default='all')
        if values == 'all':
            emojis_to_steal = source_guild.emojis
        else:
            emojis_to_steal = [
                source_guild.emojis[int(index.strip())]
                for index in values.split(',')
            ]

        if len(emojis_to_steal) > free_slots:
            console.print(
                f'[[bold red]ERROR[/]][red]Guild {sink_guild.name} does not have enough free emoji slots!'
            )
            await exit_code()

        # Show a summary of the pending transfer before asking to confirm
        transaction = Table(title="[magenta bold]Steal Transactions")
        transaction.add_column('From', style='bold yellow')
        transaction.add_column('To', style='bold yellow')
        transaction.add_column('Emojis Stolen')

        transaction.add_row(source_guild.name, sink_guild.name,
                            '\n'.join(emoji.name for emoji in emojis_to_steal))
        console.print(transaction)

        if not Confirm.ask("[[cyan]?[/]]Apply transactions?", default=True):
            await exit_code()

        with console.status('[bold green]Stealing emojis...') as status:
            for emoji in emojis_to_steal:
                await sink_guild.create_custom_emoji(
                    name=emoji.name,
                    image=await emoji.url.read(),
                    reason='Created using EmojiSteal script.')
                console.print(f'Emoji created: [bold green]{emoji.name}')

        console.log('[bold green]Completed stealing emojis!')
        console.print()
        console.print('[cyan]Thanks for using EmojiSteal script!')
        console.print(
            '[cyan]Coded by @DarkGuy10 https://github.com/DarkGuy10/')
        console.print('[i]Ehe te nanayo![/]')
        await exit_code()

    except SystemExit:
        # presumably exit_code() terminates via SystemExit - keep that
        # silent, as the original bare except did. TODO confirm.
        pass
    except Exception:
        # BUGFIX: the original bare 'except: pass' silently swallowed every
        # error (permissions, network, bad input). Surface it instead.
        console.print_exception()
Пример #7
0
def get_has_models() -> bool:
    """Ask the user whether this cog uses models; defaults to No."""
    uses_models: bool = Confirm.ask(
        "Does this cog use models?",
        default=False,
        console=console,
    )
    return uses_models
Пример #8
0
    def pre_spray_info(self):
        """
        Display spray config table

        Renders the current spray configuration (target, domain, interval,
        jitter, notification and output settings), waits for the user to
        confirm, and - for the Smb module - establishes and reports the SMB
        connection before printing the output headers. Exits the process if
        the SMB connection fails.
        """
        spray_info = Table(
            show_header=False,
            show_footer=False,
            min_width=61,
            title=f"Module: {self.module.upper()}",
            title_justify="left",
            title_style="bold reverse",
        )

        spray_info.add_row("Target", f"{self.target.url}")

        # Optional rows: only shown when the corresponding setting is set
        if self.domain:
            spray_info.add_row("Domain", f"{self.domain}")

        if self.attempts:
            spray_info.add_row("Interval", f"{self.interval} minutes")
            spray_info.add_row("Attempts", f"{self.attempts} per interval")

        if self.jitter:
            spray_info.add_row("Jitter",
                               f"{self.jitter_min}-{self.jitter} seconds")

        if self.notify:
            spray_info.add_row("Notify", f"True ({self.notify})")

        # Show only the file names, not full paths
        log_name = pathlib.PurePath(self.log_name)
        out_name = pathlib.PurePath(self.output)
        spray_info.add_row("Logfile", f"{log_name.name}")
        spray_info.add_row("Results", f"{out_name.name}")

        console.print(spray_info)

        print()
        # Used purely as a "press enter to continue" gate; the answer is ignored
        Confirm.ask(
            "[blue]Press enter to begin",
            default=True,
            show_choices=False,
            show_default=False,
        )
        print()

        if self.module == "Smb":
            # BUGFIX: corrected 'Initiaing' typo in the status message
            console.print(f"[*] Initiating SMB connection to {self.host} ...",
                          style="warning")
            if self.target.get_conn():
                console.print(
                    f'[+] Connected to {self.host} over {"SMBv1" if self.target.smbv1 else "SMBv3"}',
                    style="good",
                )

                console.print(f"\t[>] Hostname: {self.target.hostname} ",
                              style="info")
                console.print(f"\t[>] Domain: {self.target.domain} ",
                              style="info")
                console.print(f"\t[>] OS: {self.target.os} ", style="info")
                print()

            else:
                console.print(f"[!] Failed to connect to {self.host} over SMB",
                              style="danger")
                exit()

        self.target.print_headers(self.output)
Пример #9
0
def gendecoders(ictx, aCheckUpToDate, aForce):
    """Regenerate ipbus address-decoder VHDL files for the current project.

    Args:
        ictx: Project context (provides current project path, dependency
            parser and path maker).
        aCheckUpToDate: If True, treat any stale decoder as an error and
            exit instead of updating it.
        aForce: If True, overwrite stale decoders without asking for
            confirmation.
    """

    lDecodersDir = 'decoders'

    # Rebuild the 'decoders' scratch area from scratch inside the project
    with DirSentry(ictx.currentproj.path):
        sh.rm('-rf', lDecodersDir)
        # Gather address tables
        addrtab(ictx, aDest=lDecodersDir)

    lGenScript = 'gen_ipbus_addr_decode'

    if not which(lGenScript):
        raise click.ClickException("'{0}' script not found.".format(lGenScript))

    cprint(f"Using {which(lGenScript)}", style='green')

    # ------------------------------------------------------------------------------

    lUpdatedDecoders = []
    lGen = sh.Command(which(lGenScript))
    lErrors = {}
    with DirSentry(join(ictx.currentproj.path, lDecodersDir)):
        for lAddr in ictx.depParser.commands['addrtab']:
            cprint(f"Processing [blue]{basename(lAddr.filepath)}[/blue]")
            # Interested in top-level address tables only
            if not lAddr.toplevel:
                cprint(
                    f"{lAddr.filepath} is not a top-level address table. Decoder will not be generated.",
                    style='cyan',
                )
                continue

            # Generate a new decoder file
            # Failures are collected in lErrors and reported in bulk below.
            try:
                lGen(basename(lAddr.filepath), _out=sys.stdout, _err=sys.stderr, _tee=True)
            except Exception as lExc:
                cprint(f"Failed to generate decoder for {basename(lAddr.filepath)}", style='red')
                lErrors[lAddr] = lExc
                continue

            lDecoder = f'ipbus_decode_{splitext(basename(lAddr.filepath))[0]}.vhd'
            lTarget = ictx.pathMaker.getPath(
                lAddr.package, lAddr.component, 'src', lDecoder
            )

            # Prefer colordiff for readable output when it is installed
            diff = sh.colordiff if which('colordiff') else sh.diff

            # Has anything changed?
            # diff exits non-zero when files differ; sh raises that as
            # ErrorReturnCode, which here means "decoder needs updating".
            try:
                diff('-u', '-I', '^-- START automatically', lTarget, lDecoder)
            except sh.ErrorReturnCode as e:
                lUpdatedDecoders.append((lDecoder, lTarget))

        if lErrors:
            cprint(
                "\nERROR: decoder generation failed",
                style='red',
            )
            for a in sorted(lErrors):
                cprint(' - ' + basename(a.filepath))
                cprint('   ' + lErrors[a].stdout.decode(DEFAULT_ENCODING, "replace"))
            raise SystemExit(-1)



        # ------------------------------------------------------------------------------
        # If no difference between old and newly generated decoders, quit here.
        if not lUpdatedDecoders:
            console.log(
                f"{ictx.currentproj.name}: All ipbus decoders are up-to-date.",
                style='green',
            )
            return

        # ------------------------------------------------------------------------------
        # NOTE: each entry of lUpdatedDecoders is a (decoder, target) tuple,
        # so the whole pair is rendered in the listing below.
        cprint(
            'The following decoders have changed and must be updated:\n'
            + '\n'.join([f" * [blue]{d}[/blue]" for d in lUpdatedDecoders])
            + '\n'
        )
        if aCheckUpToDate:
            # Check-only mode: stale decoders are an error; nothing is modified
            raise SystemExit(-1)

        if not aForce and not Confirm.ask("Do you want to continue?"):
            return

        # Copy the freshly generated decoders over the stale targets
        for lDecoder, lTarget in lUpdatedDecoders:
            cprint(sh.cp('-av', lDecoder, lTarget))

        console.log(
            f"{ictx.currentproj.name}: {len(lUpdatedDecoders)} decoders updated.",
            style='green',
        )
Пример #10
0
# Application identity, used to derive the per-user configuration directory.
APP_NAME = "azdummy"

# Mutable runtime flags shared across the application.
state = {"verbose": False, "timeStart": None, "resetConfig": True}

# Rich console plus pretty tracebacks and repr formatting for all output.
console = Console()
traceback.install(show_locals=True)
pretty.install()

# Resolved location of the per-user settings file.
app_dir = typer.get_app_dir(APP_NAME)
config_path = Path(app_dir) / "settings.env"


def reset_config():
    """Reset the configuration to default.

    Recreates the app config directory if needed and copies the bundled
    default ``settings.env`` over the user's config file. Errors are
    reported on the console rather than raised.
    """
    try:
        Path(app_dir).mkdir(parents=True, exist_ok=True)
        shutil.copy(Path(__file__).parent / "settings.env", config_path)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any OS/copy error is shown with a rich traceback.
        console.print_exception()


# First run: offer to create a default config file before loading settings.
if not config_path.exists():
    if Confirm.ask(
            "\n:grimacing face: :thumbs_down_dark_skin_tone: Looks like your first time running azdummy. Would you like initialize a new config?"
    ):
        reset_config()

# Load the env file into the environment, then build the typed settings object.
load_dotenv(config_path, verbose=True)
settings = AzDummySettings(config_path)
Пример #11
0
def command(
    directory: str,
    inifile: str = "idefix.ini",
    duration: float | None = None,
    time_step: float | None = None,
    one_step: bool | None = False,
) -> int:
    """Prepare an idefix run located in *directory*.

    Locates the inifile (searching the current working directory, then the
    target directory), optionally restricts the run to a single time step,
    and decides whether the binary must be (re)compiled by comparing
    source-file modification times against the existing executable.

    Returns:
        Non-zero on failure (missing inifile, or no binary/Makefile).
    """
    # Resolve the inifile: cwd takes precedence over the target directory.
    input_inifile = inifile
    for loc in [Path.cwd(), Path(directory)]:
        pinifile = (loc / input_inifile).resolve()
        if pinifile.is_file():
            break
    else:
        print_err(f"could not find inifile {input_inifile}")
        return 1
    if one_step:
        # A single step runs for exactly one time_step; default to the
        # inifile's first_dt when no explicit step was given.
        if time_step is None:
            time_step = inifix.load(pinifile)["TimeIntegrator"]["first_dt"]
        duration = time_step

    compilation_required = False
    d = Path(directory)
    if not (d / "idefix").is_file():
        if not (d / "Makefile").is_file():
            print_err(
                "No idefix instance or Makefile found in the target directory. "
                "Run `idfx conf` first."
            )
            return 1

        # Makefile present but no binary yet: must compile.
        compilation_required = True

    else:
        # Binary exists: recompile only if any source changed since it was built.
        last_compilation_time = os.stat(d / "idefix").st_mtime
        source_patterns = (
            "**/*.hpp",
            "**/*.cpp",
            "**/*.h",
            "**/*.c",
            "**/CMakeLists.txt",
        )

        files_to_check = files_from_patterns(d, *source_patterns, recursive=True)
        idefix_dir = Path(os.environ["IDEFIX_DIR"])
        try:
            with pushd(idefix_dir):
                # check=True so a failing `git ls-files` raises
                # CalledProcessError; without it the except branch below
                # was unreachable (subprocess.run never raises on its own).
                git_indexed_idefix_files = [
                    os.path.abspath(_)
                    for _ in subprocess.run(
                        ["git", "ls-files"], capture_output=True, check=True
                    )
                    .stdout.decode()
                    .split("\n")
                ]
        except subprocess.CalledProcessError:
            # emit no warning here as Idefix might not be installed as a git copy
            pass
        else:
            # Also track the git-indexed Idefix source files themselves.
            files_to_check.extend(
                list(
                    set(git_indexed_idefix_files).intersection(
                        set(files_from_patterns(idefix_dir / "src", *source_patterns))
                    )
                )
            )

        source_edit_times = tuple(
            (file, os.stat(file).st_mtime) for file in files_to_check
        )
        time_deltas = tuple(
            (file, edit_time - last_compilation_time)
            for file, edit_time in source_edit_times
        )
        if updated_since_compilation := tuple(
            file for file, td in time_deltas if td > 0
        ):
            print_warning(
                "The following files were updated since last compilation:",
            )
            print("\n".join(updated_since_compilation), file=sys.stderr)
            compilation_required = Confirm.ask(
                "Would you like to recompile before running the program ?"
            )
Пример #12
0
def ack(k, v):
    """Prompt the user to confirm the *k* => *v* assignment; return the answer."""
    return Confirm.ask(f"Confirm {k} => {v}?")
Пример #13
0
    ENV_FILE["DEPLOY_MODE"] = "local"
    ENV_FILE["META_SERVICE_DOMAIN"] = "127.0.0.1:5565"
    ENV_FILE["META_SERVICE_BASE_URL"] = "http://127.0.0.1:5565"
    ENV_FILE["META_SERVICE_NAME"] = "masz"
    service_base_url = "http://127.0.0.1:5565"
else:
    ENV_FILE["DEPLOY_MODE"] = "domain"
    domain = Prompt.ask(":question_mark: Enter your (sub)domain",
                        default="masz.example.com")
    ENV_FILE["META_SERVICE_DOMAIN"] = domain
    ENV_FILE["META_SERVICE_BASE_URL"] = f"https://{domain}"
    ENV_FILE["META_SERVICE_NAME"] = domain
    ENV_FILE["DEPLOY_DOMAIN"] = domain
    service_base_url = f"https://{domain}"
    Confirm.ask(
        ":exclamation_mark: [bright_black]Be sure to redirect your reverse proxy correctly[/bright_black].\n[bright_black]The docker container will be listening on local port [/bright_black][bright_green]5565[bright_green].",
        default=True)

ENV_FILE["DISCORD_BOT_TOKEN"] = Prompt.ask(
    ":question_mark: Enter your Discord bot token")
ENV_FILE["DISCORD_OAUTH_CLIENT_ID"] = Prompt.ask(
    ":question_mark: Enter your Discord OAuth client ID")
ENV_FILE["DISCORD_OAUTH_CLIENT_SECRET"] = Prompt.ask(
    ":question_mark: Enter your Discord OAuth client secret")

print(
    ":question_mark: Enter the discord id of users that should be site admins.\n[bright_black]It is recommended to be just one. You can enter as many as you want.[/bright_black]"
)
admins = []
while True:
    site_admin = Prompt.ask(
Пример #14
0
def init_customization() -> None:
    """Interactively build a manim configuration file.

    Walks the user through directory, LaTeX, style and quality choices,
    then writes the result as YAML to either the global default config or
    a local ``custom_config.yml``. Ctrl-C at any prompt aborts cleanly.
    """
    # Template configuration; empty strings are treated as "unset" and may
    # be pruned before writing a local config (see remove_empty_value below).
    configuration = {
        "directories": {
            "mirror_module_path": False,
            "output": "",
            "raster_images": "",
            "vector_images": "",
            "sounds": "",
            "temporary_storage": "",
        },
        "tex": {
            "executable": "",
            "template_file": "",
            "intermediate_filetype": "",
            "text_to_replace": "[tex_expression]",
        },
        "universal_import_line": "from manimlib import *",
        "style": {
            "font": "Consolas",
            "background_color": "",
        },
        "window_position": "UR",
        "window_monitor": 0,
        "full_screen": False,
        "break_into_partial_movies": False,
        "camera_qualities": {
            "low": {
                "resolution": "854x480",
                "frame_rate": 15,
            },
            "medium": {
                "resolution": "1280x720",
                "frame_rate": 30,
            },
            "high": {
                "resolution": "1920x1080",
                "frame_rate": 60,
            },
            "ultra_high": {
                "resolution": "3840x2160",
                "frame_rate": 60,
            },
            "default_quality": "",
        }
    }

    console = Console()
    console.print(Rule("[bold]Configuration Guide[/bold]"))
    # print("Initialize configuration")
    try:
        # Global writes to manimlib's default_config.yml; local writes to cwd.
        scope = Prompt.ask("  Select the scope of the configuration",
                           choices=["global", "local"],
                           default="local")

        console.print("[bold]Directories:[/bold]")
        dir_config = configuration["directories"]
        dir_config["output"] = Prompt.ask(
            "  Where should manim [bold]output[/bold] video and image files place [prompt.default](optional, default is none)",
            default="",
            show_default=False)
        dir_config["raster_images"] = Prompt.ask(
            "  Which folder should manim find [bold]raster images[/bold] (.jpg .png .gif) in "
            "[prompt.default](optional, default is none)",
            default="",
            show_default=False)
        dir_config["vector_images"] = Prompt.ask(
            "  Which folder should manim find [bold]vector images[/bold] (.svg .xdv) in "
            "[prompt.default](optional, default is none)",
            default="",
            show_default=False)
        dir_config["sounds"] = Prompt.ask(
            "  Which folder should manim find [bold]sound files[/bold] (.mp3 .wav) in "
            "[prompt.default](optional, default is none)",
            default="",
            show_default=False)
        dir_config["temporary_storage"] = Prompt.ask(
            "  Which folder should manim storage [bold]temporary files[/bold] "
            "[prompt.default](recommended, use system temporary folder by default)",
            default="",
            show_default=False)

        console.print("[bold]LaTeX:[/bold]")
        tex_config = configuration["tex"]
        # The chosen engine fixes template file and intermediate filetype.
        tex = Prompt.ask(
            "  Select an executable program to use to compile a LaTeX source file",
            choices=["latex", "xelatex"],
            default="latex")
        if tex == "latex":
            tex_config["executable"] = "latex"
            tex_config["template_file"] = "tex_template.tex"
            tex_config["intermediate_filetype"] = "dvi"
        else:
            tex_config["executable"] = "xelatex -no-pdf"
            tex_config["template_file"] = "ctex_template.tex"
            tex_config["intermediate_filetype"] = "xdv"

        console.print("[bold]Styles:[/bold]")
        configuration["style"]["background_color"] = Prompt.ask(
            "  Which [bold]background color[/bold] do you want [italic](hex code)",
            default="#333333")

        console.print("[bold]Camera qualities:[/bold]")
        table = Table("low",
                      "medium",
                      "high",
                      "ultra_high",
                      title="Four defined qualities",
                      box=box.ROUNDED)
        table.add_row("480p15", "720p30", "1080p60", "2160p60")
        console.print(table)
        configuration["camera_qualities"]["default_quality"] = Prompt.ask(
            "  Which one to choose as the default rendering quality",
            choices=["low", "medium", "high", "ultra_high"],
            default="high")

        write_to_file = Confirm.ask(
            "\n[bold]Are you sure to write these configs to file?[/bold]",
            default=True)
        if not write_to_file:
            # Declining is treated the same as aborting with Ctrl-C.
            raise KeyboardInterrupt

        global_file_name = os.path.join(get_manim_dir(), "manimlib",
                                        "default_config.yml")
        if scope == "global":
            file_name = global_file_name
        else:
            # For local configs, drop unset values so the global defaults
            # still apply as fallbacks.
            if os.path.exists(global_file_name):
                remove_empty_value(configuration)
            file_name = os.path.join(os.getcwd(), "custom_config.yml")
        with open(file_name, "w", encoding="utf-8") as f:
            yaml.dump(configuration, f)

        console.print(
            f"\n:rocket: You have successfully set up a {scope} configuration file!"
        )
        console.print(
            f"You can manually modify it in: [cyan]`{file_name}`[/cyan]")

    except KeyboardInterrupt:
        console.print("\n[green]Exit configuration guide[/green]")
from rich.prompt import Prompt, Confirm, IntPrompt

# Config lines common to every trunk port; the allowed-vlan line is meant to
# be completed with user input.
trunk_template = [
    "switchport trunk encapsulation dot1q",
    "switchport mode trunk",
    "switchport trunk allowed vlan",
]

# Gather interface parameters interactively.
interface = Prompt.ask("Interface number")
mode = Prompt.ask("Interface mode", choices=["access", "trunk"])
vlan = IntPrompt.ask("VLAN number", default=1)

Confirm.ask("Continue program?")
Пример #16
0
    def launch_pipeline(self):
        """Run the interactive pipeline-launch flow.

        Collects parameters (via web form or CLI wizard), validates them
        against the pipeline schema, then builds and launches the
        `nextflow run` command.

        Returns:
            False on any failure; otherwise falls through after launching.
        """

        # Check that we have everything we need
        if self.pipeline is None and self.web_id is None:
            log.error(
                "Either a pipeline name or web cache ID is required. Please see nf-core launch --help for more information."
            )
            return False

        # Check if the output file exists already
        if os.path.exists(self.params_out):
            log.warning("Parameter output file already exists! {}".format(
                os.path.relpath(self.params_out)))
            if Confirm.ask("[yellow]Do you want to overwrite this file?"):
                os.remove(self.params_out)
                log.info("Deleted {}\n".format(self.params_out))
            else:
                log.info(
                    "Exiting. Use --params-out to specify a custom filename.")
                return False

        log.info(
            "This tool ignores any pipeline parameter defaults overwritten by Nextflow config files or profiles\n"
        )

        # Check if we have a web ID
        if self.web_id is not None:
            self.schema_obj = nf_core.schema.PipelineSchema()
            try:
                # Poll until the browser form has been submitted
                if not self.get_web_launch_response():
                    log.info(
                        "Waiting for form to be completed in the browser. Remember to click Finished when you're done."
                    )
                    log.info("URL: {}".format(self.web_schema_launch_web_url))
                    nf_core.utils.wait_cli_function(
                        self.get_web_launch_response)
            except AssertionError as e:
                log.error(e.args[0])
                return False

            # Load local params if supplied
            self.set_schema_inputs()
            # Load schema defaults
            self.schema_obj.get_schema_defaults()

        # No --id supplied, fetch parameter inputs
        else:
            # Build the schema and starting inputs
            if self.get_pipeline_schema() is False:
                return False
            self.set_schema_inputs()
            self.merge_nxf_flag_schema()

            # Collect user inputs via web or cli
            if self.prompt_web_gui():
                try:
                    self.launch_web_gui()
                except AssertionError as e:
                    log.error(e.args[0])
                    return False
            else:
                # Kick off the interactive wizard to collect user inputs
                self.prompt_schema()

        # Validate the parameters that we now have
        if not self.schema_obj.validate_params():
            return False

        # Strip out the defaults
        if not self.save_all:
            self.strip_default_params()

        # Build and launch the `nextflow run` command
        self.build_command()
        self.launch_workflow()
Пример #17
0
def get_recording_filepaths(
    key: dict,
    rec_metadata: pd.DataFrame,
    recordings_folder: Path,
    rec_folder: str,
) -> dict:
    """Complete *key* with spike-sorting file paths for one recording.

    Args:
        key: Partially filled entry (must contain "name"); updated in place.
        rec_metadata: Metadata table with one row per "recording folder".
        recordings_folder: Root folder containing all recording folders.
        rec_folder: Name of the recording folder to look up.

    Returns:
        The completed key dict, or None when the recording is skipped
        (not validated, not spike sorted, or required files are missing
        and the user declines to insert a placeholder).
    """

    # check if recording has been validated
    metadata = rec_metadata.loc[rec_metadata["recording folder"] ==
                                rec_folder].iloc[0]

    if metadata.Validated != "yes" or metadata["USE?"] != "yes":
        logger.debug(
            f'Recording for {key["name"]} was not validated - skipping.')
        return None

    if metadata["spike sorted"] != "yes":
        logger.debug(
            f'Recording for {key["name"]} not yet spike sorted - skipping.')
        return None

    # NOTE: handling of concatenated recordings is currently disabled.
    # concat_filepath = metadata["concatenated recording file"]
    # if isinstance(concat_filepath, str):
    #     # it was concatenated
    #     rec_name = concat_filepath
    #     rec_path = recordings_folder / Path(rec_name)
    #     key["concatenated"] = 1

    #     if not rec_path.is_dir() or not files(rec_path):
    #         logger.warning(
    #             f'Invalid rec folder: {rec_path} for session {key["name"]} - empty or not existant rec folder.'
    #         )
    #         return None

    #     rec_name = rec_name + "_g0"
    # else:
    rec_name = rec_metadata.loc[rec_metadata["recording folder"] ==
                                rec_folder]["recording folder"].iloc[0]
    rec_path = recordings_folder / Path(rec_name) / Path(rec_name + "_imec0")
    key["concatenated"] = -1

    # complete the paths to all relevant files
    key["spike_sorting_params_file_path"] = str(
        rec_path / (rec_name + "_t0.imec0.ap.prm"))
    key["spike_sorting_spikes_file_path"] = str(
        rec_path / (rec_name + "_t0.imec0.ap.csv"))
    key["spike_sorting_clusters_file_path"] = str(
        rec_path / (rec_name + "_t0.imec0.ap_res.mat"))

    # Verify every expected file exists; offer a placeholder for missing ones.
    for name in (
            "spike_sorting_params_file_path",
            "spike_sorting_spikes_file_path",
            "spike_sorting_clusters_file_path",
    ):
        if not Path(key[name]).exists():
            logger.warning(
                f'Cant find file for "{name}" in session "{key["name"]}" - maybe not spike sorted yet?\nPath: {key[name]}'
            )
            # Guard clause replaces the old `if ...: pass / else: return None`
            if not Confirm.ask("Insert placeholder?"):
                return None

    # get probe configuration
    key["recording_probe_configuration"], key["reference"] = metadata[
        "probe config"].split("_")
    return key
Пример #18
0
    def __init__(
        self,
        passwords,
        users,
        host,
        module,
        path,
        output,
        attempts,
        interval,
        equal,
        timeout,
        port,
        fireprox,
        domain,
        analyze,
        jitter,
        jitter_min,
        notify,
        webhook,
        pause,
    ):
        """
        Validate args and initialize class attributes.

        Exits the process (after printing an error) on any invalid or
        inconsistent combination of arguments.
        """

        # if any other module than Office365 is specified, make sure hostname was provided
        if module.lower() != "office365" and not host:
            console.print(
                "[!] Hostname (-H) of target (mail.targetdomain.com) is required for all modules except Office365",
                style="danger",
            )
            exit()

        elif module.lower() == "office365" and not host:
            host = "Office365"  # set host to Office365 for the logfile name
        elif module.lower() == "smb" and (timeout != 5 or fireprox
                                          or port != 443):
            console.print(
                "[!] Fireprox (-f), port (-P) and timeout (-t) are incompatible when spraying over SMB",
                style="warning",
            )

        # get usernames from file
        try:
            with open(users, "r") as f:
                user_list = f.read().splitlines()
        except Exception:
            console.print(f"[!] Error reading usernames from file: {users}",
                          style="danger")
            exit()

        # get passwords from file, otherwise treat arg as a single password to spray
        try:
            with open(passwords, "r") as f:
                password_list = f.read().splitlines()
        except Exception:
            password_list = [passwords]

        # check that interval and attempt args are supplied together
        if interval and not attempts:
            console.print(
                "[!] Number of login attempts per interval (-a) required with -i",
                style="danger",
            )
            exit()
        elif not interval and attempts:
            console.print("[!] Minutes per interval (-i) required with -a",
                          style="danger")
            exit()
        elif not interval and not attempts and len(password_list) > 1:
            # No throttling configured for a multi-password spray: warn loudly
            console.print(
                "[*] You have not provided spray attempts/interval. This may lead to account lockouts!",
                style="warning",
            )
            print()

            Confirm.ask(
                "[yellow]Press enter to continue anyways",
                default=True,
                show_choices=False,
                show_default=False,
            )

        # Check that jitter flags aren't supplied independently
        if jitter_min and not jitter:
            console.print("--jitter-min flag must be set with --jitter flag",
                          style="danger")
            exit()

        elif jitter and not jitter_min:
            console.print(
                "[!] --jitter flag must be set with --jitter-min flag",
                style="danger")
            exit()

        if jitter and jitter_min and jitter_min >= jitter:
            console.print(
                "[!] --jitter flag must be greater than --jitter-min flag",
                style="danger",
            )
            exit()

        # Making sure user set path variable for NTLM authentication module
        if module.lower() == "ntlm" and path is None:
            console.print(
                "[!] Must set --path to use the NTLM authentication module",
                style="danger",
            )
            exit()

        if notify and webhook is None:
            console.print(
                "[!] Must specify a Webhook URL when the notify flag is used.",
                style="danger",
            )
            exit()

        # Create spraycharles directories if they don't exist.
        # makedirs(..., exist_ok=True) also covers the case where
        # ~/.spraycharles exists but the logs/out subfolders do not
        # (the old mkdir-only logic skipped them entirely).
        user_home = str(Path.home())
        os.makedirs(f"{user_home}/.spraycharles/logs", exist_ok=True)
        os.makedirs(f"{user_home}/.spraycharles/out", exist_ok=True)

        # Building output files
        current = datetime.datetime.now()
        timestamp = current.strftime("%Y%m%d-%H%M%S")
        if output == "output.csv":
            output = f"{user_home}/.spraycharles/out/{host}.{timestamp}.csv"

        self.passwords = password_list
        self.password_file = passwords
        self.usernames = user_list
        self.user_file = users
        self.host = host
        self.module = module
        self.path = path
        self.output = output
        self.attempts = attempts
        self.interval = interval
        self.equal = equal
        self.timeout = timeout
        self.port = port
        self.fireprox = fireprox
        self.domain = domain
        self.analyze = analyze
        self.jitter = jitter
        self.jitter_min = jitter_min
        self.notify = notify
        self.webhook = webhook
        self.pause = pause
        self.total_hits = 0
        self.login_attempts = 0
        self.target = None
        self.log_name = None
def assign_prs(
    github_token: str,
    previous_release: str,
    current_release: str,
    verbose: bool,
    limit_pr_count: Optional[int],
    dry_run: bool,
    milestone_number: int,
    skip_assigned: bool,
    print_summary: bool,
    assume_yes: bool,
    output_folder: str,
):
    """Interactively assign merged PRs between two releases to a milestone.

    For each PR found in the change range, the operator chooses whether to
    add it to the milestone, mark it doc-only, exclude it from the
    changelog, skip it, or quit. Optionally prints a summary and writes
    per-category commit lists to *output_folder*.
    """
    # Only changes that map to an actual PR are considered.
    changes = get_changes(verbose, previous_release, current_release)
    changes = list(filter(lambda change: change.pr is not None, changes))
    prs = [change.pr for change in changes]

    g = Github(github_token)
    repo = g.get_repo("apache/airflow")

    # Option implications: output folder => summary => skip-assigned.
    if output_folder and not print_summary:
        console.print(
            "\n[yellow]Implying --print-summary as output folder is enabled[/]\n"
        )
        print_summary = True
    if print_summary and not skip_assigned:
        console.print(
            "\n[yellow]Implying --skip-assigned as summary report is enabled[/]\n"
        )
        skip_assigned = True
    milestone = repo.get_milestone(milestone_number)
    count_prs = len(prs)
    if limit_pr_count:
        count_prs = limit_pr_count
    console.print(
        f"\n[green]Applying Milestone: {milestone.title} to {count_prs} merged PRs[/]\n"
    )
    if dry_run:
        console.print("[yellow]Dry run mode![/]\n")
    else:
        if not assume_yes and not Confirm.ask("Is this OK?"):
            sys.exit(1)

    doc_only_label = repo.get_label(TYPE_DOC_ONLY_LABEL)
    changelog_skip_label = repo.get_label(CHANGELOG_SKIP_LABEL)
    changelog_changes: List[Change] = []
    doc_only_changes: List[Change] = []
    excluded_changes: List[Change] = []
    for i in range(count_prs):
        pr_number = prs[i]
        if pr_number is None:
            # Should not happen but MyPy is not happy
            continue
        console.print('-' * 80)
        console.print(f"\n >>>> Retrieving PR#{pr_number}: "
                      f"https://github.com/apache/airflow/pull/{pr_number}")
        pr: PullRequest
        try:
            pr = repo.get_pull(pr_number)
        except UnknownObjectException:
            # Fallback to issue if PR not found
            try:
                # PR has almost the same fields as Issue
                pr = cast(PullRequest, repo.get_issue(pr_number))
            except UnknownObjectException:
                console.print(
                    f"[red]The PR #{pr_number} could not be found[/]")
                continue
        console.print(f"\nPR:{pr_number}: {pr.title}\n")
        label_names = [label.name for label in pr.labels]
        already_assigned_milestone_number = pr.milestone.number if pr.milestone else None
        if already_assigned_milestone_number == milestone.number:
            # Already on the target milestone: classify by labels, and only
            # prompt again if --skip-assigned was not requested.
            console.print(
                f"[green]The PR #{pr_number} is already "
                f"assigned to the milestone: {pr.milestone.title}[/]. Labels: {label_names}"
            )
            if TYPE_DOC_ONLY_LABEL in label_names:
                console.print(
                    "[yellow]It will be classified as doc-only change[/]\n")
                if skip_assigned:
                    doc_only_changes.append(changes[i])
            elif CHANGELOG_SKIP_LABEL in label_names:
                console.print(
                    "[yellow]It will be excluded from changelog[/]\n")
                if skip_assigned:
                    excluded_changes.append(changes[i])
            else:
                console.print(
                    "[green]The change will be included in changelog[/]\n")
                if skip_assigned:
                    changelog_changes.append(changes[i])
            if skip_assigned:
                continue
        elif already_assigned_milestone_number is not None:
            console.print(
                f"[yellow]The PR #{pr_number} is already "
                f"assigned to another milestone: {pr.milestone.title}[/]. Labels: {label_names}"
            )
        # Ignore doc-only and skipped PRs
        console.print(f"Marking the PR #{pr_number} as {milestone.title}")
        chosen_option = Prompt.ask(
            "Choose action:",
            choices=[
                "a", "add", "d", "doc", "e", "exclude", "s", "skip", "q",
                "quit"
            ],
            default="skip",
        ).lower()
        if chosen_option in ("add", "a"):
            console.print(f"Adding the PR #{pr_number} to {milestone.title}")
            if not dry_run:
                update_milestone(repo, pr, milestone)
            if skip_assigned:
                changelog_changes.append(changes[i])
        elif chosen_option in ("doc", "d"):
            console.print(
                f"Applying the label {doc_only_label} the PR #{pr_number}")
            if not dry_run:
                pr.add_to_labels(doc_only_label)
                update_milestone(repo, pr, milestone)
            if skip_assigned:
                doc_only_changes.append(changes[i])
        elif chosen_option in ("exclude", "e"):
            console.print(
                f"Applying the label {changelog_skip_label} the PR #{pr_number}"
            )
            if not dry_run:
                pr.add_to_labels(changelog_skip_label)
                update_milestone(repo, pr, milestone)
            if skip_assigned:
                excluded_changes.append(changes[i])
        elif chosen_option in ("skip", "s"):
            console.print(f"Skipping the PR #{pr_number}")
        elif chosen_option in ("quit", "q"):
            sys.exit(2)

    if print_summary:
        context = {
            "changelog_changes": changelog_changes,
            "excluded_changes": excluded_changes,
            "doc_only_changes": doc_only_changes,
            "previous_release": previous_release,
            "current_release": current_release,
        }
        console.print(
            render_template("CHERRY_PICK_SUMMARY.txt", context=context))

    if output_folder:

        # Write one short-hash-per-line commit list for a change category.
        def write_commits(type: str, path: Path,
                          changes_to_write: List[Change]):
            path.write_text("\n".join(change.short_hash
                                      for change in changes_to_write) + "\n")
            console.print(f"\n{type} commits written in {path}")

        write_commits("Changelog",
                      Path(output_folder) / CHANGELOG_CHANGES_FILE,
                      changelog_changes)
        write_commits("Doc only",
                      Path(output_folder) / DOC_ONLY_CHANGES_FILE,
                      doc_only_changes)
        write_commits("Excluded",
                      Path(output_folder) / EXCLUDED_CHANGES_FILE,
                      excluded_changes)
        console.print("\n")
Пример #20
0
    def launch_pipeline(self):
        """Run the interactive pipeline-launch flow.

        Prompts for a pipeline if none was given, collects parameters (via
        web form or CLI wizard), validates them against the pipeline
        schema, then builds and launches the `nextflow run` command.

        Returns:
            False on any failure; otherwise falls through after launching.
        """

        # Prompt for pipeline if not supplied and no web launch ID
        if self.pipeline is None and self.web_id is None:
            launch_type = questionary.select(
                "Launch local pipeline or remote GitHub pipeline?",
                choices=["Remote pipeline", "Local path"],
                style=nf_core.utils.nfcore_question_style,
            ).unsafe_ask()

            if launch_type == "Remote pipeline":
                try:
                    self.pipeline = nf_core.utils.prompt_remote_pipeline_name(
                        self.wfs)
                except AssertionError as e:
                    log.error(e.args[0])
                    return False
            else:
                self.pipeline = questionary.path(
                    "Path to workflow:",
                    style=nf_core.utils.nfcore_question_style).unsafe_ask()

        # Check if the output file exists already
        if os.path.exists(self.params_out):
            log.warning("Parameter output file already exists! {}".format(
                os.path.relpath(self.params_out)))
            if Confirm.ask("[yellow]Do you want to overwrite this file?"):
                os.remove(self.params_out)
                log.info("Deleted {}\n".format(self.params_out))
            else:
                log.info(
                    "Exiting. Use --params-out to specify a custom filename.")
                return False

        log.info(
            "NOTE: This tool ignores any pipeline parameter defaults overwritten by Nextflow config files or profiles\n"
        )

        # Check if we have a web ID
        if self.web_id is not None:
            self.schema_obj = nf_core.schema.PipelineSchema()
            try:
                # Poll until the browser form has been submitted
                if not self.get_web_launch_response():
                    log.info(
                        "Waiting for form to be completed in the browser. Remember to click Finished when you're done."
                    )
                    log.info("URL: {}".format(self.web_schema_launch_web_url))
                    nf_core.utils.wait_cli_function(
                        self.get_web_launch_response)
            except AssertionError as e:
                log.error(e.args[0])
                return False

            # Load local params if supplied
            self.set_schema_inputs()
            # Load schema defaults
            self.schema_obj.get_schema_defaults()

        # No --id supplied, fetch parameter inputs
        else:
            # Build the schema and starting inputs
            if self.get_pipeline_schema() is False:
                return False
            self.set_schema_inputs()
            self.merge_nxf_flag_schema()

            # Collect user inputs via web or cli
            if self.prompt_web_gui():
                try:
                    self.launch_web_gui()
                except AssertionError as e:
                    log.error(e.args[0])
                    return False
            else:
                # Kick off the interactive wizard to collect user inputs
                self.prompt_schema()

        # Validate the parameters that we now have
        if not self.schema_obj.validate_params():
            return False

        # Strip out the defaults
        if not self.save_all:
            self.strip_default_params()

        # Build and launch the `nextflow run` command
        self.build_command()
        self.launch_workflow()
Пример #21
0
def reset():
    """Ask for confirmation and, if granted, restore the initial config."""
    if Confirm.ask("Do you really want to reset the config?"):
        write_initial_config()