Code example #1
def main():
    parser = argparse.ArgumentParser(description='List and edit recipemd tags')

    parser.add_argument(
        '-f', '--filter', type=filter_string,
        help='Filter recipes by tags. Expects a boolean string, e.g. "cake and vegan"'
    )
    parser.add_argument('-s', '--no-messages', action='store_true', default=False, help='suppress error messages')
    parser.add_argument(
        '-1', action='store_true', dest='one_per_line', default=False,
        help='Force output to be one entry per line. This is the default when output is not to a terminal.'
    )

    subparsers = parser.add_subparsers(metavar="action", required=True)

    # recipes
    parser_recipes = subparsers.add_parser('recipes', help='list recipe paths')
    parser_recipes.set_defaults(func=recipes)

    parser_recipes.add_argument(
        'folder', type=dir_path, nargs='?', default='.', help='path to a folder containing recipemd files. Works '
                                                              'recursively for all *.md files.'
        # very unlikely file extension so completer only returns folders
    ).completer = FilesCompleter(allowednames="*.7CA0B927-3B02-48EA-97A9-CB557E061992")

    # list tags
    parser_list = subparsers.add_parser('list', help="list used tags")
    parser_list.set_defaults(func=list_tags)

    parser_list.add_argument('-c', '--count', action='store_true', help="count number of uses per tag")
    parser_list.add_argument(
        'folder', type=dir_path, nargs='?', default='.', help='path to a folder containing recipemd files. Works '
                                                              'recursively for all *.md files.'
        # very unlikely file extension so completer only returns folders
    ).completer = FilesCompleter(allowednames="*.7CA0B927-3B02-48EA-97A9-CB557E061992")

    # TODO edit
    # parser_edit = subparsers.add_parser('edit', help='edit tags')

    # completions
    argcomplete.autocomplete(parser)

    args = parser.parse_args()
    args.func(args)
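
These examples share the same wiring: `add_argument` returns an `argparse.Action`, a `completer` is attached to that action (or passed via a `completer=` keyword where the framework supports it), and `argcomplete.autocomplete(parser)` runs before `parse_args`. A minimal, self-contained sketch of that wiring follows; the script name, argument, and file extension are illustrative assumptions, not taken from any of the projects listed here.

# PYTHON_ARGCOMPLETE_OK
# Illustrative sketch only. Shell completion is activated with something like:
#   eval "$(register-python-argcomplete my_script.py)"
import argparse

import argcomplete
from argcomplete.completers import FilesCompleter


def main():
    parser = argparse.ArgumentParser(description='Demo of attaching a FilesCompleter')
    # add_argument returns the Action; the completer is attached as a plain attribute
    action = parser.add_argument('recipe', help='path to a markdown file')
    action.completer = FilesCompleter(allowednames=('md',))
    # must be called before parse_args so the completion hook can intercept the process
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    print(args.recipe)


if __name__ == '__main__':
    main()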
Code example #2
File: parser.py, Project: cevans87/rosdev
    def to_node(self) -> Node:
        """Return Node constructed by parsing args as if from sys.argv."""

        parser = self.get_parser()
        autocomplete(parser,
                     default_completer=FilesCompleter(allowednames='',
                                                      directories=False))

        parsed_args = parser.parse_args(self.args).__dict__
        if not parsed_args:
            Node.get_parser_structure.memoize.remove()
            parser.parse_args([*self.args, '--help'])
            sys.exit(1)

        if parsed_args['args'] and parsed_args['args'][0] == '--':
            parsed_args['args'] = parsed_args['args'][1:]
        parsed_args['args'] = ' '.join(parsed_args['args'])
        parsed_args['ports'] = frozenset(
            str(port) for port in parsed_args['ports'])

        mounts = []
        for mount in parsed_args['mounts']:
            host_path, container_path = mount.split(':', 1)
            host_path, container_path = Path(host_path), Path(container_path)
            if not host_path.is_absolute():
                host_path = host_path.absolute()
            if not container_path.is_absolute():
                container_path = container_path.absolute()
            mounts.append(
                Options.Mount(container_path=container_path,
                              host_path=host_path))
        parsed_args['mounts'] = frozenset(mounts)

        command_class = parsed_args.pop('command_class')
        command_module = parsed_args.pop('command_module')

        options = Options(**parsed_args)

        try:
            node = getattr(import_module(command_module),
                           command_class)(options)
        except AttributeError:
            print(
                f'Invalid command "{command_class.replace(".", " ")}"\n{parser.format_usage()}',
                file=sys.stderr)
            sys.exit(1)

        return node
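
The mount handling in this example splits each `host:container` string on the first colon and makes both sides absolute before freezing the result. A standalone sketch of just that parsing step, using a plain tuple in place of the project's `Options.Mount` (the function name here is illustrative, not rosdev's API):

from pathlib import Path
from typing import FrozenSet, Iterable, Tuple


def parse_mounts(raw_mounts: Iterable[str]) -> FrozenSet[Tuple[Path, Path]]:
    """Split 'host:container' strings and resolve both sides to absolute paths."""
    mounts = set()
    for mount in raw_mounts:
        host_path, container_path = mount.split(':', 1)
        host_path, container_path = Path(host_path), Path(container_path)
        if not host_path.is_absolute():
            host_path = host_path.absolute()
        if not container_path.is_absolute():
            container_path = container_path.absolute()
        mounts.add((host_path, container_path))
    return frozenset(mounts)


# e.g. parse_mounts(['.:/workspace', '/tmp/cache:/cache'])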
Code example #3
def load_arguments(self, _):
    # synapse spark
    for scope in ['job', 'session', 'statement']:
        with self.argument_context('synapse spark ' + scope) as c:
            c.argument('workspace_name', help='The name of the workspace.')
            c.argument('spark_pool_name', help='The name of the Spark pool.')

    for scope in ['synapse spark job', 'synapse spark session']:
        with self.argument_context(scope + ' list') as c:
            c.argument(
                'from_index',
                help=
                'Optional parameter specifying which index the list should begin from.'
            )
            c.argument(
                'size',
                help=
                'The size of the returned list. By default it is 20 and that is the maximum.'
            )

    with self.argument_context('synapse spark job submit') as c:
        c.argument('main_definition_file',
                   help='The main file used for the job.')
        c.argument(
            'main_class_name',
            help=
            'The fully-qualified identifier of the main class that is in the main definition file.'
        )
        c.argument(
            'command_line_arguments',
            nargs='+',
            help=
            'Optional arguments to the job (Note: please use storage URIs for file arguments).'
        )
        c.argument('archives', nargs='+', help='The array of archives.')
        c.argument('job_name', arg_type=name_type, help='The Spark job name.')
        c.argument(
            'reference_files',
            nargs='+',
            help=
            'Additional files used for reference in the main definition file.')
        c.argument('configuration',
                   type=get_json_object,
                   help='The configuration of Spark job.')
        c.argument('executors', help='The number of executors.')
        c.argument('executor_size',
                   arg_type=get_enum_type(['Small', 'Medium', 'Large']),
                   help='The executor size')
        c.argument('tags', arg_type=tags_type)
        c.argument('language',
                   arg_type=get_enum_type(SparkBatchLanguage,
                                          default=SparkBatchLanguage.Scala),
                   help='The Spark job language.')

    for scope in ['show', 'cancel']:
        with self.argument_context('synapse spark job ' + scope) as c:
            c.argument('job_id',
                       options_list=['--livy-id'],
                       arg_group='Spark job',
                       help='The id of the Spark job.')

    with self.argument_context('synapse spark session create') as c:
        c.argument('job_name',
                   arg_type=name_type,
                   help='The Spark session name.')
        c.argument(
            'reference_files',
            nargs='+',
            help=
            'Additional files used for reference in the main definition file.')
        c.argument('configuration',
                   type=get_json_object,
                   help='The configuration of Spark session.')
        c.argument('executors', help='The number of executors.')
        c.argument('executor_size',
                   arg_type=get_enum_type(['Small', 'Medium', 'Large']),
                   help='The executor size')
        c.argument('tags', arg_type=tags_type)

    for scope in ['show', 'cancel', 'reset-timeout']:
        with self.argument_context('synapse spark session ' + scope) as c:
            c.argument('session_id',
                       options_list=['--livy-id'],
                       arg_group='Spark Session',
                       help='The id of the Spark session job.')

    with self.argument_context('synapse spark statement') as c:
        c.argument('session_id', help='The id of Spark session.')

    for scope in ['show', 'cancel']:
        with self.argument_context('synapse spark statement ' + scope) as c:
            c.argument('statement_id',
                       options_list=['--livy-id'],
                       arg_group="Spark statement",
                       help='The id of the statement.')

    with self.argument_context('synapse spark statement invoke') as c:
        c.argument(
            'code',
            completer=FilesCompleter(),
            help=
            'The code of Spark statement. This is either the code contents or use `@<file path>` to load the content from a file'
        )
        c.argument('language',
                   arg_type=get_enum_type(SparkStatementLanguage),
                   validator=validate_statement_language,
                   help='The language of Spark statement.')

    # synapse workspace
    for scope in ['show', 'create', 'update', 'delete']:
        with self.argument_context('synapse workspace ' + scope) as c:
            c.argument('workspace_name',
                       arg_type=name_type,
                       id_part='name',
                       help='The workspace name.')

    for scope in ['create', 'update']:
        with self.argument_context('synapse workspace ' + scope) as c:
            c.argument('sql_admin_login_password',
                       options_list=['--sql-admin-login-password', '-p'],
                       help='The sql administrator login password.')
            c.argument('tags', arg_type=tags_type)

    with self.argument_context('synapse workspace create') as c:
        c.argument("storage_account",
                   validator=validate_storage_account,
                   help='The data lake storage account name or resource id.')
        c.argument('file_system',
                   help='The file system of the data lake storage account.')
        c.argument('sql_admin_login_user',
                   options_list=['--sql-admin-login-user', '-u'],
                   help='The sql administrator login user name.')

    with self.argument_context('synapse workspace check-name') as c:
        c.argument('name',
                   arg_type=name_type,
                   help='The name you wanted to check.')

    # synapse spark pool
    with self.argument_context('synapse spark pool') as c:
        c.argument('workspace_name',
                   id_part='name',
                   help='The workspace name.')

    with self.argument_context('synapse spark pool list') as c:
        c.argument('workspace_name', id_part=None, help='The workspace name.')

    for scope in ['show', 'create', 'update', 'delete']:
        with self.argument_context('synapse spark pool ' + scope) as c:
            c.argument('spark_pool_name',
                       arg_type=name_type,
                       id_part='child_name_1',
                       help='The name of the Spark pool.')

    with self.argument_context('synapse spark pool create') as c:
        # Node
        c.argument('node_count', arg_group='Node', help='The number of nodes.')
        c.argument('node_size_family',
                   arg_group='Node',
                   help='The node size family.')
        c.argument('node_size',
                   arg_group='Node',
                   arg_type=get_enum_type(['Small', 'Medium', 'Large']),
                   help='The node size.')

        # AutoScale
        c.argument('enable_auto_scale',
                   arg_type=get_three_state_flag(),
                   arg_group='AutoScale',
                   help='The flag of enabling auto scale.')
        c.argument('max_node_count',
                   arg_group='AutoScale',
                   help='The max node count.')
        c.argument('min_node_count',
                   arg_group='AutoScale',
                   help='The min node count.')

        # AutoPause
        c.argument('enable_auto_pause',
                   arg_type=get_three_state_flag(),
                   arg_group='AutoPause',
                   help='The flag of enabling auto pause.')
        c.argument('delay',
                   arg_group='AutoPause',
                   help='The delay time in minutes.')

        # Environment Configuration
        c.argument('library_requirements_file',
                   arg_group='Environment Configuration',
                   help='The library requirements file.')

        # Default Folder
        c.argument('spark_events_folder',
                   arg_group='Default Folder',
                   help='The Spark events folder.')
        c.argument('default_spark_log_folder',
                   arg_group='Default Folder',
                   help='The default Spark log folder.')

        # Component Version
        c.argument('spark_version',
                   arg_group='Component Version',
                   help='The supported Spark version is 2.4 now.')

        c.argument('tags', arg_type=tags_type)

    with self.argument_context('synapse spark pool update') as c:
        c.argument('tags', arg_type=tags_type)
        # Node
        c.argument('node_count', arg_group='Node', help='The number of nodes.')
        c.argument('node_size_family',
                   arg_group='Node',
                   help='The node size family.')

        c.argument('node_size',
                   arg_group='Node',
                   arg_type=get_enum_type(['Small', 'Medium', 'Large']),
                   help='The node size.')
        # AutoScale
        c.argument('enable_auto_scale',
                   arg_type=get_three_state_flag(),
                   arg_group='AutoScale',
                   help='The flag of enabling auto scale.')
        c.argument('max_node_count',
                   arg_group='AutoScale',
                   help='The max node count.')
        c.argument('min_node_count',
                   arg_group='AutoScale',
                   help='The min node count.')

        # AutoPause
        c.argument('enable_auto_pause',
                   arg_type=get_three_state_flag(),
                   arg_group='AutoPause',
                   help='The flag of enabling auto pause.')
        c.argument('delay',
                   arg_group='AutoPause',
                   help='The delay time in minutes.')

        # Environment Configuration
        c.argument('library_requirements_file',
                   arg_group='Environment Configuration',
                   help='The library requirements file.')
        c.argument('force',
                   arg_type=get_three_state_flag(),
                   help='The flag of force operation.')

    # synapse sql pool
    with self.argument_context('synapse sql pool') as c:
        c.argument('workspace_name',
                   id_part='name',
                   help='The workspace name.')

    with self.argument_context('synapse sql pool list') as c:
        c.argument('workspace_name', id_part=None, help='The workspace name.')

    for scope in ['show', 'create', 'delete', 'update', 'pause', 'resume']:
        with self.argument_context('synapse sql pool ' + scope) as c:
            c.argument('sql_pool_name',
                       arg_type=name_type,
                       id_part='child_name_1',
                       help='The SQL pool name.')

    with self.argument_context('synapse sql pool create') as c:
        c.argument('performance_level', help='The performance level.')
        c.argument('source_database_id', help='The source database id.')
        c.argument('recoverable_database_id',
                   help='The recoverable database id.')
        c.argument('tags', arg_type=tags_type)

    with self.argument_context('synapse sql pool update') as c:
        c.argument('sku_name',
                   options_list=['--performance-level'],
                   help='The performance level.')
        c.argument('tags', arg_type=tags_type)

    # synapse workspace firewall-rule
    with self.argument_context('synapse workspace firewall-rule') as c:
        c.argument('workspace_name',
                   id_part='name',
                   help='The workspace name.')

    with self.argument_context('synapse workspace firewall-rule list') as c:
        c.argument('workspace_name', id_part=None, help='The workspace name.')

    for scope in ['show', 'create', 'delete']:
        with self.argument_context('synapse workspace firewall-rule ' +
                                   scope) as c:
            c.argument('rule_name',
                       arg_type=name_type,
                       id_part='child_name_1',
                       help='The IP firewall rule name')

    with self.argument_context('synapse workspace firewall-rule create') as c:
        c.argument(
            'start_ip_address',
            help=
            'The start IP address of the firewall rule. Must be IPv4 format.')
        c.argument(
            'end_ip_address',
            help='The end IP address of the firewall rule. Must be IPv4 format. '
            'Must be greater than or equal to startIpAddress.')
Code example #4
File: parser.py, Project: weisk/nichtparasoup
__all__ = ["create_parser"]

from argparse import ArgumentParser
from typing import Any, Set

from argcomplete import FilesCompleter  # type: ignore

from nichtparasoup.imagecrawler import get_imagecrawlers


def imagecrawler_completion(*args: Any, **kwargs: Any) -> Set[str]:
    return set(get_imagecrawlers().names())


yaml_file_completion = FilesCompleter(('yaml', 'yml'))


def create_parser() -> ArgumentParser:  # pragma: no cover
    # used `__tmp_action` several times to avoid type-checker warnings like 'Action has no attribute "completer"'

    parser = ArgumentParser(
        add_help=True,
        allow_abbrev=False,
    )

    parser.add_argument(
        '--debug',
        help='Enable debug output.',
        action='store_true',
        dest="debug",
    )
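
The parser above is cut off after the --debug flag. The `__tmp_action` trick mentioned in its leading comment looks roughly like the sketch below: the Action returned by `add_argument` is captured in a throwaway name so the dynamically added `completer` attribute does not trip type checkers. The option names and dests here are illustrative assumptions, not nichtparasoup's actual arguments; the sketch reuses the completers defined earlier in this example.

def add_completed_arguments(parser: ArgumentParser) -> ArgumentParser:
    # Hypothetical options for illustration only.
    __tmp_action = parser.add_argument(
        '--config',
        metavar='<file>',
        dest='config_file',
        help='Use a YAML config file.',
    )
    __tmp_action.completer = yaml_file_completion  # type: ignore[attr-defined]

    __tmp_action = parser.add_argument(
        '--imagecrawler',
        dest='imagecrawler',
        help='Name of an imagecrawler to add.',
    )
    __tmp_action.completer = imagecrawler_completion  # type: ignore[attr-defined]

    del __tmp_action
    return parser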
Code example #5
File: find.py, Project: tstehr/RecipeMD
subparsers = parser.add_subparsers(metavar="action", required=True)

# recipes
parser_recipes = subparsers.add_parser('recipes', help='list recipe paths')
parser_recipes.set_defaults(func=list_recipes)

parser_recipes.add_argument(
    'folder',
    type=dir_path,
    nargs='?',
    default='.',
    help='path to a folder containing recipemd files. Works '
    'recursively for all *.md files.'
    # very unlikely file extension so completer only returns folders
).completer = FilesCompleter(
    allowednames="*.7CA0B927-3B02-48EA-97A9-CB557E061992")

# list tags
parser_tags = subparsers.add_parser('tags', help="list used tags")
parser_tags.set_defaults(func=list_tags)

parser_tags.add_argument('-c',
                         '--count',
                         action='store_true',
                         help="count number of uses per tag")
parser_tags.add_argument(
    'folder',
    type=dir_path,
    nargs='?',
    default='.',
    help='path to a folder containing recipemd files. Works '
Code example #6
File: _params.py, Project: wnjenkin/azure-cli
def load_arguments(self, _):

    with self.argument_context('batchai') as c:
        c.argument('resource_group', resource_group_name_type)

    with self.argument_context('batchai cluster') as c:
        c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create the cluster. If the location is not specified and default location is not configured, will default to the resource group\'s location')
        c.argument('cluster_name', options_list=['--name', '-n'], help='Name of the cluster.')

    with self.argument_context('batchai cluster create') as c:
        c.argument('json_file', options_list=['--config', '-c'], help='A path to a json file containing cluster create parameters (json representation of azure.mgmt.batchai.models.ClusterCreateParameters).', arg_group='Advanced')

    with self.argument_context('batchai cluster create') as c:
        c.argument('setup_task', help='A command line which should be executed on each compute node when it\'s got allocated or rebooted. The task is executed under a user account added into sudoers list (so, it can use sudo). Note, if this parameter is specified, it will overwrite setup task given in the configuration file.', arg_group='Setup Task')
        c.argument('setup_task_output', help='Location of the folder where setup-task\'s logs will be stored. Required if setup-task argument is provided. Note, Batch AI will create several helper folders under this location. The created folders are reported as stdOutErrPathSuffix by get cluster command.', arg_group='Setup Task')

    with self.argument_context('batchai cluster create', arg_group='Virtual Network') as c:
        c.argument('subnet', options_list=['--subnet'], help='Resource id of a virtual network subnet to put the cluster in.')

    with self.argument_context('batchai cluster create', arg_group='Admin Account') as c:
        c.argument('user_name', options_list=['--user-name', '-u'], help='Name of the admin user account to be created on each compute node. If the value is not provided and no user configuration is provided in the config file, current user\'s name will be used.')
        c.argument('ssh_key', options_list=['--ssh-key', '-k'], help='SSH public key value or path. If the value is not provided and no ssh public key or password is configured in the config file the default public ssh key (~/.ssh/id_rsa.pub) of the current user will be used (if available).', completer=FilesCompleter())
        c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory.')
        c.argument('password', options_list=['--password', '-p'], help='Password.')

    with self.argument_context('batchai cluster create', arg_group='Auto Storage') as c:
        c.argument('use_auto_storage', action='store_true', help='If provided, the command will create a storage account in a new or existing resource group named "batchaiautostorage". It will also create Azure File Share with name "batchaishare", Azure Blob Container with name "batchaicontainer". The File Share and Blob Container will be mounted on each cluster node at $AZ_BATCHAI_MOUNT_ROOT/autoafs and $AZ_BATCHAI_MOUNT_ROOT/autobfs. If the resource group already exists and contains an appropriate storage account belonging to the same region as cluster, this command will reuse existing storage account.')

    with self.argument_context('batchai cluster create', arg_group='Nodes') as c:
        c.argument('image', options_list=['--image', '-i'], help='Operation system image for cluster nodes. The value may contain an alias ({0}) or specify image details in the form "publisher:offer:sku:version". If image configuration is not provided via command line or configuration file, Batch AI will choose default OS image'.format(', '.join(custom.SUPPORTED_IMAGE_ALIASES.keys())))
        c.argument('custom_image', help='Resource id of a virtual machine image to be used for nodes creation. Note, you need to provide --image with which this image was created.')
        c.argument('vm_size', options_list=['--vm-size', '-s'], help='VM size for cluster nodes (e.g. Standard_NC6 for 1 GPU node)', completer=get_vm_size_completion_list)
        c.argument('target', options_list=['--target', '-t'], help='Number of nodes which should be allocated immediately after cluster creation. If the cluster is in auto-scale mode, BatchAI can change the number of nodes later based on number of running and queued jobs.')
        c.argument('min_nodes', options_list=['--min'], help='Min nodes count for the auto-scale cluster.', type=int)
        c.argument('max_nodes', options_list=['--max'], help='Max nodes count for the auto-scale cluster.', type=int)
        c.argument('vm_priority', arg_type=get_enum_type(['dedicated', 'lowpriority']), options_list=['--vm-priority'], help="VM priority. If lowpriority selected the node can get preempted while the job is running.")

    with self.argument_context('batchai cluster create', arg_group='File Server Mount') as c:
        c.argument('nfs_name', options_list=['--nfs-name', '--nfs'], help='Name of a file server to mount on each cluster node. If you need to mount more than one file server, configure them in a configuration file and use --config option.')
        c.argument('nfs_resource_group', options_list=['--nfs-resource-group'], help='Resource group in which file server is created. Can be omitted if the file server and the cluster belong to the same resource group')
        c.argument('nfs_mount_path', options_list=['--nfs-mount-path'], help='Relative mount path for NFS. The NFS will be available at $AZ_BATCHAI_MOUNT_ROOT/<relative_mount_path> folder.')

    with self.argument_context('batchai cluster create', arg_group='Storage Account') as c:
        c.argument('account_name', options_list=['--storage-account-name'], help='Storage account name for Azure File Shares and/or Azure Storage Containers to be mounted on each cluster node. Related environment variable: AZURE_BATCHAI_STORAGE_ACCOUNT. Must be used in conjunction with --storage-account-key. If the key is not provided, the command will try to query the storage account key using the authenticated Azure account.')
        c.argument('account_key', options_list=['--storage-account-key'], help='Storage account key. Must be used in conjunction with --storage-account-name. Environment variable: AZURE_BATCHAI_STORAGE_KEY. Optional if the storage account belongs to the current subscription.')

    with self.argument_context('batchai cluster create', arg_group='Azure File Share Mount') as c:
        c.argument('azure_file_share', options_list=['--afs-name'], help='Name of the Azure File Share to be mounted on each cluster node. Must be used in conjunction with --storage-account-name and --storage-account-key arguments or AZURE_BATCHAI_STORAGE_ACCOUNT and AZURE_BATCHAI_STORAGE_KEY environment variables. If you need to mount more than one Azure File Share, configure them in a configuration file and use --config option.')
        c.argument('afs_mount_path', options_list=['--afs-mount-path'], help='Relative mount path for Azure File share. The file share will be available at $AZ_BATCHAI_MOUNT_ROOT/<relative_mount_path> folder.')

    with self.argument_context('batchai cluster create', arg_group='Azure Storage Container Mount') as c:
        c.argument('container_name', options_list=['--bfs-name', '--container-name'], help='Name of Azure Storage container to be mounted on each cluster node. Must be used in conjunction with --storage-account-name and --storage-account-key arguments or AZURE_BATCHAI_STORAGE_ACCOUNT and AZURE_BATCHAI_STORAGE_KEY environment variables. If you need to mount more than one Azure Storage Blob Container, configure them in a configuration file and use --config option.')
        c.argument('container_mount_path', options_list=['--bfs-mount-path', '--container-mount-path'], help='Relative mount path for Azure Storage container. The container will be available at $AZ_BATCHAI_MOUNT_ROOT/<relative_mount_path> folder.')

    with self.argument_context('batchai cluster resize') as c:
        c.argument('target', options_list=['--target', '-t'], help='Target number of compute nodes.')

    with self.argument_context('batchai cluster auto-scale') as c:
        c.argument('min_nodes', options_list=['--min'], help='Minimum number of nodes.')
        c.argument('max_nodes', options_list=['--max'], help='Maximum number of nodes.')

    with self.argument_context('batchai cluster list-files') as c:
        c.argument('path', options_list=['--path', '-p'], help='Relative path of a subfolder inside of node setup task output directory.')
        c.argument('expiry', options_list=['--expiry', '-e'], help='Time in minutes for how long generated download URLs should remain valid.')

    with self.argument_context('batchai cluster list') as c:
        # Not implemented yet
        c.ignore('clusters_list_options')

    with self.argument_context('batchai job') as c:
        c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create the job. If default location is not configured, will default to the resource group\'s location')
        c.argument('job_name', options_list=['--name', '-n'], help='Name of the job.')
        c.argument('cluster_name', options_list=['--cluster-name', '-r'], help='Name of the cluster.')

    with self.argument_context('batchai job create') as c:
        c.argument('json_file', options_list=['--config', '-c'], help='A path to a json file containing job create parameters (json representation of azure.mgmt.batchai.models.JobCreateParameters).')
        c.argument('cluster_name', options_list=['--cluster-name', '-r'], help='If specified, the job will run on the given cluster instead of the one configured in the json file.')
        c.argument('cluster_resource_group', options_list=['--cluster-resource-group'], help='Specifies a resource group for the cluster given with --cluster-name parameter. If omitted, --resource-group value will be used.')

    with self.argument_context('batchai job create', arg_group='Azure File Share Mount') as c:
        c.argument('azure_file_share', options_list=['--afs-name'], help='Name of the Azure File Share to mount during the job execution. The File Share will be mounted only on the nodes which are executing the job. Must be used in conjunction with --storage-account-name and --storage-account-key arguments or AZURE_BATCHAI_STORAGE_ACCOUNT and AZURE_BATCHAI_STORAGE_KEY environment variables. If you need to mount more than one Azure File share, configure them in a configuration file and use --config option.')
        c.argument('afs_mount_path', options_list=['--afs-mount-path'], help='Relative mount path for Azure File Share. The File Share will be available at $AZ_BATCHAI_JOB_MOUNT_ROOT/<relative_mount_path> folder.')

    with self.argument_context('batchai job create', arg_group='Azure Storage Container Mount') as c:
        c.argument('container_name', options_list=['--bfs-name'], help='Name of Azure Storage Blob Container to mount during the job execution. The container will be mounted only on the nodes which are executing the job. Must be used in conjunction with --storage-account-name and --storage-account-key arguments or AZURE_BATCHAI_STORAGE_ACCOUNT and AZURE_BATCHAI_STORAGE_KEY environment variables. If you need to mount more than one Azure Storage container, configure them in a configuration file and use --config option.')
        c.argument('container_mount_path', options_list=['--bfs-mount-path'], help='Relative mount path for Azure Storage Blob Container. The container will be available at $AZ_BATCHAI_JOB_MOUNT_ROOT/<relative_mount_path> folder.')

    with self.argument_context('batchai job create', arg_group='Storage Account') as c:
        c.argument('account_name', options_list=['--storage-account-name'], help='Storage account name for Azure File Shares and/or Azure Storage Containers to be mounted. Related environment variable: AZURE_BATCHAI_STORAGE_ACCOUNT. Must be used in conjunction with --storage-account-key. If the key is not provided, the command will try to query the storage account key using the authenticated Azure account.')
        c.argument('account_key', options_list=['--storage-account-key'], help='Storage account key. Must be used in conjunction with --storage-account-name. Environment variable: AZURE_BATCHAI_STORAGE_KEY.')

    with self.argument_context('batchai job create', arg_group='File Server Mount') as c:
        c.argument('nfs_name', options_list=['--nfs-name'], help='Name of a file server to mount during the job execution. The NFS will be mounted only on the nodes which are executing the job. If you need to mount more than one file server, configure them in a configuration file and use --config option.')
        c.argument('nfs_resource_group', options_list=['--nfs-resource-group'], help='Resource group in which file server is created. Can be omitted if the file server and the job belong to the same resource group.')
        c.argument('nfs_mount_path', options_list=['--nfs-mount-path'], help='Relative mount path for NFS. The NFS will be available at $AZ_BATCHAI_JOB_MOUNT_ROOT/<relative_mount_path> folder.')

    with self.argument_context('batchai job list') as c:
        # Not implemented yet
        c.ignore('jobs_list_options')

    with self.argument_context('batchai job file stream') as c:
        c.argument('output_directory_id', options_list=['--output-directory-id', '-d'], help='The Id of the job\'s output directory (as specified by "id" element in outputDirectories collection in the job create parameters).')
        c.argument('file_name', options_list=['--file-name', '-f'], help='The name of the file to stream.')
        c.argument('path', options_list=['--path', '-p'], help='Relative path in the given output directory.')

    with self.argument_context('batchai job file list') as c:
        c.argument('output_directory_id', options_list=['--output-directory-id', '-d'], help='The Id of the job\'s output directory (as specified by "id" element in outputDirectories collection in the job create parameters).')
        c.argument('path', options_list=['--path', '-p'], help='Relative path in the given output directory.')
        c.argument('expiry', options_list=['--expiry', '-e'], type=int, help='Time in minutes for how long generated download URL should remain valid.')

    with self.argument_context('batchai job wait') as c:
        c.argument('check_interval_sec', options_list=['--interval'], help="Polling interval in sec.")

    with self.argument_context('batchai file-server') as c:
        c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create the file server. If default location is not configured, will default to the resource group\'s location')
        c.argument('file_server_name', options_list=['--name', '-n'], help='Name of the file server.')

    with self.argument_context('batchai file-server create') as c:
        c.argument('vm_size', options_list=['--vm-size', '-s'], help='VM size.', completer=get_vm_size_completion_list)
        c.argument('json_file', options_list=['--config', '-c'], help='A path to a json file containing file server create parameters (json representation of azure.mgmt.batchai.models.FileServerCreateParameters). Note, parameters given via command line will overwrite parameters specified in the configuration file.', arg_group='Advanced')

    with self.argument_context('batchai file-server create', arg_group='Storage Disks') as c:
        c.argument('disk_count', help='Number of disks.', type=int)
        c.argument('disk_size', help='Disk size in Gb.', type=int)
        c.argument('caching_type', arg_type=get_enum_type(['none', 'readonly', 'readwrite']), help='Caching type for premium disks. If not provided via command line or in configuration file, no caching will be used.')
        c.argument('storage_sku', arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']), help='The sku of storage account to persist VM.')

    with self.argument_context('batchai file-server create', arg_group='Admin Account') as c:
        c.argument('user_name', options_list=['--user-name', '-u'], help='Name of the admin user account to be created on NFS node. If the value is not provided and no user configuration is provided in the config file, current user\'s name will be used.')
        c.argument('ssh_key', options_list=['--ssh-key', '-k'], help='SSH public key value or path. If the value is not provided and no ssh public key or password is configured in the config file the default public ssh key (~/.ssh/id_rsa.pub) of the current user will be used (if available).', completer=FilesCompleter())
        c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory.')
        c.argument('password', options_list=['--password', '-p'], help='Password.')

    with self.argument_context('batchai file-server create', arg_group='Virtual Network') as c:
        c.argument('subnet', options_list=['--subnet'], help='Resource id of a virtual network subnet to put the file server in. If not provided via command line or in configuration file, Batch AI will create a new virtual network and subnet under your subscription.')

    with self.argument_context('batchai file-server list') as c:
        # Not implemented yet
        c.ignore('file_servers_list_options')
Code example #7
File: _params.py, Project: zhangyan133/azure-cli
def load_arguments(self, _):
    # synapse workspace
    for scope in ['show', 'create', 'update', 'delete', 'activate']:
        with self.argument_context('synapse workspace ' + scope) as c:
            c.argument('workspace_name', arg_type=name_type, id_part='name', help='The workspace name.')

    for scope in ['create', 'update']:
        with self.argument_context('synapse workspace ' + scope) as c:
            c.argument('sql_admin_login_password', options_list=['--sql-admin-login-password', '-p'],
                       help='The sql administrator login password.')
            c.argument('tags', arg_type=tags_type)
            c.argument('allowed_aad_tenant_ids', options_list=['--allowed-tenant-ids'], nargs='+', help="The approved Azure AD tenants which outbound data traffic allowed to. The Azure AD tenant of the current user will be included by default. Use ""(\'""\' in PowerShell) to disable all allowed tenant ids.")
            c.argument('key_name', help='The workspace customer-managed key display name. All existing keys can be found using "az synapse workspace key list" cmdlet.')

    with self.argument_context('synapse workspace create') as c:
        c.argument('location', get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
        c.argument("storage_account", validator=validate_storage_account,
                   help='The data lake storage account name or resource id.')
        c.argument('file_system', help='The file system of the data lake storage account.')
        c.argument('sql_admin_login_user', options_list=['--sql-admin-login-user', '-u'],
                   help='The sql administrator login user name.')
        c.argument('enable_managed_virtual_network', options_list=['--enable-managed-vnet',
                                                                   '--enable-managed-virtual-network'],
                   arg_type=get_three_state_flag(),
                   help='The flag indicating whether to enable the managed virtual network.')
        c.argument('prevent_data_exfiltration', arg_type=get_three_state_flag(),
                   help='The flag indicating whether to prevent data exfiltration.', options_list=['--prevent-exfiltration', '--prevent-data-exfiltration'])
        c.argument('key_identifier', help='The customer-managed key used to encrypt all data at rest in the workspace. Key identifier should be in the format of: https://{keyvaultname}.vault.azure.net/keys/{keyname}.', options_list=['--key-identifier', '--cmk'])

    with self.argument_context('synapse workspace check-name') as c:
        c.argument('name', arg_type=name_type, help='The name you wanted to check.')

    # synapse spark pool
    with self.argument_context('synapse spark pool') as c:
        c.argument('workspace_name', id_part='name', help='The workspace name.')

    with self.argument_context('synapse spark pool list') as c:
        c.argument('workspace_name', id_part=None, help='The workspace name.')

    for scope in ['show', 'create', 'update', 'delete']:
        with self.argument_context('synapse spark pool ' + scope) as c:
            c.argument('spark_pool_name', arg_type=name_type, id_part='child_name_1',
                       help='The name of the Spark pool.')

    with self.argument_context('synapse spark pool create') as c:
        # Node
        c.argument('node_count', type=int, arg_group='Node', help='The number of nodes.')
        c.argument('node_size_family', arg_group='Node', help='The node size family.')
        c.argument('node_size', arg_group='Node', arg_type=get_enum_type(['Small', 'Medium', 'Large']),
                   help='The node size.')

        # AutoScale
        c.argument('enable_auto_scale', arg_type=get_three_state_flag(), arg_group='AutoScale',
                   help='The flag of enabling auto scale.')
        c.argument('max_node_count', type=int, arg_group='AutoScale', help='The max node count.')
        c.argument('min_node_count', type=int, arg_group='AutoScale', help='The min node count.')

        # AutoPause
        c.argument('enable_auto_pause', arg_type=get_three_state_flag(), arg_group='AutoPause',
                   help='The flag of enabling auto pause.')
        c.argument('delay', arg_group='AutoPause', help='The delay time in minutes.')

        # Environment Configuration
        c.argument('library_requirements', arg_group='Environment Configuration',
                   help='The library requirements file.')

        # Default Folder
        c.argument('spark_events_folder', arg_group='Default Folder', help='The Spark events folder.')
        c.argument('spark_log_folder', arg_group='Default Folder', help='The default Spark log folder.')

        # Component Version
        c.argument('spark_version', arg_group='Component Version', help='The supported Spark version is 2.4 now.')

        c.argument('tags', arg_type=tags_type)

    with self.argument_context('synapse spark pool update') as c:
        c.argument('tags', arg_type=tags_type)
        # Node
        c.argument('node_count', type=int, arg_group='Node', help='The number of nodes.')
        c.argument('node_size_family', arg_group='Node', help='The node size family.')

        c.argument('node_size', arg_group='Node', arg_type=get_enum_type(['Small', 'Medium', 'Large']),
                   help='The node size.')
        # AutoScale
        c.argument('enable_auto_scale', arg_type=get_three_state_flag(), arg_group='AutoScale',
                   help='The flag of enabling auto scale.')
        c.argument('max_node_count', type=int, arg_group='AutoScale', help='The max node count.')
        c.argument('min_node_count', type=int, arg_group='AutoScale', help='The min node count.')

        # AutoPause
        c.argument('enable_auto_pause', arg_type=get_three_state_flag(), arg_group='AutoPause',
                   help='The flag of enabling auto pause.')
        c.argument('delay', arg_group='AutoPause', help='The delay time in minutes.')

        # Environment Configuration
        c.argument('library_requirements', arg_group='Environment Configuration',
                   help='The library requirements file.')
        c.argument('force', arg_type=get_three_state_flag(), help='The flag of force operation.')

    # synapse sql pool
    with self.argument_context('synapse sql pool') as c:
        c.argument('workspace_name', id_part='name', help='The workspace name.')

    with self.argument_context('synapse sql pool list') as c:
        c.argument('workspace_name', id_part=None, help='The workspace name.')

    with self.argument_context('synapse sql pool list-deleted') as c:
        c.argument('workspace_name', id_part=None, help='The workspace name.')

    for scope in ['show', 'create', 'delete', 'update', 'pause', 'resume', 'restore', 'show-connection-string']:
        with self.argument_context('synapse sql pool ' + scope) as c:
            c.argument('sql_pool_name', arg_type=name_type, id_part='child_name_1', help='The SQL pool name.')

    with self.argument_context('synapse sql pool create') as c:
        c.argument('performance_level', help='The performance level.')
        c.argument('source_database_id', help='The source database id.')
        c.argument('recoverable_database_id', help='The recoverable database id.')
        c.argument('tags', arg_type=tags_type)

    with self.argument_context('synapse sql pool update') as c:
        c.argument('sku_name', options_list=['--performance-level'], help='The performance level.')
        c.argument('tags', arg_type=tags_type)

    with self.argument_context('synapse sql pool restore') as c:
        c.argument('performance_level', help='The performance level.')
        c.argument('destination_name', options_list=['--dest-name', '--destination-name'],
                   help='Name of the sql pool that will be created as the restore destination.')

        restore_point_arg_group = 'Restore Point'
        c.argument('restore_point_in_time',
                   options_list=['--time', '-t'],
                   arg_group=restore_point_arg_group,
                   help='The point in time of the source database that will be restored to create the new database. Must be greater than or equal to the source database\'s earliestRestoreDate value. Either --time or --deleted-time (or both) must be specified. {0}'.format(
                       time_format_help))
        c.argument('source_database_deletion_date',
                   options_list=['--deleted-time'],
                   arg_group=restore_point_arg_group,
                   help='If specified, restore from a deleted database instead of from an existing database. Must match the deleted time of a deleted database in the same server. Either --time or --deleted-time (or both) must be specified. {0}'.format(
                       time_format_help))

    with self.argument_context('synapse sql pool show-connection-string') as c:
        c.argument('client_provider',
                   options_list=['--client', '-c'],
                   help='Type of client connection provider.',
                   arg_type=get_enum_type(SqlPoolConnectionClientType))

        auth_group = 'Authentication'
        c.argument('auth_type',
                   options_list=['--auth-type', '-a'],
                   arg_group=auth_group,
                   help='Type of authentication.',
                   arg_type=get_enum_type(SqlPoolConnectionClientAuthenticationType))

    # synapse sql pool classification
    with self.argument_context('synapse sql pool classification') as c:
        c.argument('sql_pool_name', arg_type=name_type, id_part='child_name_1', help='The SQL pool name.')

    with self.argument_context('synapse sql pool classification list') as c:
        c.argument('workspace_name', id_part=None, help='The workspace name.')

    with self.argument_context('synapse sql pool classification recommendation list') as c:
        c.argument('workspace_name', id_part=None, help='The workspace name.')
        c.argument('include_disabled_recommendations', options_list=['--included-disabled'],
                   arg_type=get_three_state_flag(),
                   help='Indicates whether the result should include disabled recommendations')

    for scope in ['show', 'create', 'update', 'delete', 'recommendation enable', 'recommendation disable']:
        with self.argument_context('synapse sql pool classification ' + scope) as c:
            c.argument('schema_name', help='The name of schema.', options_list=['--schema'])
            c.argument('table_name', help='The name of table.', options_list=['--table'])
            c.argument('column_name', help='The name of column.', options_list=['--column'])
            c.argument('information_type', help='The information type.')
            c.argument('label_name', help='The label name.', options_list=['--label'])

    # synapse sql pool tde
    with self.argument_context('synapse sql pool tde') as c:
        c.argument('sql_pool_name', arg_type=name_type, id_part='child_name_1', help='The SQL pool name.')
        c.argument('status', arg_type=get_enum_type(TransparentDataEncryptionStatus),
                   required=True, help='Status of the transparent data encryption.')

    # synapse sql pool threat-policy
    with self.argument_context('synapse sql pool threat-policy') as c:
        c.argument('sql_pool_name', arg_type=name_type, id_part='child_name_1', help='The SQL pool name.')

    with self.argument_context('synapse sql pool threat-policy update') as c:
        _configure_security_or_audit_policy_storage_params(c)
        notification_arg_group = 'Notification'

        c.argument('state',
                   arg_group=policy_arg_group,
                   help='Threat detection policy state',
                   arg_type=get_enum_type(SecurityAlertPolicyState))
        c.argument('retention_days',
                   type=int,
                   arg_group=policy_arg_group,
                   help='The number of days to retain threat detection logs.')
        c.argument('disabled_alerts',
                   arg_group=policy_arg_group,
                   help='List of disabled alerts.',
                   nargs='+')
        c.argument('email_addresses',
                   arg_group=notification_arg_group,
                   help='List of email addresses that alerts are sent to.',
                   nargs='+')
        c.argument('email_account_admins',
                   arg_group=notification_arg_group,
                   help='Whether the alert is sent to the account administrators.',
                   arg_type=get_three_state_flag())

    # synapse sql pool audit-policy
    with self.argument_context('synapse sql pool audit-policy') as c:
        c.argument('sql_pool_name', arg_type=name_type, id_part='child_name_1', help='The SQL pool name.')

    for scope in ['synapse sql pool audit-policy', 'synapse sql audit-policy']:
        with self.argument_context(scope + ' update') as c:
            _configure_security_or_audit_policy_storage_params(c)
            c.argument('storage_account_subscription_id', arg_group=storage_arg_group,
                       options_list=['--storage-subscription'],
                       help='The subscription id of storage account')
            c.argument('is_storage_secondary_key_in_use', arg_group=storage_arg_group,
                       arg_type=get_three_state_flag(), options_list=['--use-secondary-key'],
                       help='Indicates whether the secondary storage key is in use or not')
            c.argument('is_azure_monitor_target_enabled', options_list=['--enable-azure-monitor'],
                       help='Whether enabling azure monitor target or not.',
                       arg_type=get_three_state_flag())
            c.argument('state',
                       arg_group=policy_arg_group,
                       help='Auditing policy state',
                       arg_type=get_enum_type(BlobAuditingPolicyState))
            c.argument('audit_actions_and_groups',
                       options_list=['--actions'],
                       arg_group=policy_arg_group,
                       help='List of actions and action groups to audit.',
                       nargs='+')
            c.argument('retention_days',
                       type=int,
                       arg_group=policy_arg_group,
                       help='The number of days to retain audit logs.')

    with self.argument_context('synapse sql audit-policy update') as c:
        c.argument('queue_delay_milliseconds', type=int,
                   options_list=['--queue-delay-time', '--queue-delay-milliseconds'],
                   help='The amount of time in milliseconds that can elapse before audit actions are forced to be processed')

    with self.argument_context('synapse sql ad-admin') as c:
        c.argument('workspace_name', help='The workspace name.')
    for scope in ['create', 'update']:
        with self.argument_context('synapse sql ad-admin ' + scope) as c:
            c.argument('login_name', options_list=['--display-name', '-u'],
                       help='Display name of the Azure AD administrator user or group.')
            c.argument('object_id', options_list=['--object-id', '-i'],
                       help='The unique ID of the Azure AD administrator.')

    # synapse workspace firewall-rule
    with self.argument_context('synapse workspace firewall-rule') as c:
        c.argument('workspace_name', id_part='name', help='The workspace name.')

    with self.argument_context('synapse workspace firewall-rule list') as c:
        c.argument('workspace_name', id_part=None, help='The workspace name.')

    for scope in ['show', 'create', 'update', 'delete']:
        with self.argument_context('synapse workspace firewall-rule ' + scope) as c:
            c.argument('rule_name', arg_type=name_type, id_part='child_name_1', help='The IP firewall rule name')

    for scope in ['create', 'update']:
        with self.argument_context('synapse workspace firewall-rule ' + scope) as c:
            c.argument('start_ip_address', help='The start IP address of the firewall rule. Must be IPv4 format.')
            c.argument('end_ip_address', help='The end IP address of the firewall rule. Must be IPv4 format. '
                                              'Must be greater than or equal to startIpAddress.')

    # synapse workspace key
    with self.argument_context('synapse workspace key') as c:
        c.argument('workspace_name', id_part='name', help='The workspace name.')

    with self.argument_context('synapse workspace key list') as c:
        c.argument('workspace_name', id_part=None, help='The workspace name.')

    for scope in ['show', 'create', 'delete', 'update']:
        with self.argument_context('synapse workspace key ' + scope) as c:
            c.argument('key_name', arg_type=name_type, id_part='child_name_1', help='The workspace customer-managed key display name. All existing keys can be found using "az synapse workspace key list" cmdlet.')

    with self.argument_context('synapse workspace key create') as c:
        c.argument('key_identifier', help='The Key Vault Url of the workspace encryption key. should be in the format of: https://{keyvaultname}.vault.azure.net/keys/{keyname}.')

    with self.argument_context('synapse workspace key update') as c:
        c.argument('key_identifier', help='The Key Vault Url of the workspace encryption key. should be in the format of: https://{keyvaultname}.vault.azure.net/keys/{keyname}.')
        c.argument('is_active', arg_type=get_three_state_flag(), help='Set True to change the workspace state from pending to success state.')

    # synapse workspace managed-identity
    with self.argument_context('synapse workspace managed-identity') as c:
        c.argument('workspace_name', id_part='name', help='The workspace name.')

    for scope in ['grant-sql-access', 'revoke-sql-access', 'show-sql-access']:
        with self.argument_context('synapse workspace managed-identity ' + scope) as c:
            c.argument('workspace_name', id_part='name', help='The workspace name.')

    # synapse spark job
    for scope in ['job', 'session', 'statement']:
        with self.argument_context('synapse spark ' + scope) as c:
            c.argument('workspace_name', help='The name of the workspace.')
            c.argument('spark_pool_name', help='The name of the Spark pool.')

    for scope in ['synapse spark job', 'synapse spark session']:
        with self.argument_context(scope + ' list') as c:
            c.argument('from_index', help='Optional parameter specifying which index the list should begin from.')
            c.argument('size',
                       help='The size of the returned list. By default it is 20 and that is the maximum.')

    with self.argument_context('synapse spark job submit') as c:
        c.argument('main_definition_file', help='The main file used for the job.')
        c.argument('main_class_name',
                   help='The fully-qualified identifier of the main class that is in the main definition file.')
        c.argument('command_line_arguments', options_list=['--arguments'], nargs='+',
                   help='Optional arguments to the job (Note: please use storage URIs for file arguments).')
        c.argument('archives', nargs='+', help='The array of archives.')
        c.argument('job_name', arg_type=name_type, help='The Spark job name.')
        c.argument('reference_files', nargs='+',
                   help='Additional files used for reference in the main definition file.')
        c.argument('configuration', type=get_json_object, help='The configuration of Spark job.')
        c.argument('executors', help='The number of executors.')
        c.argument('executor_size', arg_type=get_enum_type(['Small', 'Medium', 'Large']), help='The executor size')
        c.argument('tags', arg_type=tags_type)
        c.argument('language', arg_type=get_enum_type(SparkBatchLanguage, default=SparkBatchLanguage.Scala),
                   help='The Spark job language.')

    for scope in ['show', 'cancel']:
        with self.argument_context('synapse spark job ' + scope) as c:
            c.argument('job_id', options_list=['--livy-id'], arg_group='Spark job',
                       help='The id of the Spark job.')

    with self.argument_context('synapse spark session create') as c:
        c.argument('job_name', arg_type=name_type, help='The Spark session name.')
        c.argument('reference_files', nargs='+',
                   help='Additional files used for reference in the main definition file.')
        c.argument('configuration', type=get_json_object, help='The configuration of Spark session.')
        c.argument('executors', help='The number of executors.')
        c.argument('executor_size', arg_type=get_enum_type(['Small', 'Medium', 'Large']), help='The executor size')
        c.argument('tags', arg_type=tags_type)

    for scope in ['show', 'cancel', 'reset-timeout']:
        with self.argument_context('synapse spark session ' + scope) as c:
            c.argument('session_id', options_list=['--livy-id'], arg_group='Spark Session',
                       help='The id of the Spark session job.')

    with self.argument_context('synapse spark statement') as c:
        c.argument('session_id', help='The id of Spark session.')

    for scope in ['show', 'cancel']:
        with self.argument_context('synapse spark statement ' + scope) as c:
            c.argument('statement_id', options_list=['--livy-id'], arg_group="Spark statement",
                       help='The id of the statement.')

    with self.argument_context('synapse spark statement invoke') as c:
        c.argument('code', completer=FilesCompleter(),
                   help='The code of the Spark statement. Provide the code contents directly, or use `@<file path>` to load the content from a file.')
        c.argument('language', arg_type=get_enum_type(SparkStatementLanguage), validator=validate_statement_language,
                   help='The language of Spark statement.')

    # synapse workspace access-control
    for scope in ['create', 'list']:
        with self.argument_context('synapse role assignment ' + scope) as c:
            c.argument('workspace_name', arg_type=workspace_name_arg_type)
            c.argument('role', arg_type=role_arg_type)
            c.argument('assignee', arg_type=assignee_arg_type)

    with self.argument_context('synapse role assignment show') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('role_assignment_id', options_list=['--id'],
                   help='Id of the role that is assigned to the principal.')

    with self.argument_context('synapse role assignment delete') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('role', arg_type=role_arg_type)
        c.argument('assignee', arg_type=assignee_arg_type)
        c.argument('ids', nargs='+',
                   help='Space-separated role assignment ids. You should not provide --role or --assignee when --ids is provided.')

    with self.argument_context('synapse role definition show') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('role', arg_type=role_arg_type)

    with self.argument_context('synapse role definition list') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)

    # synapse artifacts linked-service
    for scope in ['create', 'set']:
        with self.argument_context('synapse linked-service ' + scope) as c:
            c.argument('workspace_name', arg_type=workspace_name_arg_type)
            c.argument('linked_service_name', arg_type=name_type, help='The linked service name.')
            c.argument('definition_file', arg_type=definition_file_arg_type)

    with self.argument_context('synapse linked-service list') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)

    with self.argument_context('synapse linked-service show') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('linked_service_name', arg_type=name_type, help='The linked service name.')

    with self.argument_context('synapse linked-service delete') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('linked_service_name', arg_type=name_type, help='The linked service name.')

    # synapse artifacts dataset
    for scope in ['create', 'set']:
        with self.argument_context('synapse dataset ' + scope) as c:
            c.argument('workspace_name', arg_type=workspace_name_arg_type)
            c.argument('dataset_name', arg_type=name_type, help='The dataset name.')
            c.argument('definition_file', arg_type=definition_file_arg_type)

    with self.argument_context('synapse dataset list') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)

    with self.argument_context('synapse dataset show') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('dataset_name', arg_type=name_type, help='The dataset name.')

    with self.argument_context('synapse dataset delete') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('dataset_name', arg_type=name_type, help='The dataset name.')

    # synapse artifacts pipeline
    for scope in ['create', 'set']:
        with self.argument_context('synapse pipeline ' + scope) as c:
            c.argument('workspace_name', arg_type=workspace_name_arg_type)
            c.argument('pipeline_name', arg_type=name_type, help='The pipeline name.')
            c.argument('definition_file', arg_type=definition_file_arg_type)

    with self.argument_context('synapse pipeline list') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)

    with self.argument_context('synapse pipeline show') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('pipeline_name', arg_type=name_type, help='The pipeline name.')

    with self.argument_context('synapse pipeline delete') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('pipeline_name', arg_type=name_type, help='The pipeline name.')

    with self.argument_context('synapse pipeline create-run') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('pipeline_name', arg_type=name_type, help='The pipeline name.')
        c.argument('reference_pipeline_run_id', options_list=['--reference-pipeline-run-id', '--run-id'],
                   help='The pipeline run ID for rerun. If run ID is specified, the parameters of the specified run will be used to create a new run.')
        c.argument('is_recovery', arg_type=get_three_state_flag(),
                   help='Recovery mode flag. If recovery mode is set to true, the specified referenced pipeline run and the new run will be grouped under the same groupId.')
        c.argument('start_activity_name',
                   help='In recovery mode, the rerun will start from this activity. If not specified, all activities will run.')
        c.argument('parameters', completer=FilesCompleter(), type=shell_safe_json_parse,
                   help='Parameters for pipeline run. Can be supplied from a JSON file using the `@{path}` syntax or a JSON string.')

    # synapse artifacts pipeline run
    with self.argument_context('synapse pipeline-run query-by-workspace') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('continuation_token',
                   help='The continuation token for getting the next page of results. Null for first page.')
        c.argument('last_updated_after',
                   help='The time at or after which the run event was updated in \'ISO 8601\' format.')
        c.argument('last_updated_before',
                   help='The time at or before which the run event was updated in \'ISO 8601\' format.')
        c.argument('filters', action=AddFilters, nargs='*', help='List of filters.')
        c.argument('order_by', action=AddOrderBy, nargs='*', help='List of OrderBy option.')

    with self.argument_context('synapse pipeline-run show') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('run_id', help='The pipeline run identifier.')

    with self.argument_context('synapse pipeline-run cancel') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('run_id', help='The pipeline run identifier.')
        c.argument('is_recursive', arg_type=get_three_state_flag(),
                   help='If true, cancel all the Child pipelines that are triggered by the current pipeline.')

    with self.argument_context('synapse activity-run query-by-pipeline-run') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('pipeline_name', arg_type=name_type, help='The pipeline name.')
        c.argument('run_id', help='The pipeline run identifier.')
        c.argument('continuation_token',
                   help='The continuation token for getting the next page of results. Null for first page.')
        c.argument('last_updated_after',
                   help='The time at or after which the run event was updated in \'ISO 8601\' format.')
        c.argument('last_updated_before',
                   help='The time at or before which the run event was updated in \'ISO 8601\' format.')
        c.argument('filters', action=AddFilters, nargs='*', help='List of filters.')
        c.argument('order_by', action=AddOrderBy, nargs='*', help='List of OrderBy option.')

    # synapse artifacts trigger
    for scope in ['create', 'set']:
        with self.argument_context('synapse trigger ' + scope) as c:
            c.argument('workspace_name', arg_type=workspace_name_arg_type)
            c.argument('trigger_name', arg_type=name_type, help='The trigger name.')
            c.argument('definition_file', arg_type=definition_file_arg_type)

    with self.argument_context('synapse trigger list') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)

    with self.argument_context('synapse trigger show') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('trigger_name', arg_type=name_type, help='The trigger name.')

    with self.argument_context('synapse trigger delete') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('trigger_name', arg_type=name_type, help='The trigger name.')

    with self.argument_context('synapse trigger subscribe-to-event') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('trigger_name', arg_type=name_type, help='The trigger name.')

    with self.argument_context('synapse trigger get-event-subscription-status') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('trigger_name', arg_type=name_type, help='The trigger name.')

    with self.argument_context('synapse trigger unsubscribe-from-event') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('trigger_name', arg_type=name_type, help='The trigger name.')

    with self.argument_context('synapse trigger start') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('trigger_name', arg_type=name_type, help='The trigger name.')

    with self.argument_context('synapse trigger stop') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('trigger_name', arg_type=name_type, help='The trigger name.')

    # synapse artifacts trigger run
    with self.argument_context('synapse trigger-run rerun') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('trigger_name', arg_type=name_type, help='The trigger name.')
        c.argument('run_id', help='The trigger run identifier.')

    with self.argument_context('synapse trigger-run query-by-workspace') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('continuation_token',
                   help='The continuation token for getting the next page of results. Null for first page.')
        c.argument('last_updated_after',
                   help='The time at or after which the run event was updated in \'ISO 8601\' format.')
        c.argument('last_updated_before',
                   help='The time at or before which the run event was updated in \'ISO 8601\' format.')
        c.argument('filters', action=AddFilters, nargs='*', help='List of filters.')
        c.argument('order_by', action=AddOrderBy, nargs='*', help='List of OrderBy option.')

    # synapse artifacts data flow
    for scope in ['create', 'set']:
        with self.argument_context('synapse data-flow ' + scope) as c:
            c.argument('workspace_name', arg_type=workspace_name_arg_type)
            c.argument('data_flow_name', arg_type=name_type, help='The data flow name.')
            c.argument('definition_file', arg_type=definition_file_arg_type)

    with self.argument_context('synapse data-flow list') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)

    with self.argument_context('synapse data-flow show') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('data_flow_name', arg_type=name_type, help='The data flow name.')

    with self.argument_context('synapse data-flow delete') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('data_flow_name', arg_type=name_type, help='The data flow name.')

    # synapse artifacts notebook
    for scope in ['create', 'set', 'import']:
        with self.argument_context('synapse notebook ' + scope) as c:
            c.argument('workspace_name', arg_type=workspace_name_arg_type)
            c.argument('notebook_name', arg_type=name_type, help='The notebook name.')
            c.argument('definition_file', arg_type=definition_file_arg_type)
            c.argument('spark_pool_name', help='The name of the Spark pool.')
            c.argument('executor_size', arg_type=get_enum_type(['Small', 'Medium', 'Large']),
                       help='Number of cores and amount of memory to be used for executors allocated in the specified Spark pool for the job.')
            c.argument('executor_count',
                       help='Number of executors to be allocated in the specified Spark pool for the job.')

    with self.argument_context('synapse notebook list') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)

    with self.argument_context('synapse notebook show') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('notebook_name', arg_type=name_type, help='The notebook name.')

    with self.argument_context('synapse notebook export') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('output_folder', help='The folder where the notebook should be placed.')
        c.argument('notebook_name', arg_type=name_type, help='The notebook name.')

    with self.argument_context('synapse notebook delete') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('notebook_name', arg_type=name_type, help='The notebook name.')

    # synapse integration runtime
    with self.argument_context('synapse integration-runtime') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type, id_part='name')

    for scope in ['list', 'list-auth-key', 'wait']:
        with self.argument_context('synapse integration-runtime ' + scope) as c:
            c.argument('workspace_name', arg_type=workspace_name_arg_type, id_part=None)

    for scope in ['list-auth-key', 'wait']:
        with self.argument_context('synapse integration-runtime ' + scope) as c:
            c.argument('integration_runtime_name', arg_type=name_type, help='The integration runtime name.', id_part=None)

    for scope in ['show', 'create', 'delete', 'wait', 'update', 'upgrade', 'regenerate-auth-key', 'get-monitoring-data', 'sync-credentials', 'get-connection-info', 'get-status']:
        with self.argument_context('synapse integration-runtime ' + scope) as c:
            c.argument('integration_runtime_name', arg_type=name_type, help='The integration runtime name.', id_part='child_name_1')

    with self.argument_context('synapse integration-runtime create') as c:
        c.argument('integration_runtime_type', options_list=['--type'], arg_type=get_enum_type(['Managed', 'SelfHosted']), help='The integration runtime type.')
        c.argument('description', help='The integration runtime description.')
        c.argument('if_match', help='ETag of the integration runtime entity. Should only be specified for update, in '
                   'which case it should match the existing entity or can be * for an unconditional update.')
        # Managed
        c.argument('location', arg_group='Managed', help='The integration runtime location.')
        c.argument('compute_type', arg_group='Managed', arg_type=get_enum_type(['General', 'MemoryOptimized', 'ComputeOptimized']),
                   help='Compute type of the data flow cluster which will execute the data flow job.')
        c.argument('core_count', arg_group='Managed', help='Core count of the data flow cluster which will execute the data flow job.')
        c.argument('time_to_live', arg_group='Managed', help='Time to live (in minutes) setting of the data flow cluster which will execute the data flow job.')

    with self.argument_context('synapse integration-runtime update') as c:
        c.argument('auto_update', arg_type=get_enum_type(['On', 'Off']), help='Enable or disable the self-hosted integration runtime auto-update.')
        c.argument('update_delay_offset', help='The time of the day for the self-hosted integration runtime auto-update.')

    with self.argument_context('synapse integration-runtime regenerate-auth-key') as c:
        c.argument('key_name', arg_type=get_enum_type(['authKey1', 'authKey2']), help='The name of the authentication key to regenerate.')

    with self.argument_context('synapse integration-runtime-node') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type, id_part='name')
        c.argument('integration_runtime_name', arg_type=name_type, help='The integration runtime name.', id_part='child_name_1')

    for scope in ['show', 'update', 'delete', 'get-ip-address']:
        with self.argument_context('synapse integration-runtime-node ' + scope) as c:
            c.argument('node_name', help='The integration runtime node name.')

    with self.argument_context('synapse integration-runtime-node update') as c:
        c.argument('concurrent_jobs_limit', options_list=['--concurrent-jobs'], help='The number of concurrent jobs permitted to '
                   'run on the integration runtime node. Values between 1 and maxConcurrentJobs are allowed.')
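
A recurring pattern in the example above is pairing `FilesCompleter()` with help text that advertises the `@<file path>` convention. Below is a minimal, self-contained sketch of that pattern using plain argparse and argcomplete only; it is not azure-cli's actual plumbing, and `load_at_prefixed` is a hypothetical helper name used for illustration.

import argparse

import argcomplete
from argcomplete.completers import FilesCompleter


def load_at_prefixed(value):
    # Hypothetical helper: return the value as-is, or the file contents when it starts with '@'.
    if value.startswith('@'):
        with open(value[1:], encoding='utf-8') as f:
            return f.read()
    return value


parser = argparse.ArgumentParser(prog='demo')
parser.add_argument(
    '--code',
    type=load_at_prefixed,
    help='Statement code, or `@<file path>` to read it from a file.'
).completer = FilesCompleter()
argcomplete.autocomplete(parser)
args = parser.parse_args()
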
Code example #8
File: _params.py Project: zhangyan133/azure-cli
    get_resource_name_completion_list, get_location_type
from azure.cli.core.commands.validators import get_default_location_from_resource_group
from azure.cli.core.util import get_json_object, shell_safe_json_parse
from ._validators import validate_storage_account, validate_statement_language
from ._completers import get_role_definition_name_completion_list
from .constant import SparkBatchLanguage, SparkStatementLanguage, SqlPoolConnectionClientType, \
    SqlPoolConnectionClientAuthenticationType
from .action import AddFilters, AddOrderBy

workspace_name_arg_type = CLIArgumentType(help='The workspace name.',
                                          completer=get_resource_name_completion_list('Microsoft.Synapse/workspaces'))
assignee_arg_type = CLIArgumentType(
    help='Represents a user, group, or service principal. Supported format: object id, user sign-in name, or service principal name.')
role_arg_type = CLIArgumentType(help='The role name/id that is assigned to the principal.',
                                completer=get_role_definition_name_completion_list)
definition_file_arg_type = CLIArgumentType(options_list=['--file'], completer=FilesCompleter(),
                                           type=shell_safe_json_parse,
                                           help='Properties may be supplied from a JSON file using the `@{path}` syntax or a JSON string.')
time_format_help = 'Time should be in following format: "YYYY-MM-DDTHH:MM:SS".'
storage_arg_group = "Storage"
policy_arg_group = 'Policy'


def _configure_security_or_audit_policy_storage_params(arg_ctx):
    arg_ctx.argument('storage_account',
                     options_list=['--storage-account'],
                     arg_group=storage_arg_group,
                     help='Name of the storage account.')

    arg_ctx.argument('storage_account_access_key',
                     options_list=['--storage-key'],
Code example #9
def load_arguments(self, _):
    resources_directory_type = CLIArgumentType(
        options_list=["--resources-directory", "-d"],
        completer=DirectoriesCompleter(),
        type=file_type,
        help="Directory which contains the resources",
    )
    resource_file_type = CLIArgumentType(
        options_list=["--resource-file", "-f"],
        completer=FilesCompleter(allowednames=["json", "yaml"]),
        type=file_type,
        help="Resource file path",
    )
    resource_schema_type = CLIArgumentType(
        options_list=["--resource-schema", "-s"],
        completer=FilesCompleter(allowednames=["json", "yaml"], directories=False),
        type=file_type,
        help="Resource schema file path",
    )
    resource_type = CLIArgumentType(
        options_list=["--resource-type", "-t"],
        choices=["scheduled_detection", "microsoft_security_detection", "data_source"],
        help="Resource type",
    )
    aux_subscription_type = CLIArgumentType(
        options_list=["--aux-subscriptions"],
        help="Auxiliary subscriptions for multi-tenant resource deployment such as cross tenant Logic App linking",
    )

    with self.argument_context("sentinel") as c:
        c.argument(
            "workspace_name",
            options_list=["--workspace-name", "-n"],
            help="Name of the Sentinel Workspace",
        )

    with self.argument_context("sentinel create") as c:
        c.argument("aux_subscriptions", aux_subscription_type)
        c.argument("resource_type", resource_type)
        c.argument("resources_directory", resources_directory_type)
        c.argument("resource_file", resource_file_type)
        c.argument(
            "enable_validation",
            options_list=["--enable-validation"],
            arg_type=get_three_state_flag(),
            help="Enable/Disable resource validation before deploying it",
        )
        c.argument("resource_schema", resource_schema_type)

    with self.argument_context("sentinel validate") as c:
        c.argument("resource_type", resource_type)
        c.argument("resources_directory", resources_directory_type)
        c.argument("resource_file", resource_file_type)
        c.argument("resource_schema", resource_schema_type)

    with self.argument_context("sentinel generate") as c:
        c.argument("resource_type", resource_type)
        c.argument("resources_directory", resources_directory_type)
        c.argument(
            "skip_interactive",
            options_list=["--skip-interactive"],
            arg_type=get_three_state_flag(),
            help="Enable/Disable interactive resource generation",
        )
        # TODO: Add all detection configurations as arguments here
        c.argument(
            "name",
            options_list=["--name", "-n"],
            help="Name of your resource (alphanumeric without spaces)",
        )
        c.argument(
            "create_directory",
            options_list=["--create-dir"],
            arg_type=get_three_state_flag(),
            help="Enable/Disable creating new directory for the resource",
        )
        c.argument(
            "with_documentation",
            options_list=["--with-documentation", "--doc"],
            arg_type=get_three_state_flag(),
            help="Enable/Disable resource documentation",
        )
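
The Sentinel example narrows shell completion with `allowednames` and `directories=False`, and uses `DirectoriesCompleter` for the resources directory. A small sketch of the three variants, assuming nothing beyond argparse and argcomplete:

import argparse

import argcomplete
from argcomplete.completers import DirectoriesCompleter, FilesCompleter

parser = argparse.ArgumentParser(prog='demo')
# Any file or directory is offered.
parser.add_argument('--any-file').completer = FilesCompleter()
# Only *.json / *.yaml files are offered; directories are excluded.
parser.add_argument('--resource-schema').completer = FilesCompleter(
    allowednames=['json', 'yaml'], directories=False)
# Only directories are offered.
parser.add_argument('--resources-directory').completer = DirectoriesCompleter()
argcomplete.autocomplete(parser)
args = parser.parse_args()
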
Code example #10
from .action import AddFilters, AddOrderBy

workspace_name_arg_type = CLIArgumentType(
    help='The workspace name.',
    completer=get_resource_name_completion_list(
        'Microsoft.Synapse/workspaces'))
assignee_arg_type = CLIArgumentType(
    help=
    'Represents a user, group, or service principal. Supported format: object id, user sign-in name, or service principal name.'
)
role_arg_type = CLIArgumentType(
    help='The role name/id that is assigned to the principal.',
    completer=get_role_definition_name_completion_list)
definition_file_arg_type = CLIArgumentType(
    options_list=['--file'],
    completer=FilesCompleter(),
    type=shell_safe_json_parse,
    help=
    'Properties may be supplied from a JSON file using the `@{path}` syntax or a JSON string.'
)
time_format_help = 'Time should be in following format: "YYYY-MM-DDTHH:MM:SS".'
storage_arg_group = "Storage"
policy_arg_group = 'Policy'


def _configure_security_or_audit_policy_storage_params(arg_ctx):
    arg_ctx.argument('storage_account',
                     options_list=['--storage-account'],
                     arg_group=storage_arg_group,
                     help='Name of the storage account.')
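
Both azure-cli excerpts define argument types such as `workspace_name_arg_type` once at module level and reuse them across many `argument_context` blocks. In plain argparse the same idea is just a shared keyword bundle; the sketch below only illustrates the reuse pattern, is not azure-cli's `CLIArgumentType`, and `workspace_name_kwargs` is an invented name.

import argparse

# A shared bundle of keyword arguments, reused wherever the option appears.
workspace_name_kwargs = dict(
    metavar='WORKSPACE_NAME',
    help='The workspace name.')

parser = argparse.ArgumentParser(prog='demo')
subparsers = parser.add_subparsers(dest='command')
for command in ('list', 'show', 'delete'):
    sub = subparsers.add_parser(command)
    sub.add_argument('--workspace-name', **workspace_name_kwargs)
args = parser.parse_args()
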
Code example #11
File: _params.py Project: jaysterp/azure-cli
def load_arguments(self, _):
    # synapse workspace
    for scope in ['show', 'create', 'update', 'delete']:
        with self.argument_context('synapse workspace ' + scope) as c:
            c.argument('workspace_name',
                       arg_type=name_type,
                       id_part='name',
                       help='The workspace name.')

    for scope in ['create', 'update']:
        with self.argument_context('synapse workspace ' + scope) as c:
            c.argument('sql_admin_login_password',
                       options_list=['--sql-admin-login-password', '-p'],
                       help='The sql administrator login password.')
            c.argument('tags', arg_type=tags_type)

    with self.argument_context('synapse workspace create') as c:
        c.argument("storage_account",
                   validator=validate_storage_account,
                   help='The data lake storage account name or resource id.')
        c.argument('file_system',
                   help='The file system of the data lake storage account.')
        c.argument('sql_admin_login_user',
                   options_list=['--sql-admin-login-user', '-u'],
                   help='The sql administrator login user name.')

    with self.argument_context('synapse workspace check-name') as c:
        c.argument('name',
                   arg_type=name_type,
                   help='The name you wanted to check.')

    # synapse spark pool
    with self.argument_context('synapse spark pool') as c:
        c.argument('workspace_name',
                   id_part='name',
                   help='The workspace name.')

    with self.argument_context('synapse spark pool list') as c:
        c.argument('workspace_name', id_part=None, help='The workspace name.')

    for scope in ['show', 'create', 'update', 'delete']:
        with self.argument_context('synapse spark pool ' + scope) as c:
            c.argument('spark_pool_name',
                       arg_type=name_type,
                       id_part='child_name_1',
                       help='The name of the Spark pool.')

    with self.argument_context('synapse spark pool create') as c:
        # Node
        c.argument('node_count',
                   type=int,
                   arg_group='Node',
                   help='The number of nodes.')
        c.argument('node_size_family',
                   arg_group='Node',
                   help='The node size family.')
        c.argument('node_size',
                   arg_group='Node',
                   arg_type=get_enum_type(['Small', 'Medium', 'Large']),
                   help='The node size.')

        # AutoScale
        c.argument('enable_auto_scale',
                   arg_type=get_three_state_flag(),
                   arg_group='AutoScale',
                   help='The flag of enabling auto scale.')
        c.argument('max_node_count',
                   type=int,
                   arg_group='AutoScale',
                   help='The max node count.')
        c.argument('min_node_count',
                   type=int,
                   arg_group='AutoScale',
                   help='The min node count.')

        # AutoPause
        c.argument('enable_auto_pause',
                   arg_type=get_three_state_flag(),
                   arg_group='AutoPause',
                   help='The flag of enabling auto pause.')
        c.argument('delay',
                   arg_group='AutoPause',
                   help='The delay time in minutes.')

        # Environment Configuration
        c.argument('library_requirements',
                   arg_group='Environment Configuration',
                   help='The library requirements file.')

        # Default Folder
        c.argument('spark_events_folder',
                   arg_group='Default Folder',
                   help='The Spark events folder.')
        c.argument('spark_log_folder',
                   arg_group='Default Folder',
                   help='The default Spark log folder.')

        # Component Version
        c.argument('spark_version',
                   arg_group='Component Version',
                   help='The supported Spark version is 2.4 now.')

        c.argument('tags', arg_type=tags_type)

    with self.argument_context('synapse spark pool update') as c:
        c.argument('tags', arg_type=tags_type)
        # Node
        c.argument('node_count',
                   type=int,
                   arg_group='Node',
                   help='The number of nodes.')
        c.argument('node_size_family',
                   arg_group='Node',
                   help='The node size family.')

        c.argument('node_size',
                   arg_group='Node',
                   arg_type=get_enum_type(['Small', 'Medium', 'Large']),
                   help='The node size.')
        # AutoScale
        c.argument('enable_auto_scale',
                   arg_type=get_three_state_flag(),
                   arg_group='AutoScale',
                   help='The flag of enabling auto scale.')
        c.argument('max_node_count',
                   type=int,
                   arg_group='AutoScale',
                   help='The max node count.')
        c.argument('min_node_count',
                   type=int,
                   arg_group='AutoScale',
                   help='The min node count.')

        # AutoPause
        c.argument('enable_auto_pause',
                   arg_type=get_three_state_flag(),
                   arg_group='AutoPause',
                   help='The flag of enabling auto pause.')
        c.argument('delay',
                   arg_group='AutoPause',
                   help='The delay time in minutes.')

        # Environment Configuration
        c.argument('library_requirements',
                   arg_group='Environment Configuration',
                   help='The library requirements file.')
        c.argument('force',
                   arg_type=get_three_state_flag(),
                   help='The flag of force operation.')

    # synapse sql pool
    with self.argument_context('synapse sql pool') as c:
        c.argument('workspace_name',
                   id_part='name',
                   help='The workspace name.')

    with self.argument_context('synapse sql pool list') as c:
        c.argument('workspace_name', id_part=None, help='The workspace name.')

    for scope in ['show', 'create', 'delete', 'update', 'pause', 'resume']:
        with self.argument_context('synapse sql pool ' + scope) as c:
            c.argument('sql_pool_name',
                       arg_type=name_type,
                       id_part='child_name_1',
                       help='The SQL pool name.')

    with self.argument_context('synapse sql pool create') as c:
        c.argument('performance_level', help='The performance level.')
        c.argument('source_database_id', help='The source database id.')
        c.argument('recoverable_database_id',
                   help='The recoverable database id.')
        c.argument('tags', arg_type=tags_type)

    with self.argument_context('synapse sql pool update') as c:
        c.argument('sku_name',
                   options_list=['--performance-level'],
                   help='The performance level.')
        c.argument('tags', arg_type=tags_type)

    # synapse workspace firewall-rule
    with self.argument_context('synapse workspace firewall-rule') as c:
        c.argument('workspace_name',
                   id_part='name',
                   help='The workspace name.')

    with self.argument_context('synapse workspace firewall-rule list') as c:
        c.argument('workspace_name', id_part=None, help='The workspace name.')

    for scope in ['show', 'create', 'delete']:
        with self.argument_context('synapse workspace firewall-rule ' +
                                   scope) as c:
            c.argument('rule_name',
                       arg_type=name_type,
                       id_part='child_name_1',
                       help='The IP firewall rule name')

    with self.argument_context('synapse workspace firewall-rule create') as c:
        c.argument(
            'start_ip_address',
            help=
            'The start IP address of the firewall rule. Must be IPv4 format.')
        c.argument(
            'end_ip_address',
            help='The end IP address of the firewall rule. Must be IPv4 format. '
            'Must be greater than or equal to startIpAddress.')

    # synapse spark job
    for scope in ['job', 'session', 'statement']:
        with self.argument_context('synapse spark ' + scope) as c:
            c.argument('workspace_name', help='The name of the workspace.')
            c.argument('spark_pool_name', help='The name of the Spark pool.')

    for scope in ['synapse spark job', 'synapse spark session']:
        with self.argument_context(scope + ' list') as c:
            c.argument(
                'from_index',
                help=
                'Optional parameter specifying which index the list should begin from.'
            )
            c.argument(
                'size',
                help=
                'The size of the returned list. By default it is 20 and that is the maximum.'
            )

    with self.argument_context('synapse spark job submit') as c:
        c.argument('main_definition_file',
                   help='The main file used for the job.')
        c.argument(
            'main_class_name',
            help=
            'The fully-qualified identifier of the main class that is in the main definition file.'
        )
        c.argument(
            'command_line_arguments',
            options_list=['--arguments'],
            nargs='+',
            help=
            'Optional arguments to the job (Note: please use storage URIs for file arguments).'
        )
        c.argument('archives', nargs='+', help='The array of archives.')
        c.argument('job_name', arg_type=name_type, help='The Spark job name.')
        c.argument(
            'reference_files',
            nargs='+',
            help=
            'Additional files used for reference in the main definition file.')
        c.argument('configuration',
                   type=get_json_object,
                   help='The configuration of Spark job.')
        c.argument('executors', help='The number of executors.')
        c.argument('executor_size',
                   arg_type=get_enum_type(['Small', 'Medium', 'Large']),
                   help='The executor size')
        c.argument('tags', arg_type=tags_type)
        c.argument('language',
                   arg_type=get_enum_type(SparkBatchLanguage,
                                          default=SparkBatchLanguage.Scala),
                   help='The Spark job language.')

    for scope in ['show', 'cancel']:
        with self.argument_context('synapse spark job ' + scope) as c:
            c.argument('job_id',
                       options_list=['--livy-id'],
                       arg_group='Spark job',
                       help='The id of the Spark job.')

    with self.argument_context('synapse spark session create') as c:
        c.argument('job_name',
                   arg_type=name_type,
                   help='The Spark session name.')
        c.argument(
            'reference_files',
            nargs='+',
            help=
            'Additional files used for reference in the main definition file.')
        c.argument('configuration',
                   type=get_json_object,
                   help='The configuration of Spark session.')
        c.argument('executors', help='The number of executors.')
        c.argument('executor_size',
                   arg_type=get_enum_type(['Small', 'Medium', 'Large']),
                   help='The executor size')
        c.argument('tags', arg_type=tags_type)

    for scope in ['show', 'cancel', 'reset-timeout']:
        with self.argument_context('synapse spark session ' + scope) as c:
            c.argument('session_id',
                       options_list=['--livy-id'],
                       arg_group='Spark Session',
                       help='The id of the Spark session job.')

    with self.argument_context('synapse spark statement') as c:
        c.argument('session_id', help='The id of Spark session.')

    for scope in ['show', 'cancel']:
        with self.argument_context('synapse spark statement ' + scope) as c:
            c.argument('statement_id',
                       options_list=['--livy-id'],
                       arg_group="Spark statement",
                       help='The id of the statement.')

    with self.argument_context('synapse spark statement invoke') as c:
        c.argument(
            'code',
            completer=FilesCompleter(),
            help=
            'The code of the Spark statement. Provide the code contents directly, or use `@<file path>` to load the content from a file.'
        )
        c.argument('language',
                   arg_type=get_enum_type(SparkStatementLanguage),
                   validator=validate_statement_language,
                   help='The language of Spark statement.')

    # synapse workspace access-control
    for scope in ['create', 'list']:
        with self.argument_context('synapse role assignment ' + scope) as c:
            c.argument('workspace_name', arg_type=workspace_name_arg_type)
            c.argument('role', arg_type=role_arg_type)
            c.argument('assignee', arg_type=assignee_arg_type)

    with self.argument_context('synapse role assignment show') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('role_assignment_id',
                   options_list=['--id'],
                   help='Id of the role that is assigned to the principal.')

    with self.argument_context('synapse role assignment delete') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('role', arg_type=role_arg_type)
        c.argument('assignee', arg_type=assignee_arg_type)
        c.argument(
            'ids',
            nargs='+',
            help=
            'Space-separated role assignment ids. You should not provide --role or --assignee when --ids is provided.'
        )

    with self.argument_context('synapse role definition show') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('role', arg_type=role_arg_type)

    with self.argument_context('synapse role definition list') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)

    # synapse artifacts linked-service
    for scope in ['create', 'set']:
        with self.argument_context('synapse linked-service ' + scope) as c:
            c.argument('workspace_name', arg_type=workspace_name_arg_type)
            c.argument('linked_service_name',
                       arg_type=name_type,
                       help='The linked service name.')
            c.argument('definition_file', arg_type=definition_file_arg_type)

    with self.argument_context('synapse linked-service list') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)

    with self.argument_context('synapse linked-service show') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('linked_service_name',
                   arg_type=name_type,
                   help='The linked service name.')

    with self.argument_context('synapse linked-service delete') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('linked_service_name',
                   arg_type=name_type,
                   help='The linked service name.')

    # synapse artifacts dataset
    for scope in ['create', 'set']:
        with self.argument_context('synapse dataset ' + scope) as c:
            c.argument('workspace_name', arg_type=workspace_name_arg_type)
            c.argument('dataset_name',
                       arg_type=name_type,
                       help='The dataset name.')
            c.argument('definition_file', arg_type=definition_file_arg_type)

    with self.argument_context('synapse dataset list') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)

    with self.argument_context('synapse dataset show') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('dataset_name',
                   arg_type=name_type,
                   help='The dataset name.')

    with self.argument_context('synapse dataset delete') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('dataset_name',
                   arg_type=name_type,
                   help='The dataset name.')

    # synapse artifacts pipeline
    for scope in ['create', 'set']:
        with self.argument_context('synapse pipeline ' + scope) as c:
            c.argument('workspace_name', arg_type=workspace_name_arg_type)
            c.argument('pipeline_name',
                       arg_type=name_type,
                       help='The pipeline name.')
            c.argument('definition_file', arg_type=definition_file_arg_type)

    with self.argument_context('synapse pipeline list') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)

    with self.argument_context('synapse pipeline show') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('pipeline_name',
                   arg_type=name_type,
                   help='The pipeline name.')

    with self.argument_context('synapse pipeline delete') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('pipeline_name',
                   arg_type=name_type,
                   help='The pipeline name.')

    with self.argument_context('synapse pipeline create-run') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('pipeline_name',
                   arg_type=name_type,
                   help='The pipeline name.')
        c.argument(
            'reference_pipeline_run_id',
            options_list=['--reference-pipeline-run-id', '--run-id'],
            help=
            'The pipeline run ID for rerun. If run ID is specified, the parameters of the specified run will be used to create a new run.'
        )
        c.argument(
            'is_recovery',
            arg_type=get_three_state_flag(),
            help=
            'Recovery mode flag. If recovery mode is set to true, the specified referenced pipeline run and the new run will be grouped under the same groupId.'
        )
        c.argument(
            'start_activity_name',
            help=
            'In recovery mode, the rerun will start from this activity. If not specified, all activities will run.'
        )
        c.argument(
            'parameters',
            completer=FilesCompleter(),
            type=shell_safe_json_parse,
            help=
            'Parameters for pipeline run. Can be supplied from a JSON file using the `@{path}` syntax or a JSON string.'
        )

    # synapse artifacts pipeline run
    with self.argument_context('synapse pipeline-run query-by-workspace') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument(
            'continuation_token',
            help=
            'The continuation token for getting the next page of results. Null for first page.'
        )
        c.argument(
            'last_updated_after',
            help=
            'The time at or after which the run event was updated in \'ISO 8601\' format.'
        )
        c.argument(
            'last_updated_before',
            help=
            'The time at or before which the run event was updated in \'ISO 8601\' format.'
        )
        c.argument('filters',
                   action=AddFilters,
                   nargs='*',
                   help='List of filters.')
        c.argument('order_by',
                   action=AddOrderBy,
                   nargs='*',
                   help='List of OrderBy option.')

    with self.argument_context('synapse pipeline-run show') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('run_id', help='The pipeline run identifier.')

    with self.argument_context('synapse pipeline-run cancel') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('run_id', help='The pipeline run identifier.')
        c.argument(
            'is_recursive',
            arg_type=get_three_state_flag(),
            help=
            'If true, cancel all the Child pipelines that are triggered by the current pipeline.'
        )

    with self.argument_context(
            'synapse activity-run query-by-pipeline-run') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('pipeline_name',
                   arg_type=name_type,
                   help='The pipeline name.')
        c.argument('run_id', help='The pipeline run identifier.')
        c.argument(
            'continuation_token',
            help=
            'The continuation token for getting the next page of results. Null for first page.'
        )
        c.argument(
            'last_updated_after',
            help=
            'The time at or after which the run event was updated in \'ISO 8601\' format.'
        )
        c.argument(
            'last_updated_before',
            help=
            'The time at or before which the run event was updated in \'ISO 8601\' format.'
        )
        c.argument('filters',
                   action=AddFilters,
                   nargs='*',
                   help='List of filters.')
        c.argument('order_by',
                   action=AddOrderBy,
                   nargs='*',
                   help='List of OrderBy option.')

    # synapse artifacts trigger
    for scope in ['create', 'set']:
        with self.argument_context('synapse trigger ' + scope) as c:
            c.argument('workspace_name', arg_type=workspace_name_arg_type)
            c.argument('trigger_name',
                       arg_type=name_type,
                       help='The trigger name.')
            c.argument('definition_file', arg_type=definition_file_arg_type)

    with self.argument_context('synapse trigger list') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)

    with self.argument_context('synapse trigger show') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('trigger_name',
                   arg_type=name_type,
                   help='The trigger name.')

    with self.argument_context('synapse trigger delete') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('trigger_name',
                   arg_type=name_type,
                   help='The trigger name.')

    with self.argument_context('synapse trigger subscribe-to-event') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('trigger_name',
                   arg_type=name_type,
                   help='The trigger name.')

    with self.argument_context(
            'synapse trigger get-event-subscription-status') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('trigger_name',
                   arg_type=name_type,
                   help='The trigger name.')

    with self.argument_context('synapse trigger unsubscribe-from-event') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('trigger_name',
                   arg_type=name_type,
                   help='The trigger name.')

    with self.argument_context('synapse trigger start') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('trigger_name',
                   arg_type=name_type,
                   help='The trigger name.')

    with self.argument_context('synapse trigger stop') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('trigger_name',
                   arg_type=name_type,
                   help='The trigger name.')

    # synapse artifacts trigger run
    with self.argument_context('synapse trigger-run rerun') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('trigger_name',
                   arg_type=name_type,
                   help='The trigger name.')
        c.argument('run_id', help='The trigger run identifier.')

    with self.argument_context('synapse trigger-run query-by-workspace') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument(
            'continuation_token',
            help=
            'The continuation token for getting the next page of results. Null for first page.'
        )
        c.argument(
            'last_updated_after',
            help=
            'The time at or after which the run event was updated in \'ISO 8601\' format.'
        )
        c.argument(
            'last_updated_before',
            help=
            'The time at or before which the run event was updated in \'ISO 8601\' format.'
        )
        c.argument('filters',
                   action=AddFilters,
                   nargs='*',
                   help='List of filters.')
        c.argument('order_by',
                   action=AddOrderBy,
                   nargs='*',
                   help='List of OrderBy option.')

    # synapse artifacts data flow
    for scope in ['create', 'set']:
        with self.argument_context('synapse data-flow ' + scope) as c:
            c.argument('workspace_name', arg_type=workspace_name_arg_type)
            c.argument('data_flow_name',
                       arg_type=name_type,
                       help='The data flow name.')
            c.argument('definition_file', arg_type=definition_file_arg_type)

    with self.argument_context('synapse data-flow list') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)

    with self.argument_context('synapse data-flow show') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('data_flow_name',
                   arg_type=name_type,
                   help='The data flow name.')

    with self.argument_context('synapse data-flow delete') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('data_flow_name',
                   arg_type=name_type,
                   help='The data flow name.')

    # synapse artifacts notebook
    for scope in ['create', 'set', 'import']:
        with self.argument_context('synapse notebook ' + scope) as c:
            c.argument('workspace_name', arg_type=workspace_name_arg_type)
            c.argument('notebook_name',
                       arg_type=name_type,
                       help='The notebook name.')
            c.argument('definition_file', arg_type=definition_file_arg_type)
            c.argument('spark_pool_name', help='The name of the Spark pool.')
            c.argument(
                'executor_size',
                arg_type=get_enum_type(['Small', 'Medium', 'Large']),
                help=
                'Number of cores and amount of memory to be used for executors allocated in the specified Spark pool for the job.'
            )
            c.argument(
                'executor_count',
                help=
                'Number of executors to be allocated in the specified Spark pool for the job.'
            )

    with self.argument_context('synapse notebook list') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)

    with self.argument_context('synapse notebook show') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('notebook_name',
                   arg_type=name_type,
                   help='The notebook name.')

    with self.argument_context('synapse notebook export') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('output_folder',
                   help='The folder where the notebook should be placed.')
        c.argument('notebook_name',
                   arg_type=name_type,
                   help='The notebook name.')

    with self.argument_context('synapse notebook delete') as c:
        c.argument('workspace_name', arg_type=workspace_name_arg_type)
        c.argument('notebook_name',
                   arg_type=name_type,
                   help='The notebook name.')
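
This excerpt leans on azure-cli helpers such as `get_enum_type` and `get_three_state_flag`. As a rough argparse analogue only, the sketch below uses a hypothetical `three_state_flag` converter; it is not the azure-cli implementation.

import argparse


def three_state_flag(value):
    # Hypothetical converter: accepts a few spellings of true/false, mirroring a three-state flag.
    lowered = value.lower()
    if lowered in ('true', 'yes', '1'):
        return True
    if lowered in ('false', 'no', '0'):
        return False
    raise argparse.ArgumentTypeError('expected true or false, got %r' % value)


parser = argparse.ArgumentParser(prog='demo')
parser.add_argument('--executor-size', choices=['Small', 'Medium', 'Large'],
                    help='The executor size.')
parser.add_argument('--enable-auto-scale', type=three_state_flag, nargs='?',
                    const=True, default=False,
                    help='The flag of enabling auto scale.')
args = parser.parse_args()
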
Code example #12
def parser(prog='kcd') -> argparse.ArgumentParser:
    p = argparse.ArgumentParser(prog=prog)
    # not used yet, but left here as a reminder to not steal the -c flag
    # p.add_argument('-c', '--config-file', help='path to configuration file')
    p.add_argument(
        '-f',
        '--environments-file',
        help=
        'KubeCD config file (default $KUBECD_ENVIRONMENTS or "environments.yaml")',
        metavar='FILE',
        default=os.getenv('KUBECD_ENVIRONMENTS',
                          'environments.yaml')).completer = FilesCompleter(
                              allowednames=('.yaml', '.yml'),
                              directories=False)
    p.add_argument('--version',
                   help='Show version and exit.',
                   action='version',
                   version='kubecd ' + __version__)
    p.add_argument('--verbose',
                   '-v',
                   help='Increase verbosity level',
                   action='count',
                   default=0)

    s = p.add_subparsers(dest='command',
                         title='Subcommands',
                         description='Use one of these sub-commands:')

    apply = s.add_parser('apply', help='apply changes to Kubernetes')
    apply.add_argument('--dry-run',
                       '-n',
                       action='store_true',
                       default=False,
                       help='dry run mode, only print commands')
    apply.add_argument('--debug',
                       action='store_true',
                       default=False,
                       help='run helm with --debug')
    apply.add_argument('--releases',
                       '-r',
                       action='append',
                       help='apply only these releases')
    apply.add_argument('--cluster',
                       '-c',
                       nargs='?',
                       metavar='CLUSTER',
                       help='apply all environments in CLUSTER')
    apply.add_argument('--init',
                       action='store_true',
                       default=False,
                       help='Initialize credentials and contexts')
    apply.add_argument(
        'env_name',
        nargs='?',
        metavar='ENV',
        help=
        'name of environment to apply, must be specified unless --cluster is')
    apply.set_defaults(func=apply_env)

    # diff = s.add_parser('diff', help='show diffs between running and git release')
    # diff.add_argument('--releases', '-r', help='which releases to diff', action='append')
    # diff.add_argument('env', nargs='?', help='name of environment')
    # diff.set_defaults(func=diff_release)

    poll_p = s.add_parser('poll', help='poll for new images in registries')
    poll_p.add_argument('--patch',
                        '-p',
                        action='store_true',
                        help='patch releases.yaml files with updated version')
    poll_p.add_argument('--releases',
                        '-r',
                        action='append',
                        help='poll this specific release')
    poll_p.add_argument('--image', '-i', help='poll releases using this image')
    poll_p.add_argument('--cluster',
                        '-c',
                        help='poll all releases in this cluster')
    poll_p.add_argument('env', nargs='?', help='name of environment to poll')
    poll_p.set_defaults(func=poll_registries)

    dump_p = s.add_parser('dump',
                          help='dump commands for one or all environments')
    dump_p.add_argument('env', nargs='?', help='name of environment to dump')
    dump_p.set_defaults(func=dump_env)

    list_p = s.add_parser('list',
                          help='list clusters, environments or releases')
    list_p.add_argument('kind',
                        choices=['env', 'release', 'cluster'],
                        help='what to list')
    list_p.set_defaults(func=list_kind)

    indent_p = s.add_parser('indent', help='canonically indent YAML files')
    indent_p.add_argument('files', nargs='+', help='file[s] to indent')
    indent_p.set_defaults(func=indent_file)

    observe = s.add_parser('observe', help='observe a new image version')
    observe.add_argument('--image',
                         '-i',
                         metavar='IMAGE:TAG',
                         help='the image, including tag')
    observe.add_argument('--patch',
                         action='store_true',
                         default=False,
                         help='patch release files with updated tags')
    observe.add_argument('--submit-pr',
                         action='store_true',
                         default=False,
                         help='submit a pull request with the updated tags')
    observe.set_defaults(func=observe_new_image)

    completion_p = s.add_parser('completion',
                                help='print shell completion script')
    completion_p.set_defaults(func=print_completion, prog=prog)

    j2y = s.add_parser('json2yaml',
                       help='JSON to YAML conversion utility (stdin/stdout)')
    j2y.set_defaults(func=json2yaml)

    init = s.add_parser('init', help='Initialize credentials and contexts')
    init.add_argument(
        '--cluster',
        help='Initialize contexts for all environments in a cluster')
    init.add_argument('--dry-run',
                      '-n',
                      action='store_true',
                      help='print commands instead of running them')
    init.add_argument('env_name',
                      metavar='ENV',
                      nargs='?',
                      help='environment to initialize')
    init.add_argument(
        '--contexts-only',
        action='store_true',
        help=
        'initialize contexts only, assuming that cluster credentials are set up'
    )
    init.set_defaults(func=init_contexts)

    use = s.add_parser('use',
                       help='switch kube context to the specified environment')
    use.add_argument('env', metavar='ENV', help='environment name')
    use.set_defaults(func=use_env_context)

    lint = s.add_parser(
        'lint',
        help=
        'inspect the contents of a release, exits with non-0 if there are issues'
    )
    lint.add_argument('--cluster', help='Lint all environments in a cluster')
    lint.add_argument('env_name',
                      metavar='ENV',
                      nargs='?',
                      help='environment name')
    lint.set_defaults(func=lint_environment)

    return p
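
A minimal driver for the parser above might look like the following sketch; the main() function, its missing-subcommand check, and the exit behaviour are assumptions added here for illustration, not part of the kubecd example itself.

def main():
    # Hypothetical entry point (assumption): enable shell completion, parse
    # argv, and dispatch to the handler that the chosen sub-command registered
    # via set_defaults(func=...).
    import sys

    import argcomplete

    p = parser()
    argcomplete.autocomplete(p)
    args = p.parse_args()
    if not hasattr(args, 'func'):
        # No sub-command was given; show usage and exit with an error code.
        p.print_help()
        sys.exit(1)
    args.func(args)
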
Code example #13
File: _params.py Project: hpsan/azure-cli-extensions
def load_arguments(self, _):
    detections_directory_type = CLIArgumentType(options_list=['--detections-directory', '-d'],
                                                completer=DirectoriesCompleter(), type=file_type,
                                                help='Directory which contains the detection files')
    detection_file_type = CLIArgumentType(options_list=['--detection-file', '-f'],
                                          completer=FilesCompleter(allowednames=['json', 'yaml']),
                                          type=file_type, help="File path of the detection")
    detection_schema_type = CLIArgumentType(options_list=['--detection-schema', '-s'],
                                            completer=FilesCompleter(allowednames=['json', 'yaml'], directories=False),
                                            type=file_type, help="File path of the detection schema")
    data_sources_directory_type = CLIArgumentType(options_list=['--data-sources-directory', '-d'],
                                                  completer=DirectoriesCompleter(), type=file_type,
                                                  help='Directory which contains data source files')
    data_source_file_type = CLIArgumentType(options_list=['--data-source-file', '-f'],
                                            completer=FilesCompleter(allowednames=['json', 'yaml']),
                                            type=file_type, help="File path of the data source")
    data_source_schema_type = CLIArgumentType(options_list=['--data-source-schema', '-s'],
                                              completer=FilesCompleter(allowednames=['json', 'yaml'],
                                                                       directories=False),
                                              type=file_type, help="File path of the data source schema")

    with self.argument_context('sentinel') as c:
        c.argument('workspace_name', options_list=['--workspace-name', '-n'], help='Name of the Sentinel Workspace')

    with self.argument_context('sentinel detection create') as c:
        c.argument('detections_directory', detections_directory_type)
        c.argument('detection_file', detection_file_type)
        c.argument('enable_validation', options_list=['--enable-validation'],
                   arg_type=get_three_state_flag(), help='Enable/Disable detection validation before deploying it')
        c.argument('detection_schema', detection_schema_type)

    with self.argument_context('sentinel detection validate') as c:
        c.argument('detections_directory', detections_directory_type)
        c.argument('detection_file', detection_file_type)
        c.argument('detection_schema', detection_schema_type)

    with self.argument_context('sentinel detection generate') as c:
        c.argument('detections_directory', detections_directory_type)
        c.argument('skip_interactive', options_list=['--skip-interactive'],
                   arg_type=get_three_state_flag(), help='Enable/Disable interactive detection creation')
        # TODO: Add all detection configurations as arguments here
        c.argument('name', options_list=['--name', '-n'], help='Name of your detection (alphanumeric without spaces)')
        c.argument('create_directory', options_list=['--create-dir'],
                   arg_type=get_three_state_flag(), help='Enable/Disable creating new directory for the detection')
        c.argument('with_documentation', options_list=['--with-documentation', '--doc'],
                   arg_type=get_three_state_flag(), help='Enable/Disable detection documentation')

    with self.argument_context('sentinel data_source create') as c:
        c.argument('data_sources_directory', data_sources_directory_type)
        c.argument('data_source_file', data_source_file_type)
        c.argument('enable_validation', options_list=['--enable-validation'],
                   arg_type=get_three_state_flag(), help='Enable/Disable data source validation before deploying it')
        c.argument('data_source_schema', data_source_schema_type)

    with self.argument_context('sentinel data_source validate') as c:
        c.argument('data_sources_directory', data_sources_directory_type)
        c.argument('data_source_file', data_source_file_type)
        c.argument('data_source_schema', data_source_schema_type)

    with self.argument_context('sentinel data_source generate') as c:
        c.argument('data_sources_directory', data_sources_directory_type)
        c.argument('skip_interactive', options_list=['--skip-interactive'],
                   arg_type=get_three_state_flag(), help='Enable/Disable interactive data source creation')
        # TODO: Add all data source configurations as arguments here
        c.argument('name', options_list=['--name', '-n'], help='Name of your data source (alphanumeric without spaces)')
        c.argument('create_directory', options_list=['--create-dir'],
                   arg_type=get_three_state_flag(), help='Enable/Disable creating new directory for the data source')
        c.argument('with_documentation', options_list=['--with-documentation', '--doc'],
                   arg_type=get_three_state_flag(), help='Enable/Disable data source documentation')
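
For reference, the same completer classes can be attached to a plain argparse parser outside of azure-cli. The sketch below is an illustration under stated assumptions: the program name and option names are made up, and this is not how the extension above wires things internally.

import argparse

import argcomplete
from argcomplete.completers import DirectoriesCompleter, FilesCompleter

# Hypothetical stand-alone parser (assumption); only the completer wiring
# mirrors the CLIArgumentType definitions above.
p = argparse.ArgumentParser(prog='sentinel-tools')
# Offer only *.json / *.yaml files when completing --detection-file.
p.add_argument('--detection-file').completer = FilesCompleter(allowednames=['json', 'yaml'])
# Offer only directories when completing --detections-directory.
p.add_argument('--detections-directory').completer = DirectoriesCompleter()
argcomplete.autocomplete(p)
args = p.parse_args()
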
Code example #14
File: _params.py Project: vermashi/azure-cli
def load_arguments(self, _):

    with self.argument_context('batchai') as c:
        c.argument('resource_group', resource_group_name_type)
        c.argument('workspace_name',
                   id_part='name',
                   options_list=['--workspace', '-w'],
                   help='Name of workspace.')

    with self.argument_context('batchai workspace') as c:
        c.argument(
            'location',
            get_location_type(self.cli_ctx),
            help=
            'Location of the workspace. If omitted, the location of the resource group will be used.'
        )
        c.argument('workspace_name',
                   options_list=['--workspace', '-n'],
                   id_part='name',
                   help='Name of workspace.')

    with self.argument_context('batchai cluster') as c:
        c.argument('cluster_name',
                   options_list=['--name', '-n'],
                   id_part='child_name_1',
                   help='Name of cluster.')

    with self.argument_context('batchai cluster create') as c:
        c.argument(
            'json_file',
            options_list=['--config-file', '-f'],
            help=
            'A path to a json file containing cluster create parameters (json representation of azure.mgmt.batchai.models.ClusterCreateParameters).',
            arg_group='Advanced')

    with self.argument_context('batchai cluster create') as c:
        c.argument(
            'setup_task',
            help=
            'A command line which should be executed on each compute node when it is allocated or rebooted. The task is executed in a bash subshell under the root account.',
            arg_group='Setup Task')
        c.argument(
            'setup_task_output',
            help=
            'Directory path where setup-task\'s logs will be stored. Note, Batch AI will create several helper directories under this path. The created directories are reported as stdOutErrPathSuffix by \'az batchai cluster show\' command.',
            arg_group='Setup Task')

    with self.argument_context('batchai cluster create',
                               arg_group='Virtual Network') as c:
        c.argument(
            'subnet',
            options_list=['--subnet'],
            help='ARM ID of a virtual network subnet to put the cluster in.')

    with self.argument_context('batchai cluster create',
                               arg_group='Admin Account') as c:
        c.argument(
            'user_name',
            options_list=['--user-name', '-u'],
            help=
            'Name of admin user account to be created on each compute node. If the value is not provided and no user configuration is provided in the config file, current user\'s name will be used.'
        )
        c.argument(
            'ssh_key',
            options_list=['--ssh-key', '-k'],
            help=
            'Optional SSH public key value or path. If omitted and no password is specified, the default SSH key (~/.ssh/id_rsa.pub) will be used.',
            completer=FilesCompleter())
        c.argument(
            'generate_ssh_keys',
            action='store_true',
            help=
            'Generate SSH public and private key files in ~/.ssh directory (if missing).'
        )
        c.argument(
            'password',
            options_list=['--password', '-p'],
            help=
            'Optional password for the admin user account to be created on each compute node.'
        )

    with self.argument_context('batchai cluster create',
                               arg_group='Auto Storage') as c:
        c.argument(
            'use_auto_storage',
            action='store_true',
            help=
            'If provided, the command will create a storage account in a new or existing resource group named "batchaiautostorage". It will also create an Azure File Share named "batchaishare" and an Azure Blob Container named "batchaicontainer". The File Share and Blob Container will be mounted on each cluster node at $AZ_BATCHAI_MOUNT_ROOT/autoafs and $AZ_BATCHAI_MOUNT_ROOT/autobfs. If the resource group already exists and contains an appropriate storage account belonging to the same region as the cluster, this command will reuse the existing storage account.'
        )

    with self.argument_context('batchai cluster create',
                               arg_group='Nodes') as c:
        c.argument(
            'image',
            options_list=['--image', '-i'],
            help=
            'Operating system image for cluster nodes. The value may contain an alias ({0}) or specify image details in the form "publisher:offer:sku:version". If image configuration is not provided via command line or configuration file, Batch AI will choose a default OS image'
            .format(', '.join(custom.SUPPORTED_IMAGE_ALIASES.keys())))
        c.argument(
            'custom_image',
            help=
            'ARM ID of a virtual machine image to be used for node creation. Note, you need to provide --image containing information about the base image used to create this image.'
        )
        c.argument(
            'vm_size',
            options_list=['--vm-size', '-s'],
            help='VM size for cluster nodes (e.g. Standard_NC6 for 1 GPU node)',
            completer=get_vm_size_completion_list)
        c.argument(
            'target',
            options_list=['--target', '-t'],
            help=
            'Number of nodes which should be allocated immediately after cluster creation. If the cluster is in auto-scale mode, BatchAI can change the number of nodes later based on number of running and queued jobs.'
        )
        c.argument('min_nodes',
                   options_list=['--min'],
                   help='Min nodes count for the auto-scale cluster.',
                   type=int)
        c.argument('max_nodes',
                   options_list=['--max'],
                   help='Max nodes count for the auto-scale cluster.',
                   type=int)
        c.argument('vm_priority',
                   arg_type=get_enum_type(['dedicated', 'lowpriority']),
                   options_list=['--vm-priority'],
                   help="VM priority.")

    with self.argument_context('batchai cluster create',
                               arg_group='File Server Mount') as c:
        c.argument(
            'nfs',
            options_list=['--nfs'],
            help=
            'Name or ARM ID of a file server to be mounted on each cluster node. You need to provide full ARM ID if the file server belongs to a different workspace. Multiple NFS can be mounted using configuration file (see --config-file option).'
        )
        c.argument(
            'nfs_mount_path',
            options_list=['--nfs-mount-path'],
            help=
            'Relative mount path for NFS. The NFS will be available at $AZ_BATCHAI_MOUNT_ROOT/<relative_mount_path> folder.'
        )

    with self.argument_context('batchai cluster create',
                               arg_group='Azure Storage Mount') as c:
        c.argument(
            'account_name',
            options_list=['--storage-account-name'],
            help=
            'Storage account name for Azure File Shares and/or Azure Storage Containers to be mounted on each cluster node. Can be specified using AZURE_BATCHAI_STORAGE_ACCOUNT environment variable.'
        )
        c.argument(
            'account_key',
            options_list=['--storage-account-key'],
            help=
            'Storage account key. Required if the storage account belongs to a different subscription. Can be specified using AZURE_BATCHAI_STORAGE_KEY environment variable.'
        )
        c.argument(
            'azure_file_share',
            options_list=['--afs-name'],
            help=
            'Name of Azure File Share to be mounted on each cluster node. Must be used in conjunction with --storage-account-name. Multiple shares can be mounted using configuration file (see --config-file option).'
        )
        c.argument(
            'afs_mount_path',
            options_list=['--afs-mount-path'],
            help=
            'Relative mount path for Azure File share. The file share will be available at $AZ_BATCHAI_MOUNT_ROOT/<relative_mount_path> folder.'
        )
        c.argument(
            'container_name',
            options_list=['--bfs-name'],
            help=
            'Name of Azure Storage container to be mounted on each cluster node. Must be used in conjunction with --storage-account-name. Multiple containers can be mounted using configuration file (see --config-file option).'
        )
        c.argument(
            'container_mount_path',
            options_list=['--bfs-mount-path'],
            help=
            'Relative mount path for Azure Storage container. The container will be available at $AZ_BATCHAI_MOUNT_ROOT/<relative_mount_path> folder.'
        )

    with self.argument_context('batchai cluster resize') as c:
        c.argument('target',
                   options_list=['--target', '-t'],
                   help='Target number of compute nodes.')

    with self.argument_context('batchai cluster auto-scale') as c:
        c.argument('min_nodes',
                   options_list=['--min'],
                   help='Minimum number of nodes.')
        c.argument('max_nodes',
                   options_list=['--max'],
                   help='Maximum number of nodes.')

    for group in ['batchai cluster file', 'batchai cluster node']:
        with self.argument_context(group) as c:
            c.argument('cluster_name',
                       options_list=['--cluster', '-c'],
                       id_part=None,
                       help='Name of cluster.')
            c.argument('workspace_name',
                       id_part=None,
                       options_list=['--workspace', '-w'],
                       help='Name of workspace.')

    with self.argument_context('batchai cluster file list') as c:
        c.argument(
            'path',
            options_list=['--path', '-p'],
            help=
            'Relative path of a subfolder inside of the node setup task output directory.'
        )
        c.argument(
            'expiry',
            options_list=['--expiry'],
            help=
            'Time in minutes for how long generated download URLs should remain valid.'
        )

    with self.argument_context('batchai cluster list') as c:
        c.argument('workspace_name',
                   options_list=['--workspace', '-w'],
                   id_part=None,
                   help='Name of workspace.')
        c.ignore('clusters_list_options')

    with self.argument_context('batchai experiment') as c:
        c.argument('experiment_name',
                   options_list=['--name', '-n'],
                   id_part='resource_name',
                   help='Name of experiment.')
        c.argument('workspace_name',
                   options_list=['--workspace', '-w'],
                   id_part='name',
                   help='Name of workspace.')

    with self.argument_context('batchai experiment list') as c:
        c.argument('workspace_name',
                   id_part=None,
                   options_list=['--workspace', '-w'],
                   help='Name of workspace.')
        c.ignore('experiments_list_by_workspace_options')

    with self.argument_context('batchai job') as c:
        c.argument('job_name',
                   options_list=['--name', '-n'],
                   id_part='resource_name',
                   help='Name of job.')
        c.argument('experiment_name',
                   options_list=['--experiment', '-e'],
                   id_part='child_name_1',
                   help='Name of experiment.')

    with self.argument_context('batchai job create') as c:
        c.argument(
            'json_file',
            options_list=['--config-file', '-f'],
            help=
            'A path to a json file containing job create parameters (json representation of azure.mgmt.batchai.models.JobCreateParameters).'
        )
        c.argument(
            'cluster',
            options_list=['--cluster', '-c'],
            help=
            'Name or ARM ID of the cluster to run the job. You need to provide ARM ID if the cluster belongs to a different workspace.'
        )

    with self.argument_context('batchai job create',
                               arg_group='Azure Storage Mount') as c:
        c.argument(
            'account_name',
            options_list=['--storage-account-name'],
            help=
            'Storage account name for Azure File Shares and/or Azure Storage Containers to be mounted on each cluster node. Can be specified using AZURE_BATCHAI_STORAGE_ACCOUNT environment variable.'
        )
        c.argument(
            'account_key',
            options_list=['--storage-account-key'],
            help=
            'Storage account key. Required if the storage account belongs to a different subscription. Can be specified using AZURE_BATCHAI_STORAGE_KEY environment variable.'
        )
        c.argument(
            'azure_file_share',
            options_list=['--afs-name'],
            help=
            'Name of Azure File Share to mount during the job execution. The File Share will be mounted only on the nodes which are executing the job. Must be used in conjunction with --storage-account-name.  Multiple shares can be mounted using configuration file (see --config-file option).'
        )
        c.argument(
            'afs_mount_path',
            options_list=['--afs-mount-path'],
            help=
            'Relative mount path for Azure File Share. The File Share will be available at $AZ_BATCHAI_JOB_MOUNT_ROOT/<relative_mount_path> folder.'
        )
        c.argument(
            'container_name',
            options_list=['--bfs-name'],
            help=
            'Name of Azure Storage Blob Container to mount during the job execution. The container will be mounted only on the nodes which are executing the job. Must be used in conjunction with --storage-account-name. Multiple containers can be mounted using configuration file (see --config-file option).'
        )
        c.argument(
            'container_mount_path',
            options_list=['--bfs-mount-path'],
            help=
            'Relative mount path for Azure Storage Blob Container. The container will be available at $AZ_BATCHAI_JOB_MOUNT_ROOT/<relative_mount_path> folder.'
        )

    with self.argument_context('batchai job create',
                               arg_group='File Server Mount') as c:
        c.argument(
            'nfs',
            options_list=['--nfs'],
            help=
            'Name or ARM ID of the file server to be mounted during the job execution. You need to provide ARM ID if the file server belongs to a different workspace. You can configure multiple file servers using the job\'s configuration file.'
        )
        c.argument(
            'nfs_mount_path',
            options_list=['--nfs-mount-path'],
            help=
            'Relative mount path for NFS. The NFS will be available at $AZ_BATCHAI_JOB_MOUNT_ROOT/<relative_mount_path> folder.'
        )

    with self.argument_context('batchai job list') as c:
        c.argument('workspace_name',
                   id_part=None,
                   options_list=['--workspace', '-w'],
                   help='Name of workspace.')
        c.argument('experiment_name',
                   options_list=['--experiment', '-e'],
                   id_part=None,
                   help='Name of experiment.')
        c.ignore('jobs_list_by_experiment_options')

    for group in ['batchai job file', 'batchai job node']:
        with self.argument_context(group) as c:
            c.argument('job_name',
                       options_list=['--job', '-j'],
                       id_part=None,
                       help='Name of job.')
            c.argument('workspace_name',
                       id_part=None,
                       options_list=['--workspace', '-w'],
                       help='Name of workspace.')
            c.argument('experiment_name',
                       options_list=['--experiment', '-e'],
                       id_part=None,
                       help='Name of experiment.')
            c.argument(
                'output_directory_id',
                options_list=['--output-directory-id', '-d'],
                help=
                'The Id of the job\'s output directory (as specified by "id" element in outputDirectories collection in the job create parameters).'
            )
            c.argument('path',
                       options_list=['--path', '-p'],
                       help='Relative path in the given output directory.')

    with self.argument_context('batchai job file stream') as c:
        c.argument('file_name',
                   options_list=['--file-name', '-f'],
                   help='The name of the file to stream.')

    with self.argument_context('batchai job file list') as c:
        c.argument(
            'expiry',
            options_list=['--expiry'],
            type=int,
            help=
            'Time in minutes for how long generated download URL should remain valid.'
        )

    with self.argument_context('batchai job wait') as c:
        c.argument('check_interval_sec',
                   options_list=['--interval'],
                   help="Polling interval in seconds.")

    for group in ['batchai cluster node exec', 'batchai job node exec']:
        with self.argument_context(group) as c:
            c.argument(
                'cmdline',
                options_list=['--exec'],
                help=
                'Optional command line to be executed on the node. If not provided, the command will perform ports forwarding only.'
            )
            c.argument(
                'node_id',
                options_list=['--node-id', '-n'],
                help=
                'ID of the node to forward the ports to. If not provided, the command will be executed on the first available node.'
            )
            c.argument(
                'ports',
                options_list=['--address', '-L'],
                action='append',
                help=
                'Specifies that connections to the given TCP port or Unix socket on the local (client) host are to be forwarded to the given host and port, or Unix socket, on the remote side. e.g. -L 8080:localhost:8080'
            )
            c.argument('password',
                       options_list=['--password', '-p'],
                       help='Optional password to establish SSH connection.')
            c.argument(
                'ssh_private_key',
                options_list=['--ssh-private-key', '-k'],
                help=
                'Optional SSH private key path to establish SSH connection. If omitted, the default SSH private key will be used.'
            )

    with self.argument_context('batchai file-server') as c:
        c.argument(
            'resource_group',
            options_list=['--resource-group', '-g'],
            configured_default='default_workspace_resource_group',
            id_part='resource_group',
            help=
            'Name of resource group. You can configure a default value by setting up default workspace using `az batchai workspace set-default`.'
        )
        c.argument(
            'workspace',
            options_list=['--workspace', '-w'],
            configured_default='default_workspace_name',
            id_part='name',
            help=
            'Name or ARM ID of the workspace. You can configure a default workspace using `az batchai workspace set-default`.'
        )
        c.argument('file_server_name',
                   options_list=['--name', '-n'],
                   id_part='child_name_1',
                   help='Name of file server.')

    with self.argument_context('batchai file-server create') as c:
        c.argument('vm_size',
                   options_list=['--vm-size', '-s'],
                   help='VM size.',
                   completer=get_vm_size_completion_list)
        c.argument(
            'json_file',
            options_list=['--config-file', '-f'],
            help=
            'A path to a json file containing file server create parameters (json representation of azure.mgmt.batchai.models.FileServerCreateParameters). Note, parameters given via command line will overwrite parameters specified in the configuration file.',
            arg_group='Advanced')

    with self.argument_context('batchai file-server create',
                               arg_group='Storage Disks') as c:
        c.argument('disk_count', help='Number of disks.', type=int)
        c.argument('disk_size', help='Disk size in GB.', type=int)
        c.argument(
            'caching_type',
            arg_type=get_enum_type(['none', 'readonly', 'readwrite']),
            help=
            'Caching type for premium disks. If not provided via command line or in configuration file, no caching will be used.'
        )
        c.argument('storage_sku',
                   arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']),
                   help='The SKU of the storage account used to persist the VM.')

    with self.argument_context('batchai file-server create',
                               arg_group='Admin Account') as c:
        c.argument(
            'user_name',
            options_list=['--user-name', '-u'],
            help=
            'Name of admin user account to be created on NFS node. If the value is not provided and no user configuration is provided in the config file, current user\'s name will be used.'
        )
        c.argument(
            'ssh_key',
            options_list=['--ssh-key', '-k'],
            help=
            'Optional SSH public key value or path. If omitted and no password is specified, the default SSH key (~/.ssh/id_rsa.pub) will be used.',
            completer=FilesCompleter())
        c.argument(
            'generate_ssh_keys',
            action='store_true',
            help=
            'Generate SSH public and private key files in ~/.ssh directory (if missing).'
        )
        c.argument(
            'password',
            options_list=['--password', '-p'],
            help='Optional password for the admin user created on the NFS node.'
        )

    with self.argument_context('batchai file-server create',
                               arg_group='Virtual Network') as c:
        c.argument(
            'subnet',
            options_list=['--subnet'],
            help=
            'ARM ID of a virtual network subnet to put the file server in. If not provided via command line or in the configuration file, Batch AI will create a new virtual network and subnet under your subscription.'
        )

    with self.argument_context('batchai file-server list') as c:
        c.argument('workspace_name',
                   options_list=['--workspace', '-w'],
                   id_part=None,
                   help='Name of workspace.')
        c.ignore('file_servers_list_by_workspace_options')
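
In plain argcomplete terms, a completer such as get_vm_size_completion_list used above is conceptually just a callable that receives keyword arguments like prefix and parsed_args and returns candidate strings. The sketch below is a self-contained illustration under that assumption; the VM size list is made up and this is not the azure-cli implementation.

import argparse

import argcomplete


def vm_size_completer(prefix, parsed_args, **kwargs):
    # Illustrative candidates only (assumption); a real completer would query
    # the compute service for sizes available in the target location.
    sizes = ['Standard_NC6', 'Standard_NC12', 'Standard_D2_v3']
    return [size for size in sizes if size.startswith(prefix)]


p = argparse.ArgumentParser()
p.add_argument('--vm-size').completer = vm_size_completer
argcomplete.autocomplete(p)
args = p.parse_args()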