Code example #1
def copy_boot_volume_backup(ctx, from_json, wait_for_state, max_wait_seconds,
                            wait_interval_seconds, boot_volume_backup_id,
                            destination_region, display_name, kms_key_id):

    if isinstance(boot_volume_backup_id, six.string_types) and len(
            boot_volume_backup_id.strip()) == 0:
        raise click.UsageError(
            'Parameter --boot-volume-backup-id cannot be whitespace or empty string'
        )

    kwargs = {}
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(
        ctx.obj['request_id'])

    details = {}
    details['destinationRegion'] = destination_region

    if display_name is not None:
        details['displayName'] = display_name

    if kms_key_id is not None:
        details['kmsKeyId'] = kms_key_id

    client = cli_util.build_client('core', 'blockstorage', ctx)
    result = client.copy_boot_volume_backup(
        boot_volume_backup_id=boot_volume_backup_id,
        copy_boot_volume_backup_details=details,
        **kwargs)
    # The newly created resource will be in a different region from the origin region,
    # so we should build the client for the destination region.
    ctx.obj['region'] = destination_region
    client = cli_util.build_client('core', 'blockstorage', ctx)
    if wait_for_state:
        if hasattr(client, 'get_boot_volume_backup') and callable(
                getattr(client, 'get_boot_volume_backup')):
            try:
                wait_period_kwargs = {}
                if max_wait_seconds is not None:
                    wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
                if wait_interval_seconds is not None:
                    wait_period_kwargs[
                        'max_interval_seconds'] = wait_interval_seconds

                click.echo(
                    'Action completed. Waiting until the resource has entered state: {}'
                    .format(wait_for_state),
                    file=sys.stderr)
                result = oci.wait_until(
                    client, client.get_boot_volume_backup(result.data.id),
                    'lifecycle_state', wait_for_state, **wait_period_kwargs)
            except oci.exceptions.MaximumWaitTimeExceeded as e:
                # If we fail, we should show an error, but we should still provide the information to the customer
                click.echo(
                    'Failed to wait until the resource entered the specified state. Outputting last known resource state',
                    file=sys.stderr)
                cli_util.render_response(result, ctx)
                sys.exit(2)
            except Exception:
                click.echo(
                    'Encountered error while waiting for resource to enter the specified state. Outputting last known resource state',
                    file=sys.stderr)
                cli_util.render_response(result, ctx)
                raise
        else:
            click.echo(
                'Unable to wait for the resource to enter the specified state',
                file=sys.stderr)
    cli_util.render_response(result, ctx)
Code example #2
File: api.py  Project: danielavarelat/cli_apps_isabl
def get_instances(endpoint, identifiers=None, verbose=True, **filters):
    """
    Return instances from a list API endpoint.

    If neither `identifiers` nor `filters` are given, all objects in the database are retrieved.
    If both `identifiers` and `filters` are given, the identifiers may be further filtered.

    Arguments:
        endpoint (str): endpoint without API base URL (e.g. `analyses`).
        identifiers (list): List of identifiers.
        filters (dict): name, value pairs for API filtering.
        verbose (bool): print to stderr how many instances will be retrieved.

    Raises:
        click.UsageError: if a string identifier is given and the endpoint is not in individuals,
            samples or workflows.

    Returns:
        list: of types.SimpleNamespace objects loaded with dicts from API.
    """
    check_system_id = endpoint in {"individuals", "samples", "experiments"}
    check_name = endpoint in {"assemblies", "techniques", "tags"}
    instances = []

    if verbose:
        count = len(identifiers or [])
        count += 0 if identifiers else get_instances_count(endpoint, **filters)
        ids_msg = " at most " if identifiers else " "  # ids may be in filters
        count = f"Retrieving{ids_msg}{count} from {endpoint} API endpoint..."
        click.echo(count, err=True)

    if identifiers is None:
        instances += iterate(endpoint, **filters)
    else:
        for chunk in chunks(identifiers or [], 10000):
            filters["url"] = endpoint
            primary_keys = set()
            names = set()
            ids = set()

            for i in map(str, chunk):
                if i.isdigit():
                    primary_keys.add(i)
                elif check_name:
                    names.add(i)
                elif check_system_id:
                    ids.add(i)
                else:  # pragma: no cover
                    msg = f"msg invalid identifier for {endpoint}: {i}"
                    raise click.UsageError(msg)

            if primary_keys:
                instances += iterate(**{"pk__in": ",".join(primary_keys), **filters})

            if ids:
                instances += iterate(**{"system_id__in": ",".join(ids), **filters})

            if names:
                instances += iterate(**{"name__in": ",".join(names), **filters})

    return isablfy(instances)
Code example #3
File: delete_cli.py  Project: taufiqibrahim/datahub
def delete(
    urn: str,
    force: bool,
    soft: bool,
    env: str,
    platform: str,
    entity_type: str,
    query: str,
    registry_id: str,
    dry_run: bool,
) -> None:
    """Delete metadata from datahub using a single urn or a combination of filters"""

    cli_utils.test_connectivity_complain_exit("delete")
    # at least one of urn / platform / env / query / registry_id must be provided
    if not urn and not platform and not env and not query and not registry_id:
        raise click.UsageError(
            "You must provide either an urn or a platform or an env or a query for me to delete anything"
        )

    # default query is set to "*" if not provided
    query = "*" if query is None else query

    if not force and not soft and not dry_run:
        click.confirm(
            "This will permanently delete data from DataHub. Do you want to continue?",
            abort=True,
        )

    if urn:
        # Single urn based delete
        session, host = cli_utils.get_session_and_host()
        entity_type = guess_entity_type(urn=urn)
        logger.info(f"DataHub configured with {host}")
        deletion_result: DeletionResult = delete_one_urn_cmd(
            urn,
            soft=soft,
            dry_run=dry_run,
            entity_type=entity_type,
            cached_session_host=(session, host),
        )

        if not dry_run:
            if deletion_result.num_records == 0:
                click.echo(f"Nothing deleted for {urn}")
            else:
                click.echo(
                    f"Successfully deleted {urn}. {deletion_result.num_records} rows deleted"
                )
    elif registry_id:
        # Registry-id based delete
        if soft and not dry_run:
            raise click.UsageError(
                "Soft-deleting with a registry-id is not yet supported. Try --dry-run to see what you will be deleting, before issuing a hard-delete using the --hard flag"
            )
        deletion_result = delete_for_registry(registry_id=registry_id,
                                              soft=soft,
                                              dry_run=dry_run)
    else:
        # Filter based delete
        deletion_result = delete_with_filters(
            env=env,
            platform=platform,
            dry_run=dry_run,
            soft=soft,
            entity_type=entity_type,
            search_query=query,
            force=force,
        )

    if not dry_run:
        message = "soft delete" if soft else "hard delete"
        click.echo(
            f"Took {(deletion_result.end_time_millis-deletion_result.start_time_millis)/1000.0} seconds to {message} {deletion_result.num_records} rows for {deletion_result.num_entities} entities"
        )
    else:
        click.echo(
            f"{deletion_result.num_entities} entities with {deletion_result.num_records if deletion_result.num_records != UNKNOWN_NUM_RECORDS else 'unknown'} rows will be affected. Took {(deletion_result.end_time_millis-deletion_result.start_time_millis)/1000.0} seconds to evaluate."
        )
    if deletion_result.sample_records:
        click.echo(
            tabulate(deletion_result.sample_records,
                     RUN_TABLE_COLUMNS,
                     tablefmt="grid"))
Code example #4
def parse_args(self, ctx, args):
    if not args and self.no_args_is_help and not ctx.resilient_parsing:
        raise click.UsageError(ctx.get_help())
    return super().parse_args(ctx, args)
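
The override above raises a UsageError carrying the full help text when a command is invoked without arguments. Below is a minimal, self-contained sketch of how such an override might be attached to a command; the class name NoArgsShowsHelp and the greet command are hypothetical, and the gate on self.no_args_is_help is dropped so the sketch stands alone.

import click

class NoArgsShowsHelp(click.Command):
    # Hypothetical command class: surface the help text as a usage error
    # when the command is invoked with no arguments at all.
    def parse_args(self, ctx, args):
        if not args and not ctx.resilient_parsing:
            raise click.UsageError(ctx.get_help())
        return super().parse_args(ctx, args)

@click.command(cls=NoArgsShowsHelp)
@click.argument("name")
def greet(name):
    click.echo(f"Hello, {name}!")

if __name__ == "__main__":
    greet()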
Code example #5
File: build.py  Project: ladislas/mbed-tools
def _validate_target_and_toolchain_args(target: str, toolchain: str) -> None:
    if not all([toolchain, target]):
        raise click.UsageError("--toolchain and --mbed-target arguments are required when generating Mbed config!")
Code example #6
def update_data_mask_rule_target_ids_selected_extended(ctx, from_json, force, wait_for_state, max_wait_seconds, wait_interval_seconds, data_mask_rule_id, display_name, compartment_id, iam_group_id, data_mask_categories, data_mask_rule_status, freeform_tags, defined_tags, if_match, target_selected_values):

    if isinstance(data_mask_rule_id, six.string_types) and len(data_mask_rule_id.strip()) == 0:
        raise click.UsageError('Parameter --data-mask-rule-id cannot be whitespace or empty string')
    if not force:
        if data_mask_categories or freeform_tags or defined_tags:
            if not click.confirm("WARNING: Updates to data-mask-categories and freeform-tags and defined-tags will replace any existing values. Are you sure you want to continue?"):
                ctx.abort()

    kwargs = {}
    if if_match is not None:
        kwargs['if_match'] = if_match
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])

    _details = {}
    _details['targetSelected'] = {}

    if display_name is not None:
        _details['displayName'] = display_name

    if compartment_id is not None:
        _details['compartmentId'] = compartment_id

    if iam_group_id is not None:
        _details['iamGroupId'] = iam_group_id

    if data_mask_categories is not None:
        _details['dataMaskCategories'] = data_mask_categories

    if data_mask_rule_status is not None:
        _details['dataMaskRuleStatus'] = data_mask_rule_status

    if freeform_tags is not None:
        _details['freeformTags'] = cli_util.parse_json_parameter("freeform_tags", freeform_tags)

    if defined_tags is not None:
        _details['definedTags'] = cli_util.parse_json_parameter("defined_tags", defined_tags)

    if target_selected_values is not None:
        _details['targetSelected']['values'] = cli_util.parse_json_parameter("target_selected_values", target_selected_values)

    _details['targetSelected']['kind'] = 'TARGETIDS'

    client = cli_util.build_client('cloud_guard', 'cloud_guard', ctx)
    result = client.update_data_mask_rule(
        data_mask_rule_id=data_mask_rule_id,
        update_data_mask_rule_details=_details,
        **kwargs
    )
    if wait_for_state:

        if hasattr(client, 'get_data_mask_rule') and callable(getattr(client, 'get_data_mask_rule')):
            try:
                wait_period_kwargs = {}
                if max_wait_seconds is not None:
                    wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
                if wait_interval_seconds is not None:
                    wait_period_kwargs['max_interval_seconds'] = wait_interval_seconds

                click.echo('Action completed. Waiting until the resource has entered state: {}'.format(wait_for_state), file=sys.stderr)
                result = oci.wait_until(client, client.get_data_mask_rule(result.data.id), 'lifecycle_state', wait_for_state, **wait_period_kwargs)
            except oci.exceptions.MaximumWaitTimeExceeded as e:
                # If we fail, we should show an error, but we should still provide the information to the customer
                click.echo('Failed to wait until the resource entered the specified state. Outputting last known resource state', file=sys.stderr)
                cli_util.render_response(result, ctx)
                sys.exit(2)
            except Exception:
                click.echo('Encountered error while waiting for resource to enter the specified state. Outputting last known resource state', file=sys.stderr)
                cli_util.render_response(result, ctx)
                raise
        else:
            click.echo('Unable to wait for the resource to enter the specified state', file=sys.stderr)
    cli_util.render_response(result, ctx)
Code example #7
File: users.py  Project: sajadkh61/r8
def cli(entry_filter, challenge_filter, transpose, format, teams, team_solves):
    """View users and their progress."""

    if teams and not team_solves:
        raise click.UsageError(
            "--teams and --no-team-solves are mutually exclusive.")

    with r8.db:
        all_challenges = r8.db.execute(
            "SELECT cid, team FROM challenges WHERE t_start < datetime('now') ORDER BY ROWID"
        ).fetchall()
        user_info = r8.db.execute(
            "SELECT uid, tid FROM users NATURAL LEFT JOIN teams ORDER BY users.ROWID"
        ).fetchall()
        submissions = r8.db.execute(
            "SELECT uid, tid, cid FROM submissions NATURAL JOIN flags NATURAL LEFT JOIN teams"
        ).fetchall()

    entries = []  # either teams or users
    team_users = collections.defaultdict(list)
    for uid, tid in user_info:
        if teams:
            entries.append(tid)
        else:
            entries.append(uid)
        team_users[tid].append(uid)

    # remove duplicate teams
    entries = list(dict.fromkeys(entries))

    if entry_filter:
        entries = [
            entry for entry in entries if any(
                entry.startswith(x) for x in entry_filter)
        ]
    entry_index = {x: i for i, x in enumerate(entries)}

    challenges = {}
    for cid, is_team_challenge in all_challenges:
        if teams and not is_team_challenge:
            continue
        if challenge_filter and not any(
                cid.startswith(c) for c in challenge_filter):
            continue
        challenges[cid] = is_team_challenge

    if format == "table":
        SOLVED = "OK"
        NOT_SOLVED = "FAIL"
    else:
        SOLVED = "TRUE"
        NOT_SOLVED = "FALSE"

    solved = {cid: [NOT_SOLVED] * len(entries) for cid in challenges}
    if teams:
        for _, tid, cid in submissions:
            if cid in challenges and tid in entry_index:
                solved[cid][entry_index[tid]] = SOLVED
    else:
        for uid, tid, cid in submissions:
            if cid in challenges:
                if challenges[cid] and team_solves:
                    for uid in team_users[tid]:
                        if uid in entry_index:
                            solved[cid][entry_index[uid]] = SOLVED
                elif uid in entry_index:
                    solved[cid][entry_index[uid]] = SOLVED

    if not transpose and teams:
        header = "Team"
    elif not transpose and not teams:
        header = "User"
    else:
        header = "Challenge"

    table_contents = ([[header] + entries] +
                      [[cid] + solved for cid, solved in solved.items()])
    if format == "table":
        for row in table_contents:
            row[0] = row[0][:22]
    if not transpose:
        table_contents = list(zip(*table_contents))

    if format == "table":
        table = texttable.Texttable(shutil.get_terminal_size((0, 0))[0])
        table.set_cols_align(["l"] + ["c"] * (len(table_contents[0]) - 1))
        table.set_deco(table.BORDER | table.HEADER | table.VLINES)
        table.add_rows(table_contents)

        try:
            table._compute_cols_width()
        except ValueError:
            table._max_width = False

        tbl = table.draw()
        print(
            tbl.replace(SOLVED, click.style(SOLVED, fg="green")).replace(
                NOT_SOLVED, click.style(NOT_SOLVED, fg="red")))
    else:
        for row in table_contents:
            print(", ".join((x or "").replace(",", ";") for x in row))
Code example #8
def fail(lineno, line, reason):
    raise click.UsageError(
        f'{script_file.name}:{lineno}:"{line}": {reason}')
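
The fail() helper above is a local closure that turns a line-level validation failure into a UsageError pointing at the offending file, line number, and line content. The sketch below shows the same pattern in a self-contained form; check_script and its KEY=VALUE rule are hypothetical stand-ins for the original parser.

import click

def check_script(script_file):
    def fail(lineno, line, reason):
        raise click.UsageError(
            f'{script_file.name}:{lineno}:"{line}": {reason}')

    for lineno, line in enumerate(script_file, start=1):
        line = line.rstrip("\n")
        # Hypothetical rule: every non-empty, non-comment line must be KEY=VALUE.
        if line and not line.startswith("#") and "=" not in line:
            fail(lineno, line, "expected KEY=VALUE")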
Code example #9
def python(ctx, runnable, gateway, image, env, passenv, kill, download,
           allow_overwrite, signum, timeout, detach, port, args):
    """
        Run a python script on the gateway
    """
    if not runnable and not kill:
        raise click.UsageError('Please supply a RUNNABLE or the --kill option')

    if not allow_overwrite:
        for filename in download:
            file = pathlib.Path(filename)
            if file.exists():
                raise click.UsageError(
                    f'File {filename} exists; please rename it or use the --allow-overwrite flag'
                )

    session = ctx.obj.session
    if gateway is None:
        gateway = get_default_gateway(ctx)

    if kill:
        signum = _get_signal_number(signum)
        resp = session.kill_python(gateway, None, signum)
        resp.raise_for_status()
        return

    thread = threading.Thread(target=debug_tunnel,
                              args=(ctx, gateway),
                              daemon=True)
    thread.start()

    post_data = [
        ('image', image),
        ('stdout_is_stderr', stdout_is_stderr()),
        ('detach', '1' if detach else '0'),
    ]
    post_data.extend(zip(itertools.repeat('args'), args))
    post_data.extend(zip(itertools.repeat('env'), env))
    lager_process_id = str(uuid.uuid4())
    post_data.append(('env', f'LAGER_PROCESS_ID={lager_process_id}'))
    post_data.extend(
        zip(itertools.repeat('env'),
            [f'{name}={os.environ[name]}' for name in passenv]))
    post_data.extend(
        zip(itertools.repeat('portforwards'),
            [json.dumps(p._asdict()) for p in port]))

    if timeout is not None:
        post_data.append(('timeout', timeout))

    if os.path.isfile(runnable):
        post_data.append(('script', open(runnable, 'rb')))
    elif os.path.isdir(runnable):
        try:
            max_content_size = 20_000_000
            zipped_folder = zip_dir(runnable,
                                    max_content_size=max_content_size)
        except SizeLimitExceeded:
            click.secho(
                f'Folder content exceeds max size of {max_content_size:,} bytes',
                err=True,
                fg='red')
            ctx.exit(1)

        if len(zipped_folder) > MAX_ZIP_SIZE:
            click.secho(
                f'Zipped module content exceeds max size of {MAX_ZIP_SIZE:,} bytes',
                err=True,
                fg='red')
            ctx.exit(1)

        post_data.append(('module', zipped_folder))

    resp = session.run_python(gateway, files=post_data)
    kill_python = functools.partial(session.kill_python, gateway,
                                    lager_process_id)
    handler = functools.partial(sigint_handler, kill_python)
    signal.signal(signal.SIGINT, handler)

    try:
        for (datatype, content) in stream_python_output(resp):
            if datatype == StreamDatatypes.EXIT:
                _do_exit(content, gateway, session, download)
            elif datatype == StreamDatatypes.STDOUT:
                click.echo(content, nl=False)
            elif datatype == StreamDatatypes.STDERR:
                click.echo(content, nl=False, err=True)
            elif datatype == StreamDatatypes.OUTPUT:
                click.echo(content)

    except OutputFormatNotSupported:
        click.secho('Response format not supported. Please upgrade lager-cli',
                    fg='red',
                    err=True)
        sys.exit(1)
Code example #10
def component_elastic_load_balancer_v2(definition, configuration: dict,
                                       args: TemplateArguments, info: dict,
                                       force, account_info: AccountArguments):
    lb_name = configuration["Name"]
    # domains pointing to the load balancer
    subdomain = ''
    main_zone = None
    for name, domain in configuration.get('Domains', {}).items():
        name = '{}{}'.format(lb_name, name)

        domain_name = "{0}.{1}".format(domain["Subdomain"], domain["Zone"])

        convert_cname_records_to_alias(domain_name)

        properties = {
            "Type": "A",
            "Name": domain_name,
            "HostedZoneName": domain["Zone"],
            "AliasTarget": {
                "HostedZoneId": {
                    "Fn::GetAtt": [lb_name, "CanonicalHostedZoneID"]
                },
                "DNSName": {
                    "Fn::GetAtt": [lb_name, "DNSName"]
                }
            }
        }
        definition["Resources"][name] = {
            "Type": "AWS::Route53::RecordSet",
            "Properties": properties
        }

        if domain["Type"] == "weighted":
            definition["Resources"][name]["Properties"]['Weight'] = 0
            definition["Resources"][name]["Properties"][
                'SetIdentifier'] = "{0}-{1}".format(info["StackName"],
                                                    info["StackVersion"])
            subdomain = domain['Subdomain']
            main_zone = domain['Zone']  # type: str

    target_group_name = lb_name + 'TargetGroup'
    listeners = configuration.get('Listeners') or get_listeners(
        lb_name, target_group_name, subdomain, main_zone, configuration,
        account_info)

    health_check_protocol = configuration.get('HealthCheckProtocol') or 'HTTP'

    if health_check_protocol not in ALLOWED_HEALTH_CHECK_PROTOCOLS:
        raise click.UsageError(
            'Protocol "{}" is not supported for LoadBalancer'.format(
                health_check_protocol))

    health_check_path = configuration.get("HealthCheckPath") or '/health'
    health_check_port = configuration.get(
        "HealthCheckPort") or configuration["HTTPPort"]

    if configuration.get('LoadBalancerName'):
        loadbalancer_name = generate_valid_cloud_name(
            configuration["LoadBalancerName"], 32)
    elif configuration.get('NameSuffix'):
        version = '{}-{}'.format(info["StackVersion"],
                                 configuration['NameSuffix'])
        loadbalancer_name = get_load_balancer_name(info["StackName"], version)
        del (configuration['NameSuffix'])
    else:
        loadbalancer_name = get_load_balancer_name(info["StackName"],
                                                   info["StackVersion"])

    loadbalancer_scheme = configuration.get('Scheme') or 'internal'
    if loadbalancer_scheme == 'internet-facing':
        click.secho(
            'You are deploying an internet-facing ELB that will be '
            'publicly accessible! You should have OAUTH2 and HTTPS '
            'in place!',
            bold=True,
            err=True)

    if loadbalancer_scheme not in ALLOWED_LOADBALANCER_SCHEMES:
        raise click.UsageError(
            'Scheme "{}" is not supported for LoadBalancer'.format(
                loadbalancer_scheme))

    if loadbalancer_scheme == "internal":
        loadbalancer_subnet_map = "LoadBalancerInternalSubnets"
    else:
        loadbalancer_subnet_map = "LoadBalancerSubnets"

    vpc_id = configuration.get("VpcId") or account_info.VpcID

    tags = [
        # Tag "Name"
        {
            "Key": "Name",
            "Value": "{0}-{1}".format(info["StackName"], info["StackVersion"])
        },
        # Tag "StackName"
        {
            "Key": "StackName",
            "Value": info["StackName"],
        },
        # Tag "StackVersion"
        {
            "Key": "StackVersion",
            "Value": info["StackVersion"]
        }
    ]

    # load balancer
    definition["Resources"][lb_name] = {
        "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer",
        "Properties": {
            'Name':
            loadbalancer_name,
            'Scheme':
            loadbalancer_scheme,
            'SecurityGroups':
            resolve_security_groups(configuration["SecurityGroups"],
                                    args.region),
            'Subnets': {
                "Fn::FindInMap":
                [loadbalancer_subnet_map, {
                    "Ref": "AWS::Region"
                }, "Subnets"]
            },
            'LoadBalancerAttributes': [{
                "Key": "idle_timeout.timeout_seconds",
                "Value": "60"
            }],
            "Tags":
            tags
        }
    }
    definition["Resources"][target_group_name] = {
        'Type': 'AWS::ElasticLoadBalancingV2::TargetGroup',
        'Properties': {
            'Name':
            loadbalancer_name,
            'HealthCheckIntervalSeconds':
            '10',
            'HealthCheckPath':
            health_check_path,
            'HealthCheckPort':
            health_check_port,
            'HealthCheckProtocol':
            health_check_protocol,
            'HealthCheckTimeoutSeconds':
            '5',
            'HealthyThresholdCount':
            '2',
            'Port':
            configuration['HTTPPort'],
            'Protocol':
            'HTTP',
            'UnhealthyThresholdCount':
            '2',
            'VpcId':
            vpc_id,
            'Tags':
            tags,
            'TargetGroupAttributes': [{
                'Key': 'deregistration_delay.timeout_seconds',
                'Value': '60'
            }]
        }
    }
    resource_names = set([lb_name, target_group_name])
    for i, listener in enumerate(listeners):
        if i == 0:
            suffix = ''
        else:
            suffix = str(i + 1)
        resource_name = lb_name + 'Listener' + suffix
        definition['Resources'][resource_name] = listener
        resource_names.add(resource_name)
    for key, val in configuration.items():
        # overwrite any specified properties, but only properties which were defined by us already
        for res in resource_names:
            if key in definition['Resources'][res][
                    'Properties'] and key not in SENZA_PROPERTIES:
                definition['Resources'][res]['Properties'][key] = val
    return definition
Code example #11
def _execute_backfill_command_at_location(cli_args, print_fn, instance,
                                          repo_location):
    external_repo = get_external_repository_from_repo_location(
        repo_location, cli_args.get("repository"))

    external_pipeline = get_external_pipeline_from_external_repo(
        external_repo,
        cli_args.get("pipeline"),
    )

    noprompt = cli_args.get("noprompt")

    pipeline_partition_set_names = {
        external_partition_set.name: external_partition_set
        for external_partition_set in
        external_repo.get_external_partition_sets()
        if external_partition_set.pipeline_name == external_pipeline.name
    }

    if not pipeline_partition_set_names:
        raise click.UsageError(
            "No partition sets found for pipeline `{}`".format(
                external_pipeline.name))
    partition_set_name = cli_args.get("partition_set")
    if not partition_set_name:
        if len(pipeline_partition_set_names) == 1:
            partition_set_name = next(iter(
                pipeline_partition_set_names.keys()))
        elif noprompt:
            raise click.UsageError(
                "No partition set specified (see option `--partition-set`)")
        else:
            partition_set_name = click.prompt(
                "Select a partition set to use for backfill: {}".format(
                    ", ".join(x for x in pipeline_partition_set_names.keys())))

    partition_set = pipeline_partition_set_names.get(partition_set_name)

    if not partition_set:
        raise click.UsageError(
            "No partition set found named `{}`".format(partition_set_name))

    mode = partition_set.mode
    solid_selection = partition_set.solid_selection
    run_tags = get_tags_from_args(cli_args)

    repo_handle = RepositoryHandle(
        repository_name=external_repo.name,
        repository_location_handle=repo_location.location_handle,
    )

    # Resolve partitions to backfill
    partition_names_or_error = repo_location.get_external_partition_names(
        repo_handle,
        partition_set_name,
    )

    if isinstance(partition_names_or_error,
                  ExternalPartitionExecutionErrorData):
        raise DagsterBackfillFailedError(
            "Failure fetching partition names for {partition_set_name}: {error_message}"
            .format(
                partition_set_name=partition_set_name,
                error_message=partition_names_or_error.error.message,
            ),
            serialized_error_info=partition_names_or_error.error,
        )

    partition_names = gen_partition_names_from_args(
        partition_names_or_error.partition_names, cli_args)

    # Print backfill info
    print_fn("\n     Pipeline: {}".format(external_pipeline.name))
    print_fn("Partition set: {}".format(partition_set_name))
    print_fn("   Partitions: {}\n".format(
        print_partition_format(partition_names, indent_level=15)))

    # Confirm and launch
    if noprompt or click.confirm(
            "Do you want to proceed with the backfill ({} partitions)?".format(
                len(partition_names))):

        print_fn("Launching runs... ")

        backfill_id = make_new_backfill_id()
        backfill_tags = PipelineRun.tags_for_backfill_id(backfill_id)
        partition_execution_data = repo_location.get_external_partition_set_execution_param_data(
            repository_handle=repo_handle,
            partition_set_name=partition_set_name,
            partition_names=partition_names,
        )

        if isinstance(partition_execution_data,
                      ExternalPartitionExecutionErrorData):
            return print_fn("Backfill failed: {}".format(
                partition_execution_data.error))

        assert isinstance(partition_execution_data,
                          ExternalPartitionSetExecutionParamData)

        for partition_data in partition_execution_data.partition_data:
            run = _create_external_pipeline_run(
                instance=instance,
                repo_location=repo_location,
                external_repo=external_repo,
                external_pipeline=external_pipeline,
                run_config=partition_data.run_config,
                mode=mode,
                preset=None,
                tags=merge_dicts(
                    merge_dicts(partition_data.tags, backfill_tags), run_tags),
                solid_selection=frozenset(solid_selection)
                if solid_selection else None,
            )

            instance.launch_run(run.run_id, external_pipeline)

        print_fn("Launched backfill job `{}`".format(backfill_id))

    else:
        print_fn("Aborted!")
Code example #12
def ls(paths, fields=None, format="auto", recursive=False, jobs=6):
    """List .nwb files and dandisets metadata.
    """
    # TODO: more logical ordering in case of fields = None
    from .formatter import JSONFormatter, PYOUTFormatter, YAMLFormatter
    from ..consts import metadata_all_fields

    # TODO: avoid
    from ..support.pyout import PYOUT_SHORT_NAMES_rev
    from ..utils import find_files

    common_fields = ("path", "size")
    all_fields = tuple(sorted(set(common_fields + metadata_all_fields)))

    if fields is not None:
        if fields.strip() == "":
            display_known_fields(all_fields)
            return

        fields = fields.split(",")
        # Map possibly present short names back to full names
        fields = [PYOUT_SHORT_NAMES_rev.get(f.lower(), f) for f in fields]
        unknown_fields = set(fields).difference(all_fields)
        if unknown_fields:
            display_known_fields(all_fields)
            raise click.UsageError("Following fields are not known: %s" %
                                   ", ".join(unknown_fields))

    urls = map(is_url, paths)

    # Actually I do not see why and it could be useful to compare local-vs-remote
    # if any(urls) and not all(urls):
    #     raise ValueError(f"ATM cannot mix URLs with local paths. Got {paths}")

    def assets_gen():
        for path in paths:
            if is_url(path):
                from ..dandiarchive import navigate_url

                with navigate_url(path) as (client, dandiset, assets):
                    if dandiset:
                        rec = {
                            "path":
                            dandiset.pop("dandiset",
                                         {}).get("identifier",
                                                 "ERR#%s" % id(dandiset))
                        }
                        # flatten the metadata into record to display
                        # rec.update(dandiset.get('metadata', {}))
                        rec.update(dandiset)
                        yield rec
                    if recursive and assets:
                        yield from assets
            else:
                # For now we support only individual files
                yield path
                if recursive:
                    yield from find_files(r"\.nwb\Z", path)

    if format == "auto":
        format = "yaml" if any(urls) or (len(paths) == 1
                                         and not recursive) else "pyout"

    if format == "pyout":
        if fields and fields[0] != "path":
            # we must always have path - our "id"
            fields = ["path"] + fields
        out = PYOUTFormatter(fields=fields, wait_for_top=3, max_workers=jobs)
    elif format == "json":
        out = JSONFormatter()
    elif format == "json_pp":
        out = JSONFormatter(indent=2)
    elif format == "yaml":
        out = YAMLFormatter()
    else:
        raise NotImplementedError("Unknown format %s" % format)

    async_keys = set(all_fields)
    if fields is not None:
        async_keys = async_keys.intersection(fields)
    async_keys = tuple(async_keys.difference(common_fields))

    errors = defaultdict(list)  # problem: [] paths
    with out:
        for asset in assets_gen():
            if isinstance(asset, str):  # path
                rec = {}
                rec["path"] = asset

                try:
                    if (not fields
                            or "size" in fields) and not op.isdir(asset):
                        rec["size"] = os.stat(asset).st_size

                    if async_keys:
                        cb = get_metadata_ls(asset,
                                             async_keys,
                                             errors=errors,
                                             flatten=format == "pyout")
                        if format == "pyout":
                            rec[async_keys] = cb
                        else:
                            # TODO: parallel execution
                            # For now just call callback and get all the fields
                            cb_res = cb()
                            # TODO: we should stop masking exceptions in get_metadata_ls,
                            # and centralize the logic regardless of whether it is for pyout or not,
                            # and do the parallelization on our end
                            if cb_res is None:
                                raise
                            for k, v in cb_res.items():
                                rec[k] = v
                except Exception as exc:
                    _add_exc_error(asset, rec, errors, exc)
            elif isinstance(asset, dict):
                # ready record
                # TODO: harmonization for pyout
                rec = asset
            else:
                raise TypeError(asset)

            if not rec:
                errors["Empty record"].append(asset)
                lgr.debug("Skipping a record for %s since empty", asset)
                continue
            out(rec)
    if errors:
        lgr.warning(
            "Failed to operate on some paths (empty records were listed):\n %s",
            "\n ".join("%s: %d paths" % (k, len(v))
                       for k, v in errors.items()),
        )
Code example #13
def cli(user, password, geometry, start, end, uuid, name, download, sentinel,
        producttype, instrument, cloud, footprints, path, query, url, order_by,
        limit):
    """Search for Sentinel products and, optionally, download all the results
    and/or create a geojson file with the search result footprints.
    Beyond your Copernicus Open Access Hub user and password, you must pass a geojson file
    containing the geometry of the area you want to search for or the UUIDs of the products. If you
    don't specify the start and end dates, it will search in the last 24 hours.
    """

    _set_logger_handler()

    if user is None or password is None:
        try:
            user, password = requests.utils.get_netrc_auth(url)
        except TypeError:
            pass

    if user is None or password is None:
        raise click.UsageError(
            'Missing --user and --password. Please see docs '
            'for environment variables and .netrc support.')

    api = SentinelAPI(user, password, url)

    search_kwargs = {}
    if sentinel and not (producttype or instrument):
        search_kwargs["platformname"] = "Sentinel-" + sentinel

    if instrument and not producttype:
        search_kwargs["instrumentshortname"] = instrument

    if producttype:
        search_kwargs["producttype"] = producttype

    if cloud:
        if sentinel not in ['2', '3']:
            logger.error('Cloud cover is only supported for Sentinel 2 and 3.')
            exit(1)
        search_kwargs["cloudcoverpercentage"] = (0, cloud)

    if query is not None:
        search_kwargs.update((x.split('=') for x in query))

    if geometry is not None:
        search_kwargs['area'] = geojson_to_wkt(read_geojson(geometry))

    if uuid is not None:
        uuid_list = [x.strip() for x in uuid]
        products = {}
        for productid in uuid_list:
            try:
                products[productid] = api.get_product_odata(productid)
            except SentinelAPIError as e:
                if 'Invalid key' in e.msg:
                    logger.error('No product with ID \'%s\' exists on server',
                                 productid)
                    exit(1)
                else:
                    raise
    elif name is not None:
        search_kwargs["identifier"] = name[0] if len(
            name) == 1 else '(' + ' OR '.join(name) + ')'
        products = api.query(order_by=order_by, limit=limit, **search_kwargs)
    else:
        start = start or "19000101"
        end = end or "NOW"
        products = api.query(date=(start, end),
                             order_by=order_by,
                             limit=limit,
                             **search_kwargs)

    if footprints is True:
        footprints_geojson = api.to_geojson(products)
        with open(os.path.join(path, "search_footprints.geojson"),
                  "w") as outfile:
            outfile.write(gj.dumps(footprints_geojson))

    if download is True:
        product_infos, triggered, failed_downloads = api.download_all(
            products, path)
        if len(failed_downloads) > 0:
            with open(os.path.join(path, "corrupt_scenes.txt"),
                      "w") as outfile:
                for failed_id in failed_downloads:
                    outfile.write("%s : %s\n" %
                                  (failed_id, products[failed_id]['title']))
    else:
        for product_id, props in products.items():
            if uuid is None:
                logger.info('Product %s - %s', product_id, props['summary'])
            else:  # querying uuids has no summary key
                logger.info('Product %s - %s - %s MB', product_id,
                            props['title'],
                            round(int(props['size']) / (1024. * 1024.), 2))
        if uuid is None:
            logger.info('---')
            logger.info('%s scenes found with a total size of %.2f GB',
                        len(products), api.get_products_size(products))
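
The credential handling at the top of this command (falling back to ~/.netrc via requests and raising a UsageError when nothing is found) can be read in isolation. The sketch below is a hedged extraction of that logic, assuming requests is installed; the helper name resolve_credentials is hypothetical.

import click
import requests.utils

def resolve_credentials(user, password, url):
    # Fall back to ~/.netrc credentials when --user/--password are missing.
    if user is None or password is None:
        try:
            user, password = requests.utils.get_netrc_auth(url)
        except TypeError:
            pass
    if user is None or password is None:
        raise click.UsageError(
            'Missing --user and --password. Please see docs '
            'for environment variables and .netrc support.')
    return user, password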
Code example #14
File: cli.py  Project: tumregels/lektor
def fail(msg):
    if as_json:
        echo_json({'success': False, 'error': msg})
        sys.exit(1)
    raise click.UsageError('Could not find content file info: %s' % msg)
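
A minimal, self-contained sketch of the pattern above: a local fail() helper that either emits a JSON error and exits or raises a UsageError, depending on an --as-json flag. The command name and option are illustrative, and lektor's echo_json is replaced with a plain json.dumps so the sketch runs on its own.

import json
import sys
import click

@click.command()
@click.option("--as-json", is_flag=True)
def content_file_info(as_json):
    def fail(msg):
        if as_json:
            click.echo(json.dumps({'success': False, 'error': msg}))
            sys.exit(1)
        raise click.UsageError('Could not find content file info: %s' % msg)

    # The real command would call fail(...) only when a lookup does not succeed.
    fail("no content file found at the given path")

if __name__ == "__main__":
    content_file_info()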
Code example #15
File: __main__.py  Project: yashrathi-git/evem
def new(commit):
    """
    Create new event
    """
    if not commit:
        print(f"{Fore.LIGHTYELLOW_EX}Date Format: dd-mm-yyyy{Style.RESET_ALL}")
        print(
            f"{Fore.MAGENTA}Leave any date field empty for today's date{Style.RESET_ALL}\n"
        )
        while True:
            title = prompt("title")
            if not len(title) > 0:
                print(f"{Fore.RED}Title cannot be left blank{Style.RESET_ALL}")
            else:
                break
        short_description = prompt("short-description")
        date_created = prompt("date-of-event")
        base_date = prompt("base-date(Default: date-of-event)")

        if not short_description:
            short_description = title
        try:
            date_created = parse_date(date_created)
            base_date = parse_date(base_date)
            if date_created > datetime.date.today():
                raise Exception(
                    "date-of-event cannot be bigger than today's date.")
        except Exception as e:
            raise click.UsageError(f"{Fore.RED}{e}{Style.RESET_ALL}")
        if not base_date:
            base_date = date_created

        syntax = "period = (years:int, months:int, days:int), repeat = (int|*)"
        print(
            f"\n{Fore.MAGENTA}Date are calculated for time after `base-date`{Style.RESET_ALL}"
        )
        print(Fore.LIGHTYELLOW_EX + "Syntax: ")
        print(syntax)
        print("\nExamples:")
        print(
            "period = (0,1,0), repeat = 10      # means to remind every month for 10 times"
        )
        print(
            "period = (0,0,14), repeat = *      # means to remind every 14 days (forever)"
        )
        print(
            "period = (1,0,0), repeat = 1       # means remind after 1 year (1 time only)"
        )
        print(Style.RESET_ALL)
        print(
            f"{Fore.RED}Enter q to exit (There should be atleast one reminder){Style.RESET_ALL}\n"
        )

        event = Event(title,
                      short_description,
                      long_description="",
                      date_created=date_created)
        model_objects = {
            "event": event,
            "reminder_dates": reminder(event, base_date)
        }

        create_dir_if_not_exists(BASEDIR, "markdown")
        markdown_file = path_join(BASEDIR, "markdown", "description.md")
        template_markdown = path_join(BASEDIR, "templates", "markdown.md")
        with open(markdown_file, "w") as file:
            # Read from template markdown
            with open(template_markdown) as template_file:
                # Write starter template to this file
                file.write(template_file.read())

        pickle_object(model_objects)

        print(f"{Style.DIM}Created `description.md` file")
        print("Run following commands to complete event creation:")
        print(f"Edit the file with suitable description{Style.RESET_ALL}")
        print(f"\n>> {Fore.LIGHTBLUE_EX}nano {markdown_file}{Style.RESET_ALL}")
        print(
            f">> {Fore.LIGHTBLUE_EX}evem event new --commit{Style.RESET_ALL}")

    else:
        model_objects = unpickle_object()
        if not model_objects:
            raise click.BadOptionUsage(
                option_name="markdown",
                message=("Unable to find cached object."
                         " Run command without `-c/--commit` flag first."),
            )
        os.remove(path_join(BASEDIR, "cache", "__object__.cache"))

        event = model_objects["event"]

        event.long_description = read_markdown()
        event.html = parse_markdown(event)
        session = session_factory()
        session.add(event)
        for model in model_objects["reminder_dates"]:
            session.add(model)
        session.commit()
        session.close()
        print(
            f"{Style.BRIGHT}{Fore.GREEN}(\u2713){Style.RESET_ALL} Event successfully commited."
        )
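
The date handling in this command wraps parsing and validation in a try/except and re-raises any failure as a UsageError. A minimal sketch of that pattern follows; parse_date here is a hypothetical dd-mm-yyyy parser standing in for the project's own helper, and the colour formatting is omitted.

import datetime
import click

def parse_date(value):
    # Hypothetical parser: empty input falls back to today's date.
    if not value:
        return datetime.date.today()
    return datetime.datetime.strptime(value, "%d-%m-%Y").date()

@click.command()
@click.option("--date-of-event", default="")
def new(date_of_event):
    try:
        date_created = parse_date(date_of_event)
        if date_created > datetime.date.today():
            raise Exception("date-of-event cannot be later than today's date.")
    except Exception as e:
        # Surface any parsing/validation failure as a usage error.
        raise click.UsageError(str(e))
    click.echo(f"Event date: {date_created}")

if __name__ == "__main__":
    new()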
Code example #16
def delete_imported_package(ctx, from_json, wait_for_state, max_wait_seconds,
                            wait_interval_seconds, oda_instance_id, package_id,
                            if_match):

    if isinstance(oda_instance_id, six.string_types) and len(
            oda_instance_id.strip()) == 0:
        raise click.UsageError(
            'Parameter --oda-instance-id cannot be whitespace or empty string')

    if isinstance(package_id, six.string_types) and len(
            package_id.strip()) == 0:
        raise click.UsageError(
            'Parameter --package-id cannot be whitespace or empty string')

    kwargs = {}
    if if_match is not None:
        kwargs['if_match'] = if_match
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(
        ctx.obj['request_id'])
    client = cli_util.build_client('oda', 'odapackage', ctx)
    result = client.delete_imported_package(oda_instance_id=oda_instance_id,
                                            package_id=package_id,
                                            **kwargs)
    if wait_for_state:

        if hasattr(client, 'get_work_request') and callable(
                getattr(client, 'get_work_request')):
            try:
                wait_period_kwargs = {}
                if max_wait_seconds is not None:
                    wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
                if wait_interval_seconds is not None:
                    wait_period_kwargs[
                        'max_interval_seconds'] = wait_interval_seconds

                click.echo(
                    'Action completed. Waiting until the work request has entered state: {}'
                    .format(wait_for_state),
                    file=sys.stderr)
                result = oci.wait_until(
                    client,
                    client.get_work_request(
                        result.headers['opc-work-request-id']), 'status',
                    wait_for_state, **wait_period_kwargs)
            except oci.exceptions.MaximumWaitTimeExceeded as e:
                # If we fail, we should show an error, but we should still provide the information to the customer
                click.echo(
                    'Failed to wait until the work request entered the specified state. Please retrieve the work request to find its current state',
                    file=sys.stderr)
                cli_util.render_response(result, ctx)
                sys.exit(2)
            except Exception:
                click.echo(
                    'Encountered error while waiting for work request to enter the specified state. Outputting last known resource state',
                    file=sys.stderr)
                cli_util.render_response(result, ctx)
                raise
        else:
            click.echo(
                'Unable to wait for the work request to enter the specified state',
                file=sys.stderr)
    cli_util.render_response(result, ctx)
Code example #17
def main(source, repo_source_files, requirements_file, local_source_file,
         work_dir, python):
    """
    Bundle up a deployment package for AWS Lambda.

    From your local filesystem:

    \b
        $ make-lambda-package .
        ...
        dist/lambda-package.zip

    Or from a remote git repository:

    \b
        $ make-lambda-package https://github.com/NoRedInk/make-lambda-package.git
        ...
        vendor/dist/NoRedInk-make-lambda-package.zip

    Use # fragment to specify a commit or a branch:

    \b
        $ make-lambda-package https://github.com/NoRedInk/make-lambda-package.git#v1.0.0

    Dependencies specified with --requirements-file will be built using a docker container
    that replicates AWS Lambda's execution environment, so that extension modules
    are correctly packaged.

    When packaging a local source, --work-dir defaults to `.`:

    \b
    * ./build will hold a virtualenv for building dependencies if specified.
    * ./dist is where the zipped package will be saved

    When packaging a remote source, --work-dir defaults to `./vendor`.
    """
    scm_source = fsutil.parse_path_or_url(source)
    paths = fsutil.decide_paths(scm_source, work_dir)

    if requirements_file:
        with open(os.devnull, 'w') as devnull:
            docker_retcode = subprocess.call(['docker', '--help'],
                                             stdout=devnull)
        if docker_retcode != 0:
            raise click.UsageError(
                "`docker` command doesn't seem to be available. "
                "It's required to package dependencies.")

    if not (requirements_file or repo_source_files or local_source_file):
        click.secho(
            'Warning: without --repo-source-files, --requirements-file, '
            'or --local-source-file, nothing will be included in the zip file. '
            'Assuming you have good reasons to do this and proceeding.',
            fg='yellow')

    fsutil.ensure_dirs(paths)

    if isinstance(scm_source, fsutil.RemoteSource):
        click.echo('Fetching repo..')
        scm.fetch_repo(scm_source.url, scm_source.ref, paths.src_dir)

    deps_file = None
    if requirements_file:
        click.echo('Building deps..')
        deps_file = deps.build_deps(paths, requirements_file, python)

    click.echo('Creating zip file..')
    archive.make_archive(paths, repo_source_files, local_source_file,
                         deps_file, python)

    click.echo(os.path.relpath(paths.zip_path, os.getcwd()))
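
The pre-flight check for the docker CLI in this command (a subprocess call whose non-zero return code becomes a UsageError) can be read in isolation. Below is a hedged, self-contained sketch of that check; the helper name require_docker is hypothetical, and a FileNotFoundError (docker not installed at all) is treated the same as a failing call.

import os
import subprocess
import click

def require_docker():
    # Probe the docker CLI; any failure means dependencies cannot be packaged.
    with open(os.devnull, 'w') as devnull:
        try:
            retcode = subprocess.call(['docker', '--help'],
                                      stdout=devnull, stderr=devnull)
        except FileNotFoundError:
            retcode = 1
    if retcode != 0:
        raise click.UsageError(
            "`docker` command doesn't seem to be available. "
            "It's required to package dependencies.")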
Code example #18
def update_imported_package(ctx, from_json, force, wait_for_state,
                            max_wait_seconds, wait_interval_seconds,
                            current_package_id, parameter_values,
                            oda_instance_id, package_id, freeform_tags,
                            defined_tags, is_replace_skills, if_match):

    if isinstance(oda_instance_id, six.string_types) and len(
            oda_instance_id.strip()) == 0:
        raise click.UsageError(
            'Parameter --oda-instance-id cannot be whitespace or empty string')

    if isinstance(package_id, six.string_types) and len(
            package_id.strip()) == 0:
        raise click.UsageError(
            'Parameter --package-id cannot be whitespace or empty string')
    if not force:
        if parameter_values or freeform_tags or defined_tags:
            if not click.confirm(
                    "WARNING: Updates to parameter-values and freeform-tags and defined-tags will replace any existing values. Are you sure you want to continue?"
            ):
                ctx.abort()

    kwargs = {}
    if is_replace_skills is not None:
        kwargs['is_replace_skills'] = is_replace_skills
    if if_match is not None:
        kwargs['if_match'] = if_match
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(
        ctx.obj['request_id'])

    _details = {}
    _details['currentPackageId'] = current_package_id
    _details['parameterValues'] = cli_util.parse_json_parameter(
        "parameter_values", parameter_values)

    if freeform_tags is not None:
        _details['freeformTags'] = cli_util.parse_json_parameter(
            "freeform_tags", freeform_tags)

    if defined_tags is not None:
        _details['definedTags'] = cli_util.parse_json_parameter(
            "defined_tags", defined_tags)

    client = cli_util.build_client('oda', 'odapackage', ctx)
    result = client.update_imported_package(
        oda_instance_id=oda_instance_id,
        package_id=package_id,
        update_imported_package_details=_details,
        **kwargs)
    if wait_for_state:

        if hasattr(client, 'get_work_request') and callable(
                getattr(client, 'get_work_request')):
            try:
                wait_period_kwargs = {}
                if max_wait_seconds is not None:
                    wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
                if wait_interval_seconds is not None:
                    wait_period_kwargs[
                        'max_interval_seconds'] = wait_interval_seconds

                click.echo(
                    'Action completed. Waiting until the work request has entered state: {}'
                    .format(wait_for_state),
                    file=sys.stderr)
                result = oci.wait_until(
                    client,
                    client.get_work_request(
                        result.headers['opc-work-request-id']), 'status',
                    wait_for_state, **wait_period_kwargs)
            except oci.exceptions.MaximumWaitTimeExceeded as e:
                # If we fail, we should show an error, but we should still provide the information to the customer
                click.echo(
                    'Failed to wait until the work request entered the specified state. Outputting last known resource state',
                    file=sys.stderr)
                cli_util.render_response(result, ctx)
                sys.exit(2)
            except Exception:
                click.echo(
                    'Encountered error while waiting for work request to enter the specified state. Outputting last known resource state',
                    file=sys.stderr)
                cli_util.render_response(result, ctx)
                raise
        else:
            click.echo(
                'Unable to wait for the work request to enter the specified state',
                file=sys.stderr)
    cli_util.render_response(result, ctx)
Code example #19
File: core.py  Project: arkhotech/ligth-workflow
def rename(config, copy, canonical, debug, dry_run,  # pylint: disable-msg=too-many-arguments
           episode, ignore_filelist, log_file,  # pylint: disable-msg=too-many-arguments
           log_level, name, no_cache, output_format,  # pylint: disable-msg=too-many-arguments
           organise, partial, quiet, recursive,  # pylint: disable-msg=too-many-arguments
           rename_dir, regex, season, show,  # pylint: disable-msg=too-many-arguments
           show_override, specials, symlink, the,  # pylint: disable-msg=too-many-arguments
           paths):  # pylint: disable-msg=too-many-arguments

    if debug:
        log_level = 10
    start_logging(log_file, log_level, quiet)
    logger = functools.partial(log.log, 26)

    if dry_run or debug:
        start_dry_run(logger)

    if copy and symlink:
        raise click.UsageError("You can't use --copy and --symlink at the same time.")

    if not paths:
        paths = [os.getcwd()]

    for current_dir, filename in build_file_list(paths, recursive, ignore_filelist):
        try:
            tv = TvRenamr(current_dir, debug, dry_run, no_cache)

            _file = File(**tv.extract_details_from_file(
                filename,
                user_regex=regex,
                partial=partial,
            ))
            # TODO: Warn setting season & episode will override *all* episodes
            _file.user_overrides(show, season, episode)
            _file.safety_check()

            conf = get_config(config)

            for ep in _file.episodes:
                canonical = conf.get(
                    'canonical',
                    _file.show_name,
                    default=ep.file_.show_name,
                    override=canonical
                )

                # TODO: Warn setting name will override *all* episodes
                ep.title = tv.retrieve_episode_title(
                    ep,
                    canonical=canonical,
                    override=name,
                )

                # TODO: make this a sanitisation method on ep?
                ep.title = ep.title.replace('/', '-')

            show = conf.get_output(_file.show_name, override=show_override)
            the = conf.get('the', show=_file.show_name, override=the)
            _file.show_name = tv.format_show_name(show, the=the)

            _file.set_output_format(conf.get(
                'format',
                _file.show_name,
                default=_file.output_format,
                override=output_format
            ))

            copy = conf.get(
                'copy',
                _file.show_name,
                default=False,
                override=copy
            )
            organise = conf.get(
                'organise',
                _file.show_name,
                default=True,
                override=organise
            )
            rename_dir = conf.get(
                'renamed',
                _file.show_name,
                default=current_dir,
                override=rename_dir
            )
            specials_folder = conf.get(
                'specials_folder',
                _file.show_name,
                default='Season 0',
                override=specials,
            )
            symlink = conf.get(
                'symlink',
                _file.show_name,
                default=False,
                override=symlink
            )
            path = tv.build_path(
                _file,
                rename_dir=rename_dir,
                organise=organise,
                specials_folder=specials_folder,
            )

            tv.rename(filename, path, copy, symlink)
        except errors.NetworkException:
            if dry_run or debug:
                stop_dry_run(logger)
            sys.exit(1)
        except (AttributeError,
                errors.EmptyEpisodeTitleException,
                errors.EpisodeNotFoundException,
                errors.IncorrectRegExpException,
                errors.InvalidXMLException,
                errors.MissingInformationException,
                errors.OutputFormatMissingSyntaxException,
                errors.PathExistsException,
                errors.ShowNotFoundException,
                errors.UnexpectedFormatException) as e:
            continue
        except Exception as e:
            if debug:
                # In debug mode, show the full traceback.
                raise
            for msg in e.args:
                log.critical('Error: %s', msg)
            sys.exit(1)

        # add a blank line for clarity unless this is a debug dry run
        if not (debug and dry_run):
            log.info('')

    if dry_run or debug:
        stop_dry_run(logger)
Code Example #20
0
File: bpt.py Project: tekka007/ArduinoBoards
def update_index(ctx, package_name, force, output_board_index,
                 output_board_dir):
    """Update board package in the published index.

    This command archives and compresses a board package and adds it to the
    board index file.  A sanity check ensures the package has a later version
    than the one currently in the board index; the check can be disabled with
    the --force option.

    The command takes one argument: the name of the board package to update.
    This should be the package name as defined by its section in the board
    package config INI file (use the check_updates command to list all the
    packages from the config if unsure).
    """
    ctx.obj.load_data()  # Load all the package config & metadata.
    # Use the input board index as the output if none is specified.
    if output_board_index is None:
        output_board_index = ctx.obj.board_index_file
    # Validate that the specified package exists in the config.
    package = ctx.obj.board_config.get_package(package_name)
    if package is None:
        raise click.BadParameter(
            'Could not find specified package in the board package config INI file! Run check_updates command to list all configured package names.',
            param_hint='package')
    # If not in force mode do a sanity check to make sure the package source
    # has a newer version than in the index.
    if not force:
        # Get all the associated packages in the board index.
        index_packages = list(
            ctx.obj.board_index.get_platforms(package.get_parent(),
                                              package.get_name()))
        # Do version check if packages were found in the index.
        if len(index_packages) > 0:
            # Find the most recent version in the index packages.
            latest = max(
                map(lambda x: parse_version(x.get('version', '')),
                    index_packages))
            # Fail if the latest published package is the same or newer than the
            # current package from its origin source.
            if latest >= parse_version(package.get_version()):
                raise click.UsageError(
                    'Specified package is older than the version currently in the index!  Use the --force option to force this update if necessary.'
                )
    # Create the output directory if it doesn't exist.
    if not os.path.exists(output_board_dir):
        os.makedirs(output_board_dir)
    # Build the archive with the board package data and write it to the target
    # directory.
    archive_path = os.path.join(output_board_dir, package.get_archive_name())
    size, sha256 = package.write_archive(archive_path)
    click.echo('Created board package archive: {0}'.format(archive_path))
    # Convert the package template from JSON to a platform metadata dict that
    # can be inserted in the board index.
    template_params = {
        'version': package.get_version(),
        'filename': package.get_archive_name(),
        'sha256': sha256,
        'size': size
    }
    platform = json.loads(package.get_template().format(**template_params))
    # Add the new package metadata to the board index.
    ctx.obj.board_index.add_platform(package.get_parent(), platform)
    # Write out the new board index JSON.
    new_index = ctx.obj.board_index.write_json()
    with open(output_board_index, 'w') as bi:
        bi.write(new_index)
    click.echo(
        'Wrote updated board index JSON: {0}'.format(output_board_index))
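The sanity check above compares the candidate package version against everything already published in the index via parse_version. Below is a minimal standalone sketch of that comparison, assuming parse_version comes from pkg_resources (the actual import in bpt.py is not shown here); the version strings are illustrative only.

from pkg_resources import parse_version

index_versions = ['1.0.2', '1.1.0']   # hypothetical versions already in the board index
candidate_version = '1.0.5'           # hypothetical version of the package being updated

# Find the most recent published version and refuse to downgrade, mirroring
# the click.UsageError raised by update_index when --force is not given.
latest = max(parse_version(v) for v in index_versions)
if latest >= parse_version(candidate_version):
    print('{} is not newer than {}; pass --force to publish anyway.'.format(
        candidate_version, latest))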
Code Example #21
0
def show_help(ctx, param, value):
    if value and not ctx.resilient_parsing:
        raise click.UsageError(ctx.get_help())
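show_help uses click's (ctx, param, value) callback signature and skips work during resilient parsing, which is the shape click expects for an eager option callback. A hedged sketch of how such a callback is typically wired up follows; the --help-all flag name and command are illustrative, not from the original project.

import click

@click.command()
@click.option('--help-all', is_flag=True, expose_value=False, is_eager=True,
              callback=show_help,  # the callback defined above
              help='Show the full help text and exit.')
def mycmd():
    click.echo('running')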
Code Example #22
0
def create_transfer_appliance(ctx, from_json, wait_for_state, max_wait_seconds,
                              wait_interval_seconds, id,
                              customer_shipping_address,
                              minimum_storage_capacity_in_terabytes):

    if isinstance(id, six.string_types) and len(id.strip()) == 0:
        raise click.UsageError(
            'Parameter --id cannot be whitespace or empty string')

    kwargs = {}

    _details = {}

    if customer_shipping_address is not None:
        _details['customerShippingAddress'] = cli_util.parse_json_parameter(
            "customer_shipping_address", customer_shipping_address)

    if minimum_storage_capacity_in_terabytes is not None:
        _details[
            'minimumStorageCapacityInTerabytes'] = minimum_storage_capacity_in_terabytes

    client = cli_util.build_client('dts', 'transfer_appliance', ctx)
    result = client.create_transfer_appliance(
        id=id, create_transfer_appliance_details=_details, **kwargs)
    if wait_for_state:

        if hasattr(client, 'get_transfer_appliance') and callable(
                getattr(client, 'get_transfer_appliance')):
            try:
                wait_period_kwargs = {}
                if max_wait_seconds is not None:
                    wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
                if wait_interval_seconds is not None:
                    wait_period_kwargs[
                        'max_interval_seconds'] = wait_interval_seconds

                click.echo(
                    'Action completed. Waiting until the resource has entered state: {}'
                    .format(wait_for_state),
                    file=sys.stderr)
                result = oci.wait_until(
                    client, client.get_transfer_appliance(result.data.id),
                    'lifecycle_state', wait_for_state, **wait_period_kwargs)
            except oci.exceptions.MaximumWaitTimeExceeded as e:
                # If we fail, we should show an error, but we should still provide the information to the customer
                click.echo(
                    'Failed to wait until the resource entered the specified state. Outputting last known resource state',
                    file=sys.stderr)
                cli_util.render_response(result, ctx)
                sys.exit(2)
            except Exception:
                click.echo(
                    'Encountered error while waiting for resource to enter the specified state. Outputting last known resource state',
                    file=sys.stderr)
                cli_util.render_response(result, ctx)
                raise
        else:
            click.echo(
                'Unable to wait for the resource to enter the specified state',
                file=sys.stderr)
    cli_util.render_response(result, ctx)
Code Example #23
0
def _range_check(p):
    try:
        p = int(p)
    except ValueError:
        raise click.UsageError("{} is not an int".format(p))
Code Example #24
0
def update_transfer_appliance(ctx, from_json, force, wait_for_state,
                              max_wait_seconds, wait_interval_seconds, id,
                              transfer_appliance_label, lifecycle_state,
                              customer_shipping_address, expected_return_date,
                              pickup_window_start_time, pickup_window_end_time,
                              minimum_storage_capacity_in_terabytes, if_match):

    if isinstance(id, six.string_types) and len(id.strip()) == 0:
        raise click.UsageError(
            'Parameter --id cannot be whitespace or empty string')

    if isinstance(transfer_appliance_label, six.string_types) and len(
            transfer_appliance_label.strip()) == 0:
        raise click.UsageError(
            'Parameter --transfer-appliance-label cannot be whitespace or empty string'
        )
    if not force:
        if customer_shipping_address:
            if not click.confirm(
                    "WARNING: Updates to customer-shipping-address will replace any existing values. Are you sure you want to continue?"
            ):
                ctx.abort()

    kwargs = {}
    if if_match is not None:
        kwargs['if_match'] = if_match

    _details = {}

    if lifecycle_state is not None:
        _details['lifecycleState'] = lifecycle_state

    if customer_shipping_address is not None:
        _details['customerShippingAddress'] = cli_util.parse_json_parameter(
            "customer_shipping_address", customer_shipping_address)

    if expected_return_date is not None:
        _details['expectedReturnDate'] = expected_return_date

    if pickup_window_start_time is not None:
        _details['pickupWindowStartTime'] = pickup_window_start_time

    if pickup_window_end_time is not None:
        _details['pickupWindowEndTime'] = pickup_window_end_time

    if minimum_storage_capacity_in_terabytes is not None:
        _details[
            'minimumStorageCapacityInTerabytes'] = minimum_storage_capacity_in_terabytes

    client = cli_util.build_client('dts', 'transfer_appliance', ctx)
    result = client.update_transfer_appliance(
        id=id,
        transfer_appliance_label=transfer_appliance_label,
        update_transfer_appliance_details=_details,
        **kwargs)
    if wait_for_state:

        if hasattr(client, 'get_transfer_appliance') and callable(
                getattr(client, 'get_transfer_appliance')):
            try:
                wait_period_kwargs = {}
                if max_wait_seconds is not None:
                    wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
                if wait_interval_seconds is not None:
                    wait_period_kwargs[
                        'max_interval_seconds'] = wait_interval_seconds

                click.echo(
                    'Action completed. Waiting until the resource has entered state: {}'
                    .format(wait_for_state),
                    file=sys.stderr)
                result = oci.wait_until(
                    client, client.get_transfer_appliance(result.data.id),
                    'lifecycle_state', wait_for_state, **wait_period_kwargs)
            except oci.exceptions.MaximumWaitTimeExceeded as e:
                # If we fail, we should show an error, but we should still provide the information to the customer
                click.echo(
                    'Failed to wait until the resource entered the specified state. Outputting last known resource state',
                    file=sys.stderr)
                cli_util.render_response(result, ctx)
                sys.exit(2)
            except Exception:
                click.echo(
                    'Encountered error while waiting for resource to enter the specified state. Outputting last known resource state',
                    file=sys.stderr)
                cli_util.render_response(result, ctx)
                raise
        else:
            click.echo(
                'Unable to wait for the resource to enter the specified state',
                file=sys.stderr)
    cli_util.render_response(result, ctx)
Code Example #25
0
File: auto_scaling_group.py Project: pasha-r/senza
def component_auto_scaling_group(definition, configuration, args, info, force,
                                 account_info):
    definition = ensure_keys(definition, "Resources")

    # launch configuration
    config_name = configuration["Name"] + "Config"
    definition["Resources"][config_name] = {
        "Type": "AWS::AutoScaling::LaunchConfiguration",
        "Properties": {
            "InstanceType":
            configuration["InstanceType"],
            "ImageId": {
                "Fn::FindInMap":
                ["Images", {
                    "Ref": "AWS::Region"
                }, configuration["Image"]]
            },
            "AssociatePublicIpAddress":
            configuration.get('AssociatePublicIpAddress', False),
            "EbsOptimized":
            configuration.get('EbsOptimized', False)
        }
    }

    if 'IamRoles' in configuration:
        logical_id = configuration['Name'] + 'InstanceProfile'
        roles = configuration['IamRoles']
        if len(roles) > 1:
            for role in roles:
                if isinstance(role, dict):
                    raise click.UsageError(
                        'Cannot merge policies of Cloud Formation references ({"Ref": ".."}): '
                        + 'You can use at most one IAM role with "Ref".')
            logical_role_id = configuration['Name'] + 'Role'
            definition['Resources'][logical_role_id] = {
                'Type': 'AWS::IAM::Role',
                'Properties': {
                    "AssumeRolePolicyDocument": {
                        "Version":
                        "2012-10-17",
                        "Statement": [{
                            "Effect": "Allow",
                            "Principal": {
                                "Service": ["ec2.amazonaws.com"]
                            },
                            "Action": ["sts:AssumeRole"]
                        }]
                    },
                    'Path': '/',
                    'Policies': get_merged_policies(roles)
                }
            }
            instance_profile_roles = [{'Ref': logical_role_id}]
        elif isinstance(roles[0], dict):
            instance_profile_roles = [
                resolve_referenced_resource(roles[0], args.region)
            ]
        else:
            instance_profile_roles = roles
        definition['Resources'][logical_id] = {
            'Type': 'AWS::IAM::InstanceProfile',
            'Properties': {
                'Path': '/',
                'Roles': instance_profile_roles
            }
        }
        definition["Resources"][config_name]["Properties"][
            "IamInstanceProfile"] = {
                'Ref': logical_id
            }

    if "SecurityGroups" in configuration:
        definition["Resources"][config_name]["Properties"]["SecurityGroups"] = \
            resolve_security_groups(configuration["SecurityGroups"], args.region)

    if "UserData" in configuration:
        definition["Resources"][config_name]["Properties"]["UserData"] = {
            "Fn::Base64": configuration["UserData"]
        }

    # auto scaling group
    asg_name = configuration["Name"]
    asg_success = ["1", "PT15M"]
    if "AutoScaling" in configuration:
        if "SuccessRequires" in configuration["AutoScaling"]:
            asg_success = normalize_asg_success(
                configuration["AutoScaling"]["SuccessRequires"])

    tags = [
        # Tag "Name"
        {
            "Key": "Name",
            "PropagateAtLaunch": True,
            "Value": "{0}-{1}".format(info["StackName"], info["StackVersion"])
        },
        # Tag "StackName"
        {
            "Key": "StackName",
            "PropagateAtLaunch": True,
            "Value": info["StackName"],
        },
        # Tag "StackVersion"
        {
            "Key": "StackVersion",
            "PropagateAtLaunch": True,
            "Value": info["StackVersion"]
        }
    ]

    if "Tags" in configuration:
        for tag in configuration["Tags"]:
            tags.append({
                "Key": tag["Key"],
                "PropagateAtLaunch": True,
                "Value": tag["Value"]
            })

    definition["Resources"][asg_name] = {
        "Type": "AWS::AutoScaling::AutoScalingGroup",
        # wait for a signal from the required number of servers that they have booted
        "CreationPolicy": {
            "ResourceSignal": {
                "Count": asg_success[0],
                "Timeout": asg_success[1]
            }
        },
        "Properties": {
            # some notifications for our operator
            "LaunchConfigurationName": {
                "Ref": config_name
            },
            "VPCZoneIdentifier": {
                "Fn::FindInMap":
                ["ServerSubnets", {
                    "Ref": "AWS::Region"
                }, "Subnets"]
            },
            "Tags": tags
        }
    }

    asg_properties = definition["Resources"][asg_name]["Properties"]

    if "OperatorTopicId" in info:
        asg_properties["NotificationConfiguration"] = {
            "NotificationTypes": [
                "autoscaling:EC2_INSTANCE_LAUNCH",
                "autoscaling:EC2_INSTANCE_LAUNCH_ERROR",
                "autoscaling:EC2_INSTANCE_TERMINATE",
                "autoscaling:EC2_INSTANCE_TERMINATE_ERROR"
            ],
            "TopicARN":
            resolve_topic_arn(args.region, info["OperatorTopicId"])
        }

    default_health_check_type = 'EC2'

    if "ElasticLoadBalancer" in configuration:
        if isinstance(configuration["ElasticLoadBalancer"], str):
            asg_properties["LoadBalancerNames"] = [{
                "Ref":
                configuration["ElasticLoadBalancer"]
            }]
        elif isinstance(configuration["ElasticLoadBalancer"], list):
            asg_properties["LoadBalancerNames"] = [{
                'Ref': ref
            } for ref in configuration["ElasticLoadBalancer"]]
        # use ELB health check by default
        default_health_check_type = 'ELB'
    if "ElasticLoadBalancerV2" in configuration:
        if isinstance(configuration["ElasticLoadBalancerV2"], str):
            asg_properties["TargetGroupARNs"] = [{
                "Ref":
                configuration["ElasticLoadBalancerV2"] + 'TargetGroup'
            }]
        elif isinstance(configuration["ElasticLoadBalancerV2"], list):
            asg_properties["TargetGroupARNs"] = [{
                'Ref': ref
            } for ref in configuration["ElasticLoadBalancerV2"] + 'TargetGroup'
                                                 ]
        # use ELB health check by default
        default_health_check_type = 'ELB'

    asg_properties['HealthCheckType'] = configuration.get(
        'HealthCheckType', default_health_check_type)
    asg_properties['HealthCheckGracePeriod'] = configuration.get(
        'HealthCheckGracePeriod', 300)

    if "AutoScaling" in configuration:
        as_conf = configuration["AutoScaling"]
        asg_properties["MaxSize"] = as_conf["Maximum"]
        asg_properties["MinSize"] = as_conf["Minimum"]
        asg_properties["DesiredCapacity"] = max(
            int(as_conf["Minimum"]), int(as_conf.get('DesiredCapacity', 1)))

        default_scaling_adjustment = as_conf.get("ScalingAdjustment", 1)
        default_cooldown = as_conf.get("Cooldown", "60")

        # ScaleUp policy
        scale_up_name = asg_name + "ScaleUp"
        scale_up_adjustment = int(
            as_conf.get("ScaleUpAdjustment", default_scaling_adjustment))
        scale_up_cooldown = as_conf.get("ScaleUpCooldown", default_cooldown)

        definition["Resources"][scale_up_name] = create_autoscaling_policy(
            asg_name, scale_up_name, scale_up_adjustment, scale_up_cooldown,
            definition)

        # ScaleDown policy
        scale_down_name = asg_name + "ScaleDown"
        scale_down_adjustment = (-1) * int(
            as_conf.get("ScaleDownAdjustment", default_scaling_adjustment))
        scale_down_cooldown = as_conf.get("ScaleDownCooldown",
                                          default_cooldown)

        definition["Resources"][scale_down_name] = create_autoscaling_policy(
            asg_name, scale_down_name, scale_down_adjustment,
            scale_down_cooldown, definition)

        if "MetricType" in as_conf:
            metric_type = as_conf["MetricType"]
            metricfns = {
                "CPU": metric_cpu,
                "NetworkIn": metric_network,
                "NetworkOut": metric_network
            }
            # lowercase cpu is an acceptable metric, be compatible
            metricfns_by_lower = {k.lower(): v for k, v in metricfns.items()}
            if metric_type.lower() not in metricfns_by_lower:
                raise click.UsageError(
                    'Auto scaling MetricType "{}" not supported.'.format(
                        metric_type))
            metricfn = metricfns_by_lower[metric_type.lower()]
            definition = metricfn(asg_name, definition, as_conf, args, info,
                                  force)
    else:
        asg_properties["MaxSize"] = 1
        asg_properties["MinSize"] = 1

    for res in (config_name, asg_name):
        props = definition['Resources'][res]['Properties']
        additional_cf_properties = ADDITIONAL_PROPERTIES.get(
            definition['Resources'][res]['Type'])
        properties_allowed_to_overwrite = (
            set(props.keys()) - SENZA_PROPERTIES) | additional_cf_properties
        for key in properties_allowed_to_overwrite:
            if key in configuration:
                props[key] = configuration[key]

    return definition
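component_auto_scaling_group reads all of its inputs from the configuration mapping (Name, InstanceType, Image, IamRoles, SecurityGroups, UserData, ElasticLoadBalancer, AutoScaling, Tags, ...). The sketch below shows a configuration dict that would exercise the main branches above; the concrete values are illustrative assumptions, not taken from a real Senza definition.

configuration = {
    'Name': 'AppServer',                       # becomes the AppServerConfig / AppServer resources
    'InstanceType': 't3.micro',
    'Image': 'LatestImage',                    # resolved via Fn::FindInMap in the Images map
    'SecurityGroups': ['app-appserver'],
    'IamRoles': ['app-appserver-role'],
    'ElasticLoadBalancer': 'AppLoadBalancer',  # switches the default health check type to ELB
    'AutoScaling': {
        'Minimum': 2,
        'Maximum': 4,
        'MetricType': 'CPU',                   # selects the metric_cpu scaling policy
        'SuccessRequires': '2 within 15m',     # hypothetical value accepted by normalize_asg_success
    },
    'Tags': [{'Key': 'Team', 'Value': 'platform'}],
}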
Code Example #26
0
File: run.py Project: ahal/services
def cmd(
    ctx,
    project,
    quiet,
    nix_shell,
    taskcluster_secrets,
    taskcluster_client_id,
    taskcluster_access_token,
):

    project_config = please_cli.config.PROJECTS.get(project, {})
    run_type = project_config.get('run')
    run_options = project_config.get('run_options', {})

    if not run_type:
        raise click.ClickException(
            'Application `{}` is not configured to be runnable.'.format(project))

    host = run_options.get('host', 'localhost')
    if please_cli.config.IN_DOCKER:
        host = run_options.get('host', '0.0.0.0')
    port = str(run_options.get('port', 8000))
    schema = 'https://'
    project_name = project.replace('-', '_')
    ca_cert_file = os.path.join(please_cli.config.TMP_DIR, 'certs', 'ca.crt')
    server_cert_file = os.path.join(please_cli.config.TMP_DIR, 'certs',
                                    'server.crt')
    server_key_file = os.path.join(please_cli.config.TMP_DIR, 'certs',
                                   'server.key')

    os.environ['DEBUG'] = 'true'
    os.environ['PROJECT_NAME'] = project_name

    pg_host = please_cli.config.PROJECTS['postgresql']['run_options'].get(
        'host', host)
    pg_port = str(
        please_cli.config.PROJECTS['postgresql']['run_options']['port'])

    if 'postgresql' in project_config.get('requires', []):

        dbname = 'services'

        click.echo(' => Checking if database `{}` exists ... '.format(dbname),
                   nl=False)
        with click_spinner.spinner():
            result, output, error = ctx.invoke(
                please_cli.shell.cmd,
                project=project,
                quiet=True,
                command=' '.join([
                    'psql',
                    '-lqt',
                    '-h',
                    pg_host,
                    '-p',
                    pg_port,
                ]),
                nix_shell=nix_shell,
            )

        database_exists = False
        for line in output.split('\n'):
            column1 = line.split('|')[0].strip()
            if column1 == dbname:
                database_exists = True
                break

        if result != 0:
            click.secho('ERROR', fg='red')
            raise click.UsageError('Could not connect to the database.\n\n'
                                   'Please run:\n\n'
                                   '    ./please run postgresql\n\n'
                                   'in a separate terminal.')

        please_cli.utils.check_result(result, output)

        if not database_exists:
            click.echo(' => Creating `{}` database ... '.format(dbname),
                       nl=False)
            with click_spinner.spinner():
                result, output, error = ctx.invoke(
                    please_cli.shell.cmd,
                    project=project,
                    command=' '.join([
                        'createdb',
                        '-h',
                        pg_host,
                        '-p',
                        pg_port,
                        dbname,
                    ]),
                    nix_shell=nix_shell,
                )
            please_cli.utils.check_result(result, output)

        os.environ['DATABASE_URL'] = 'postgresql://{}:{}/{}'.format(
            pg_host, pg_port, dbname)

    if 'redis' in project_config.get('requires', []):
        # TODO: Support checking if redis is running and support starting redis using please.
        os.environ['REDIS_URL'] = 'redis://localhost:6379'

    if run_type == 'POSTGRESQL':
        data_dir = run_options.get(
            'data_dir', os.path.join(please_cli.config.TMP_DIR, 'postgresql'))

        if not os.path.isdir(data_dir):
            click.echo(
                ' => Initialize database folder `{}` ... '.format(data_dir),
                nl=False)
            with click_spinner.spinner():
                result, output, error = ctx.invoke(
                    please_cli.shell.cmd,
                    project=project,
                    command='initdb -D {} --auth=trust'.format(data_dir),
                    nix_shell=nix_shell,
                )
            please_cli.utils.check_result(result, output)

        schema = ''
        command = [
            'postgres',
            '-D',
            data_dir,
            '-h',
            host,
            '-p',
            port,
        ]

    elif run_type == 'FLASK':

        for env_name, env_value in run_options.get('envs', {}).items():
            env_name = env_name.replace('-', '_').upper()
            os.environ[env_name] = env_value

        if not os.path.exists(ca_cert_file) or \
           not os.path.exists(server_cert_file) or \
           not os.path.exists(server_key_file):
            ctx.invoke(
                please_cli.create_certs.cmd,
                certificates_dir=os.path.join(please_cli.config.TMP_DIR,
                                              'certs'),
            )

        project_cache_dir = os.path.join(please_cli.config.TMP_DIR, 'cache',
                                         project_name)
        if not os.path.isdir(project_cache_dir):
            os.makedirs(project_cache_dir)

        os.environ['CACHE_TYPE'] = 'filesystem'
        os.environ['CACHE_DIR'] = project_cache_dir
        os.environ['APP_SETTINGS'] = os.path.join(please_cli.config.ROOT_DIR,
                                                  'src', project_name,
                                                  'settings.py')
        os.environ['APP_URL'] = '{}{}:{}'.format(schema, host, port)
        os.environ['CORS_ORIGINS'] = '*'

        command = [
            'gunicorn',
            project_name + '.flask:app',
            '--bind',
            '{}:{}'.format(host, port),
            '--ca-certs={}'.format(ca_cert_file),
            '--certfile={}'.format(server_cert_file),
            '--keyfile={}'.format(server_key_file),
            '--workers',
            '2',
            '--timeout',
            '3600',
            '--reload',
            '--reload-engine=poll',
            '--log-file',
            '-',
        ]

    elif run_type == 'SPHINX':

        schema = 'http://'
        command = [
            'HOST=' + host,
            'PORT=' + port,
            'python',
            'run.py',
        ]

    elif run_type == 'ELM':

        if not os.path.exists(ca_cert_file) or \
           not os.path.exists(server_cert_file) or \
           not os.path.exists(server_key_file):
            ctx.invoke(
                please_cli.create_certs.cmd,
                certificates_dir=os.path.join(please_cli.config.TMP_DIR,
                                              'certs'),
            )

        os.environ['WEBPACK_RELEASE_VERSION'] = please_cli.config.VERSION
        os.environ['WEBPACK_RELEASE_CHANNEL'] = 'development'
        os.environ['SSL_CACERT'] = ca_cert_file
        os.environ['SSL_CERT'] = server_cert_file
        os.environ['SSL_KEY'] = server_key_file

        for env_name, env_value in run_options.get('envs', {}).items():
            env_name = 'WEBPACK_' + env_name.replace('-', '_').upper()
            os.environ[env_name] = env_value

        # XXX: once we move please_cli.config.PROJECTS to nix we won't need this
        for require in project_config.get('requires', []):
            env_name = 'WEBPACK_{}_URL'.format(
                require.replace('-', '_').upper())
            env_value = '{}://{}:{}'.format(
                please_cli.config.PROJECTS[require]['run_options'].get(
                    'schema', 'https'),
                please_cli.config.PROJECTS[require]['run_options'].get(
                    'host', host),
                please_cli.config.PROJECTS[require]['run_options']['port'],
            )
            os.environ[env_name] = env_value

        command = [
            'webpack-dev-server',
            '--host',
            host,
            '--port',
            port,
            '--config',
            os.path.join(please_cli.config.ROOT_DIR, 'src', project_name,
                         'webpack.config.js'),
        ]

    click.echo(' => Running {} on {}{}:{} ...'.format(project, schema, host,
                                                      port))
    returncode, output, error = ctx.invoke(
        please_cli.shell.cmd,
        project=project,
        quiet=quiet,
        command=' '.join(command),
        nix_shell=nix_shell,
        taskcluster_secrets=taskcluster_secrets,
        taskcluster_client_id=taskcluster_client_id,
        taskcluster_access_token=taskcluster_access_token,
    )
    sys.exit(returncode)
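Everything in cmd above is driven by please_cli.config.PROJECTS; judging from the keys the function reads (run, run_options, requires), an entry looks roughly like the sketch below. The project names and values are illustrative assumptions, not the configuration shipped with the repository.

PROJECTS = {
    'postgresql': {
        'run': 'POSTGRESQL',
        'run_options': {'port': 9000},
    },
    'example-api': {                              # hypothetical project name
        'run': 'FLASK',                           # selects the gunicorn branch above
        'requires': ['postgresql', 'redis'],      # triggers DATABASE_URL / REDIS_URL setup
        'run_options': {
            'port': 8000,
            'envs': {'some-feature': 'on'},       # exported as SOME_FEATURE before launch
        },
    },
}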
Code Example #27
0
File: organization_cli.py Project: zhiiker/oci-cli
def update_organization(ctx, from_json, wait_for_state, max_wait_seconds,
                        wait_interval_seconds, organization_id,
                        default_ucm_subscription_id, if_match):

    if isinstance(organization_id, six.string_types) and len(
            organization_id.strip()) == 0:
        raise click.UsageError(
            'Parameter --organization-id cannot be whitespace or empty string')

    kwargs = {}
    if if_match is not None:
        kwargs['if_match'] = if_match
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(
        ctx.obj['request_id'])

    _details = {}
    _details['defaultUcmSubscriptionId'] = default_ucm_subscription_id

    client = cli_util.build_client('tenant_manager_control_plane',
                                   'organization', ctx)
    result = client.update_organization(organization_id=organization_id,
                                        update_organization_details=_details,
                                        **kwargs)
    if wait_for_state:

        if hasattr(client, 'get_work_request') and callable(
                getattr(client, 'get_work_request')):
            try:
                wait_period_kwargs = {}
                if max_wait_seconds is not None:
                    wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
                if wait_interval_seconds is not None:
                    wait_period_kwargs[
                        'max_interval_seconds'] = wait_interval_seconds

                click.echo(
                    'Action completed. Waiting until the work request has entered state: {}'
                    .format(wait_for_state),
                    file=sys.stderr)
                result = oci.wait_until(
                    client,
                    client.get_work_request(
                        result.headers['opc-work-request-id']), 'status',
                    wait_for_state, **wait_period_kwargs)
            except oci.exceptions.MaximumWaitTimeExceeded as e:
                # If we fail, we should show an error, but we should still provide the information to the customer
                click.echo(
                    'Failed to wait until the work request entered the specified state. Outputting last known resource state',
                    file=sys.stderr)
                cli_util.render_response(result, ctx)
                sys.exit(2)
            except Exception:
                click.echo(
                    'Encountered error while waiting for work request to enter the specified state. Outputting last known resource state',
                    file=sys.stderr)
                cli_util.render_response(result, ctx)
                raise
        else:
            click.echo(
                'Unable to wait for the work request to enter the specified state',
                file=sys.stderr)
    cli_util.render_response(result, ctx)
Code Example #28
0
def update_appliance_export_job(ctx, from_json, force, wait_for_state, max_wait_seconds, wait_interval_seconds, appliance_export_job_id, bucket_name, prefix, range_start, range_end, display_name, lifecycle_state, lifecycle_state_details, manifest_file, manifest_md5, number_of_objects, total_size_in_bytes, first_object, last_object, next_object, customer_shipping_address, freeform_tags, defined_tags, if_match):

    if isinstance(appliance_export_job_id, six.string_types) and len(appliance_export_job_id.strip()) == 0:
        raise click.UsageError('Parameter --appliance-export-job-id cannot be whitespace or empty string')
    if not force:
        if customer_shipping_address or freeform_tags or defined_tags:
            if not click.confirm("WARNING: Updates to customer-shipping-address and freeform-tags and defined-tags will replace any existing values. Are you sure you want to continue?"):
                ctx.abort()

    kwargs = {}
    if if_match is not None:
        kwargs['if_match'] = if_match
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])

    _details = {}

    if bucket_name is not None:
        _details['bucketName'] = bucket_name

    if prefix is not None:
        _details['prefix'] = prefix

    if range_start is not None:
        _details['rangeStart'] = range_start

    if range_end is not None:
        _details['rangeEnd'] = range_end

    if display_name is not None:
        _details['displayName'] = display_name

    if lifecycle_state is not None:
        _details['lifecycleState'] = lifecycle_state

    if lifecycle_state_details is not None:
        _details['lifecycleStateDetails'] = lifecycle_state_details

    if manifest_file is not None:
        _details['manifestFile'] = manifest_file

    if manifest_md5 is not None:
        _details['manifestMd5'] = manifest_md5

    if number_of_objects is not None:
        _details['numberOfObjects'] = number_of_objects

    if total_size_in_bytes is not None:
        _details['totalSizeInBytes'] = total_size_in_bytes

    if first_object is not None:
        _details['firstObject'] = first_object

    if last_object is not None:
        _details['lastObject'] = last_object

    if next_object is not None:
        _details['nextObject'] = next_object

    if customer_shipping_address is not None:
        _details['customerShippingAddress'] = cli_util.parse_json_parameter("customer_shipping_address", customer_shipping_address)

    if freeform_tags is not None:
        _details['freeformTags'] = cli_util.parse_json_parameter("freeform_tags", freeform_tags)

    if defined_tags is not None:
        _details['definedTags'] = cli_util.parse_json_parameter("defined_tags", defined_tags)

    client = cli_util.build_client('dts', 'appliance_export_job', ctx)
    result = client.update_appliance_export_job(
        appliance_export_job_id=appliance_export_job_id,
        update_appliance_export_job_details=_details,
        **kwargs
    )
    if wait_for_state:

        if hasattr(client, 'get_appliance_export_job') and callable(getattr(client, 'get_appliance_export_job')):
            try:
                wait_period_kwargs = {}
                if max_wait_seconds is not None:
                    wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
                if wait_interval_seconds is not None:
                    wait_period_kwargs['max_interval_seconds'] = wait_interval_seconds

                click.echo('Action completed. Waiting until the resource has entered state: {}'.format(wait_for_state), file=sys.stderr)
                result = oci.wait_until(client, client.get_appliance_export_job(result.data.id), 'lifecycle_state', wait_for_state, **wait_period_kwargs)
            except oci.exceptions.MaximumWaitTimeExceeded as e:
                # If we fail, we should show an error, but we should still provide the information to the customer
                click.echo('Failed to wait until the resource entered the specified state. Outputting last known resource state', file=sys.stderr)
                cli_util.render_response(result, ctx)
                sys.exit(2)
            except Exception:
                click.echo('Encountered error while waiting for resource to enter the specified state. Outputting last known resource state', file=sys.stderr)
                cli_util.render_response(result, ctx)
                raise
        else:
            click.echo('Unable to wait for the resource to enter the specified state', file=sys.stderr)
    cli_util.render_response(result, ctx)
Code Example #29
0
                                     resumable=True)
        req = gcs_service.objects().insert(
            bucket=bucket,
            name=os.path.basename(archive.name),
            media_body=media,
            body={"cacheControl": "public,max-age=31536000"})
        resp = None
        while resp is None:
            status, resp = req.next_chunk()
            if status:
                print "Uploaded %d%%." % int(status.progress() * 100)
        click.echo('...done!')
    except HttpError as amy:
        if amy.resp.status == 403:
            raise click.UsageError(
                'You don\'t have permission to write to GCS bucket %s. Fix this or specify a different bucket to use.'
                % bucket)
        else:
            raise amy

    # Invoke the container builder API
    ccb_service = discovery.build(
        'cloudbuild',
        'v1',
        credentials=credentials,
        discoveryServiceUrl=
        "https://content-cloudbuild.googleapis.com/$discovery/rest?version=v1")

    req = ccb_service.projects().builds().create(projectId=project_id,
                                                 body=cb_request_body)
Code Example #30
0
def create_boot_volume_extended(ctx, **kwargs):
    if (kwargs['source_boot_volume_id'] and kwargs['boot_volume_backup_id']) or (kwargs['source_boot_volume_id'] and kwargs['source_volume_replica_id']) or \
            (kwargs['boot_volume_backup_id'] and kwargs['source_volume_replica_id']):
        raise click.UsageError(
            'You can only specify one of either --source-boot-volume-id, --boot-volume-backup-id or --source-volume-replica-id option'
        )

    if not kwargs['source_boot_volume_id'] and not kwargs[
            'boot_volume_backup_id'] and not kwargs['source_volume_replica_id']:
        raise click.UsageError(
            'An empty boot volume cannot be created. Please specify either --boot-volume-backup-id, --source-boot-volume-id or --source-volume-replica-id'
        )

    if not kwargs['source_boot_volume_id'] and not kwargs[
            'source_volume_replica_id']:
        if not kwargs['availability_domain']:
            raise click.UsageError(
                'An availability domain must be specified when restoring a boot volume from backup'
            )

    client = cli_util.build_client('core', 'blockstorage', ctx)

    if kwargs['source_boot_volume_id']:
        source_boot_volume = client.get_boot_volume(
            boot_volume_id=kwargs['source_boot_volume_id'])
        if not kwargs['compartment_id']:
            kwargs['compartment_id'] = source_boot_volume.data.compartment_id

    if kwargs['boot_volume_backup_id']:
        source_backup = client.get_boot_volume_backup(
            boot_volume_backup_id=kwargs['boot_volume_backup_id'])
        if not kwargs['compartment_id']:
            kwargs['compartment_id'] = source_backup.data.compartment_id

    if kwargs['source_volume_replica_id']:
        source_volume_replica = client.get_boot_volume_replica(
            boot_volume_replica_id=kwargs['source_volume_replica_id'])
        kwargs[
            'availability_domain'] = source_volume_replica.data.availability_domain
        if not kwargs['compartment_id']:
            kwargs[
                'compartment_id'] = source_volume_replica.data.compartment_id

    if kwargs['source_boot_volume_id'] or kwargs[
            'boot_volume_backup_id'] or kwargs['source_volume_replica_id']:
        if kwargs['boot_volume_backup_id']:
            source_details = {
                'type': 'bootVolumeBackup',
                'id': kwargs['boot_volume_backup_id']
            }
        elif kwargs['source_boot_volume_id']:
            source_details = {
                'type': 'bootVolume',
                'id': kwargs['source_boot_volume_id']
            }
        else:
            source_details = {
                'type': 'bootVolumeReplica',
                'id': kwargs['source_volume_replica_id']
            }

        kwargs['source_details'] = json.dumps(source_details)

    kwargs.pop('source_boot_volume_id', None)
    kwargs.pop('boot_volume_backup_id', None)
    kwargs.pop('source_volume_replica_id', None)

    json_skeleton_utils.remove_json_skeleton_params_from_dict(kwargs)

    ctx.invoke(blockstorage_cli.create_boot_volume, **kwargs)