Example #1
def get_sku_tier(name):  # pylint: disable=too-many-return-statements
    name = name.upper()
    if name in ['F1', 'FREE']:
        return 'FREE'
    if name in ['D1', "SHARED"]:
        return 'SHARED'
    if name in ['B1', 'B2', 'B3', 'BASIC']:
        return 'BASIC'
    if name in ['S1', 'S2', 'S3']:
        return 'STANDARD'
    if name in ['P1', 'P2', 'P3']:
        return 'PREMIUM'
    if name in ['P1V2', 'P2V2', 'P3V2']:
        return 'PREMIUMV2'
    if name in ['P1V3', 'P2V3', 'P3V3']:
        return 'PREMIUMV3'
    if name in ['PC2', 'PC3', 'PC4']:
        return 'PremiumContainer'
    if name in ['EP1', 'EP2', 'EP3']:
        return 'ElasticPremium'
    if name in ['I1', 'I2', 'I3']:
        return 'Isolated'
    if name in ['I1V2', 'I2V2', 'I3V2']:
        return 'IsolatedV2'
    if name in ['WS1', 'WS2', 'WS3']:
        return 'WorkflowStandard'
    raise ValidationError("Invalid sku(pricing tier), please refer to command help for valid values")
Example #2
def check_provider_registrations(cli_ctx):
    try:
        rp_client = _resource_providers_client(cli_ctx)
        cc_registration_state = rp_client.get(
            consts.Connected_Cluster_Provider_Namespace).registration_state
        if cc_registration_state != "Registered":
            telemetry.set_exception(
                exception="{} provider is not registered".format(
                    consts.Connected_Cluster_Provider_Namespace),
                fault_type=consts.CC_Provider_Namespace_Not_Registered_Fault_Type,
                summary="{} provider is not registered".format(
                    consts.Connected_Cluster_Provider_Namespace))
            raise ValidationError(
                "{} provider is not registered. Please register it using 'az provider register -n 'Microsoft.Kubernetes' before running the connect command."
                .format(consts.Connected_Cluster_Provider_Namespace))
        kc_registration_state = rp_client.get(
            consts.Kubernetes_Configuration_Provider_Namespace
        ).registration_state
        if kc_registration_state != "Registered":
            telemetry.set_user_fault()
            logger.warning("{} provider is not registered".format(
                consts.Kubernetes_Configuration_Provider_Namespace))
    except ValidationError as e:
        raise e
    except Exception as ex:
        logger.warning(
            "Couldn't check the required provider's registration status. Error: {}"
            .format(str(ex)))
Example #3
def get_pool_manager(url):
    proxies = urllib.request.getproxies()
    bypass_proxy = urllib.request.proxy_bypass(urllib.parse.urlparse(url).hostname)

    if 'https' in proxies and not bypass_proxy:
        proxy = urllib.parse.urlparse(proxies['https'])

        if proxy.username and proxy.password:
            proxy_headers = urllib3.util.make_headers(proxy_basic_auth='{0}:{1}'.format(proxy.username, proxy.password))
            logger.debug('Setting proxy-authorization header for basic auth')
        else:
            proxy_headers = None

        logger.info('Using proxy for app service tunnel connection')
        http = urllib3.ProxyManager(proxy.geturl(), proxy_headers=proxy_headers)
    else:
        http = urllib3.PoolManager()

    if should_disable_connection_verify():
        http.connection_pool_kw['cert_reqs'] = 'CERT_NONE'
    else:
        http.connection_pool_kw['cert_reqs'] = 'CERT_REQUIRED'
        if REQUESTS_CA_BUNDLE in os.environ:
            ca_bundle_file = os.environ[REQUESTS_CA_BUNDLE]
            logger.debug("Using CA bundle file at '%s'.", ca_bundle_file)
            if not os.path.isfile(ca_bundle_file):
                raise ValidationError('REQUESTS_CA_BUNDLE environment variable is specified with an invalid file path')
        else:
            ca_bundle_file = certifi.where()
        http.connection_pool_kw['ca_certs'] = ca_bundle_file
    return http
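
A minimal usage sketch, assuming get_pool_manager from Example #3 is in scope together with its module-level dependencies (logger, certifi, should_disable_connection_verify, REQUESTS_CA_BUNDLE); the request call is the standard urllib3 API.

# Hypothetical usage of get_pool_manager from Example #3 (assumed in scope).
http = get_pool_manager('https://example.com')
response = http.request('GET', 'https://example.com')  # urllib3 HTTPResponse
print(response.status)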
Example #4
def backup_now(cmd,
               client,
               resource_group_name,
               vault_name,
               item_name,
               retain_until=None,
               container_name=None,
               backup_management_type=None,
               workload_type=None,
               backup_type=None,
               enable_compression=False):

    items_client = backup_protected_items_cf(cmd.cli_ctx)
    item = show_item(cmd, items_client, resource_group_name, vault_name,
                     container_name, item_name, backup_management_type,
                     workload_type)
    custom_help.validate_item(item)

    if isinstance(item, list):
        raise ValidationError(
            "Multiple items found. Please give native names instead.")

    if item.properties.backup_management_type.lower() == "azureiaasvm":
        return custom.backup_now(cmd, client, resource_group_name, vault_name,
                                 item, retain_until)

    if item.properties.backup_management_type.lower() == "azurestorage":
        return custom_afs.backup_now(cmd, client, resource_group_name,
                                     vault_name, item, retain_until)

    if item.properties.backup_management_type.lower() == "azureworkload":
        return custom_wl.backup_now(cmd, client, resource_group_name,
                                    vault_name, item, retain_until,
                                    backup_type, enable_compression)
    return None
Example #5
def undelete_protection(cmd,
                        client,
                        resource_group_name,
                        vault_name,
                        container_name,
                        item_name,
                        backup_management_type,
                        workload_type=None):
    items_client = backup_protected_items_cf(cmd.cli_ctx)
    item = show_item(cmd, items_client, resource_group_name, vault_name,
                     container_name, item_name, backup_management_type,
                     workload_type)
    custom_help.validate_item(item)

    if isinstance(item, list):
        raise ValidationError(
            "Multiple items found. Please give native names instead.")

    if item.properties.backup_management_type.lower() == "azureiaasvm":
        return custom.undelete_protection(cmd, client, resource_group_name,
                                          vault_name, item)

    if item.properties.backup_management_type.lower() == "azureworkload":
        return custom_wl.undelete_protection(cmd, client, resource_group_name,
                                             vault_name, item)

    return None
Example #6
def create_nat_gateway(cmd,
                       nat_gateway_name,
                       resource_group_name,
                       location=None,
                       public_ip_addresses=None,
                       public_ip_prefixes=None,
                       idle_timeout=None,
                       zone=None,
                       no_wait=False):

    if public_ip_addresses is None and public_ip_prefixes is None:
        err_msg = "Validation Error: At least 1 public IP address/prefix need to be attached."
        raise ValidationError(err_msg)

    client = network_client_factory(cmd.cli_ctx).nat_gateways
    NatGateway, NatGatewaySku = cmd.get_models('NatGateway', 'NatGatewaySku')

    nat_gateway = NatGateway(name=nat_gateway_name,
                             location=location,
                             sku=NatGatewaySku(name='Standard'),
                             idle_timeout_in_minutes=idle_timeout,
                             zones=zone,
                             public_ip_addresses=public_ip_addresses,
                             public_ip_prefixes=public_ip_prefixes)

    return sdk_no_wait(no_wait, client.begin_create_or_update,
                       resource_group_name, nat_gateway_name, nat_gateway)
Example #7
def move_recovery_points(cmd,
                         resource_group_name,
                         vault_name,
                         container_name,
                         item_name,
                         rp_name,
                         source_tier,
                         destination_tier,
                         backup_management_type=None,
                         workload_type=None):

    items_client = backup_protected_items_cf(cmd.cli_ctx)
    item = show_item(cmd, items_client, resource_group_name, vault_name,
                     container_name, item_name, backup_management_type,
                     workload_type)
    custom_help.validate_item(item)

    if isinstance(item, list):
        raise ValidationError(
            "Multiple items found. Please give native names instead.")

    if item.properties.backup_management_type.lower() == "azureiaasvm":
        return custom.move_recovery_points(cmd, resource_group_name,
                                           vault_name, item, rp_name,
                                           source_tier, destination_tier)

    if item.properties.backup_management_type.lower() == "azureworkload":
        return custom_wl.move_wl_recovery_points(cmd, resource_group_name,
                                                 vault_name, item, rp_name,
                                                 source_tier, destination_tier)

    raise ArgumentUsageError(
        'This command is not supported for --backup-management-type AzureStorage.'
    )
Example #8
def undelete_protection(cmd, client, resource_group_name, vault_name, item):
    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    backup_item_type = item_uri.split(';')[0]
    if not cust_help.is_sql(backup_item_type) and not cust_help.is_hana(
            backup_item_type):
        raise ValidationError("""
            Item must be either of type SQLDataBase or SAPHanaDatabase.
            """)

    properties = _get_protected_item_instance(backup_item_type)
    properties.protection_state = 'ProtectionStopped'
    properties.policy_id = ''
    properties.is_rehydrate = True
    param = ProtectedItemResource(properties=properties)

    result = client.create_or_update(vault_name,
                                     resource_group_name,
                                     fabric_name,
                                     container_uri,
                                     item_uri,
                                     param,
                                     raw=True)
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name,
                                      resource_group_name)
Example #9
def _validate_subnet_size(cli_ctx, subnet_id):
    subnet_id_parts = parse_resource_id(subnet_id)
    vnet_resource_group = subnet_id_parts['resource_group']
    vnet_name = subnet_id_parts['name']
    subnet_name = subnet_id_parts['resource_name']
    network_client = _get_network_client_factory(cli_ctx)
    subnet_obj = network_client.subnets.get(vnet_resource_group, vnet_name,
                                            subnet_name)
    address = subnet_obj.address_prefix
    size = int(address[address.index('/') + 1:])
    if size > 24:
        err_msg = 'Subnet size could cause scaling issues. Recommended size is at least /24.'
        rec_msg = 'Use --ignore-subnet-size-validation to skip size test.'
        validation_error = ValidationError(err_msg)
        validation_error.set_recommendation(rec_msg)
        raise validation_error
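
A small standalone sketch of the prefix-length check above, using a plain CIDR string instead of a subnet object fetched through the network client; a prefix length greater than 24 (for example /26) means the subnet is smaller than the recommended /24.

# Sketch of the size check in _validate_subnet_size, on a hypothetical CIDR string.
address = '10.0.0.0/26'
size = int(address[address.index('/') + 1:])  # -> 26
print(size > 24)  # True: a /26 subnet is smaller than the recommended /24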
Example #10
def create_ase_inbound_services(cmd,
                                resource_group_name,
                                name,
                                subnet,
                                vnet_name=None,
                                skip_dns=False):
    ase_client = _get_ase_client_factory(cmd.cli_ctx)
    ase = ase_client.get(resource_group_name, name)
    if not ase:
        raise ResourceNotFoundError(
            "App Service Environment '{}' not found.".format(name))

    if ase.internal_load_balancing_mode == 'None':
        raise ValidationError(
            'Private DNS Zone is not relevant for External ASE.')

    if ase.kind.lower() == 'asev3':
        # pending SDK update (ase_client.get_ase_v3_networking_configuration(resource_group_name, name))
        raise CommandNotFoundError(
            'create-inbound-services is currently not supported for ASEv3.')

    ase_vip_info = ase_client.get_vip_info(resource_group_name, name)
    inbound_ip_address = ase_vip_info.internal_ip_address
    inbound_subnet_id = _validate_subnet_id(cmd.cli_ctx, subnet, vnet_name,
                                            resource_group_name)
    inbound_vnet_id = _get_vnet_id_from_subnet(cmd.cli_ctx, inbound_subnet_id)

    if not skip_dns:
        _ensure_ase_private_dns_zone(cmd.cli_ctx,
                                     resource_group_name=resource_group_name,
                                     name=name,
                                     inbound_vnet_id=inbound_vnet_id,
                                     inbound_ip_address=inbound_ip_address)
    else:
        logger.warning('Parameter --skip-dns is deprecated.')
Example #11
def _get_bicep_installation_path(system):
    if system == "Windows":
        return os.path.join(_bicep_installation_dir, "bicep.exe")
    if system in ("Linux", "Darwin"):
        return os.path.join(_bicep_installation_dir, "bicep")

    raise ValidationError(f'The platform "{system}" is not supported.')
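
A usage sketch, assuming _get_bicep_installation_path and the module-level _bicep_installation_dir from Example #11 are in scope; platform.system() from the standard library returns the 'Windows'/'Linux'/'Darwin' values the function switches on.

import platform

# Hypothetical call to _get_bicep_installation_path from Example #11 (assumed in scope).
print(_get_bicep_installation_path(platform.system()))
# e.g. <_bicep_installation_dir>/bicep on Linux, <_bicep_installation_dir>\bicep.exe on Windows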
Example #12
def update_file_service_properties(cmd,
                                   instance,
                                   enable_delete_retention=None,
                                   delete_retention_days=None):
    from azure.cli.core.azclierror import ValidationError
    # set delete retention policy according to input
    if enable_delete_retention is not None:
        if enable_delete_retention is False:
            delete_retention_days = None
        instance.share_delete_retention_policy = cmd.get_models(
            'DeleteRetentionPolicy')(enabled=enable_delete_retention,
                                     days=delete_retention_days)

    # If already enabled, only update days
    if enable_delete_retention is None and delete_retention_days is not None:
        if instance.share_delete_retention_policy is not None and instance.share_delete_retention_policy.enabled:
            instance.share_delete_retention_policy.days = delete_retention_days
        else:
            raise ValidationError(
                "Delete Retention Policy hasn't been enabled, and you cannot set delete retention days. "
                "Please set --enable-delete-retention as true to enable Delete Retention Policy."
            )

    # Work around a server-side issue: when delete_retention_policy.enabled=False, the returned days is 0
    # TODO: remove this once the server side returns null instead of 0 for days
    if instance.share_delete_retention_policy is not None and instance.share_delete_retention_policy.enabled is False:
        instance.share_delete_retention_policy.days = None

    return instance
Example #13
def validate_environment_location(cmd, location):
    from ._constants import MAX_ENV_PER_LOCATION
    env_list = list_managed_environments(cmd)

    locations = [loc["location"] for loc in env_list]
    locations = list(set(locations))  # remove duplicates

    location_count = {}
    for loc in locations:
        location_count[loc] = len(
            [e for e in env_list if e["location"] == loc])

    disallowed_locations = []
    for _, value in enumerate(location_count):
        if location_count[value] > MAX_ENV_PER_LOCATION - 1:
            disallowed_locations.append(value)

    res_locations = list_environment_locations(cmd)
    res_locations = [
        loc for loc in res_locations if loc not in disallowed_locations
    ]

    allowed_locs = ", ".join(res_locations)

    if location:
        try:
            _ensure_location_allowed(cmd, location, CONTAINER_APPS_RP,
                                     "managedEnvironments")
        except Exception as e:  # pylint: disable=broad-except
            raise ValidationError(
                "You cannot create a Containerapp environment in location {}. List of eligible locations: {}."
                .format(location, allowed_locs)) from e

    if len(res_locations) > 0:
        if not location:
            logger.warning("Creating environment on location {}.".format(
                res_locations[0]))
            return res_locations[0]
        if location in disallowed_locations:
            raise ValidationError(
                "You have more than {} environments in location {}. List of eligible locations: {}."
                .format(MAX_ENV_PER_LOCATION, location, allowed_locs))
        return location
    else:
        raise ValidationError(
            "You cannot create any more environments. Environments are limited to {} per location in a subscription. Please specify an existing environment using --environment."
            .format(MAX_ENV_PER_LOCATION))
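
A standalone sketch of the per-location counting above, using a plain list of environment dicts instead of the live list_managed_environments(cmd) result; MAX_ENV_PER_LOCATION is an assumed value here, the real one comes from ._constants.

# Hypothetical data standing in for list_managed_environments(cmd).
env_list = [{"location": "eastus"}, {"location": "eastus"}, {"location": "westus"}]
MAX_ENV_PER_LOCATION = 2  # assumed limit for the sketch

location_count = {}
for env in env_list:
    location_count[env["location"]] = location_count.get(env["location"], 0) + 1

disallowed_locations = [loc for loc, count in location_count.items()
                        if count > MAX_ENV_PER_LOCATION - 1]
print(disallowed_locations)  # ['eastus']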
Example #14
def update_file_service_properties(cmd,
                                   instance,
                                   enable_delete_retention=None,
                                   delete_retention_days=None,
                                   enable_smb_multichannel=None,
                                   versions=None,
                                   authentication_methods=None,
                                   kerberos_ticket_encryption=None,
                                   channel_encryption=None):
    from azure.cli.core.azclierror import ValidationError
    params = {}
    # set delete retention policy according to input
    if enable_delete_retention is not None:
        if enable_delete_retention is False:
            delete_retention_days = None
        instance.share_delete_retention_policy = cmd.get_models(
            'DeleteRetentionPolicy')(enabled=enable_delete_retention,
                                     days=delete_retention_days)

    # If already enabled, only update days
    if enable_delete_retention is None and delete_retention_days is not None:
        if instance.share_delete_retention_policy is not None and instance.share_delete_retention_policy.enabled:
            instance.share_delete_retention_policy.days = delete_retention_days
        else:
            raise ValidationError(
                "Delete Retention Policy hasn't been enabled, and you cannot set delete retention days. "
                "Please set --enable-delete-retention as true to enable Delete Retention Policy."
            )

    # Work around a server-side issue: when delete_retention_policy.enabled=False, the returned days is 0
    # TODO: remove this once the server side returns null instead of 0 for days
    if instance.share_delete_retention_policy is not None and instance.share_delete_retention_policy.enabled is False:
        instance.share_delete_retention_policy.days = None
    if instance.share_delete_retention_policy:
        params['share_delete_retention_policy'] = instance.share_delete_retention_policy

    # set protocol settings
    if any([
            enable_smb_multichannel is not None, versions,
            authentication_methods, kerberos_ticket_encryption,
            channel_encryption
    ]):
        params['protocol_settings'] = instance.protocol_settings
    if enable_smb_multichannel is not None:
        params['protocol_settings'].smb.multichannel = cmd.get_models(
            'Multichannel')(enabled=enable_smb_multichannel)
    if versions is not None:
        params['protocol_settings'].smb.versions = versions
    if authentication_methods is not None:
        params['protocol_settings'].smb.authentication_methods = authentication_methods
    if kerberos_ticket_encryption is not None:
        params['protocol_settings'].smb.kerberos_ticket_encryption = kerberos_ticket_encryption
    if channel_encryption is not None:
        params['protocol_settings'].smb.channel_encryption = channel_encryption

    return params
Example #15
def normalize_sku_for_staticapp(sku):
    if sku.lower() == 'free':
        return 'Free'
    if sku.lower() == 'standard':
        return 'Standard'
    raise ValidationError(
        "Invalid sku(pricing tier), please refer to command help for valid values"
    )
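
A quick usage sketch, assuming normalize_sku_for_staticapp from Example #15 is in scope.

# Hypothetical calls to normalize_sku_for_staticapp from Example #15 (assumed in scope).
print(normalize_sku_for_staticapp('FREE'))      # 'Free'
print(normalize_sku_for_staticapp('standard'))  # 'Standard'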
Example #16
def _mysql_auto_grow_validator(auto_grow, replication_role, high_availability,
                               instance):
    if auto_grow is None:
        return
    if instance is not None:
        replication_role = instance.replication_role if replication_role is None else replication_role
        high_availability = instance.high_availability.mode if high_availability is None else high_availability
    # if replica, cannot be disabled
    if replication_role != 'None' and auto_grow.lower() == 'disabled':
        raise ValidationError(
            "Auto grow feature for replica server cannot be disabled.")
    # if ha, cannot be disabled
    if high_availability in ['Enabled', 'ZoneRedundant'] and auto_grow.lower() == 'disabled':
        raise ValidationError(
            "Auto grow feature for high availability server cannot be disabled."
        )
Example #17
def validate_update_managed_service_correlation(cmd, namespace):
    client = servicefabric_managed_client_factory(cmd.cli_ctx)
    service = _safe_get_resource(client.services.get,
                                 (namespace.resource_group_name, namespace.cluster_name,
                                  namespace.application_name, namespace.service_name))

    if service is None:
        raise ValidationError("Service '{}' Not Found.".format(namespace.service_name))
Example #18
def _get_app_from_revision(revision):
    if not revision:
        raise ValidationError('Invalid revision. Revision must not be empty')

    revision = revision.split('--')
    revision.pop()
    revision = "--".join(revision)
    return revision
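
A quick sketch of how the revision name is split back into the app name; it assumes _get_app_from_revision from Example #18 is in scope and that revision names follow the '<app-name>--<suffix>' pattern the code relies on.

# Hypothetical call to _get_app_from_revision from Example #18 (assumed in scope).
print(_get_app_from_revision('my-containerapp--abc123'))  # 'my-containerapp'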
Example #19
def validate_key_import_type(ns):
    # Default value of kty is: RSA
    kty = getattr(ns, 'kty', None)
    crv = getattr(ns, 'curve', None)

    if (kty == 'EC' and crv is None) or (kty != 'EC' and crv):
        from azure.cli.core.azclierror import ValidationError
        raise ValidationError('parameter --curve must be specified when --kty is EC, and omitted otherwise.')
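
A sketch exercising the validator above, with argparse.Namespace standing in for the parsed command namespace; validate_key_import_type from Example #19 is assumed to be in scope.

from argparse import Namespace

# Hypothetical namespaces for validate_key_import_type from Example #19 (assumed in scope).
validate_key_import_type(Namespace(kty='EC', curve='P-256'))  # passes
validate_key_import_type(Namespace(kty='RSA', curve=None))    # passes
validate_key_import_type(Namespace(kty='EC', curve=None))     # raises ValidationError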
Example #20
def _validate_subscription_registered(cmd, resource_provider, subscription_id=None):
    if not subscription_id:
        subscription_id = get_subscription_id(cmd.cli_ctx)
    registered = _is_resource_provider_registered(cmd, resource_provider, subscription_id)
    if registered is False:
        raise ValidationError(f'Subscription {subscription_id} is not registered for the {resource_provider} '
                              f'resource provider. Please run "az provider register -n {resource_provider} --wait" '
                              'to register your subscription.')
Example #21
def validate_registry_server(namespace):
    if "create" in namespace.command.lower():
        if namespace.registry_server:
            if not namespace.registry_user or not namespace.registry_pass:
                if ".azurecr.io" not in namespace.registry_server:
                    raise ValidationError(
                        "Usage error: --registry-server, --registry-password and --registry-username are required together if not using Azure Container Registry"
                    )
Example #22
def validate_cpu(namespace):
    if namespace.cpu:
        cpu = namespace.cpu
        try:
            float(cpu)
        except ValueError as e:
            raise ValidationError(
                "Usage error: --cpu must be a number eg. \"0.5\"") from e
Example #23
def validate_georestore_location(db_context, location):
    list_skus_info = get_mysql_list_skus_info(db_context.cmd,
                                              db_context.location)
    geo_paired_regions = list_skus_info['geo_paired_regions']

    if location not in geo_paired_regions:
        raise ValidationError(
            "The region is not paired with the region of the source server. ")
Example #24
def validate_georestore_network(source_server_object, public_access, vnet,
                                subnet):
    if source_server_object.network.public_network_access == 'Disabled' and not any(
        (public_access, vnet, subnet)):
        raise ValidationError(
            "Please specify network parameters if you are geo-restoring a private access server. "
            "Run 'az mysql flexible-server goe-restore --help' command to see examples"
        )
Example #25
def validate_create_managed_service_load_metric(cmd, namespace):
    client = servicefabric_managed_client_factory(cmd.cli_ctx)
    service = _safe_get_resource(client.services.get,
                                 (namespace.resource_group_name, namespace.cluster_name,
                                  namespace.application_name, namespace.service_name))

    if service is None:
        raise ValidationError("Service '{}' Not Found.".format(namespace.service_name))
    if service.properties.service_kind.lower() == ServiceKind.STATELESS.lower():
        if namespace.metric_name is None or namespace.weight is None or namespace.default_load is None:
            raise ValidationError("--metric-name, --weight and --default-load are required")
        if namespace.primary_default_load is not None or namespace.secondary_default_load is not None:
            raise ValidationError(
                "--primary-default-load and --secondary-default-load can only be used for stateful services."
            )
        namespace.default_load = int(namespace.default_load)
    elif service.properties.service_kind.lower() == ServiceKind.STATEFUL.lower():
        if namespace.metric_name is None or namespace.weight is None or \
           namespace.primary_default_load is None or namespace.secondary_default_load is None:
            raise ValidationError("--metric-name, --weight, --primary-default-load and "
                                  "--secondary-default-load are required")
        if namespace.default_load is not None:
            raise ValidationError("--default-load can only be used for stateless services.")
        namespace.primary_default_load = int(namespace.primary_default_load)
        namespace.secondary_default_load = int(namespace.secondary_default_load)
    else:
        raise ValidationError("Invalid --state '%s': service state is not valid." % service.properties.service_kind)
    if any(namespace.metric_name == metric.name for metric in service.properties.service_load_metrics):
        raise ValidationError("Duplicate metric names are not allowed: %s." % namespace.metric_name)
Example #26
def _get_bicep_installation_path(system):
    installation_folder = os.path.join(str(Path.home()), ".azure", "bin")

    if system == "Windows":
        return os.path.join(installation_folder, "bicep.exe")
    if system in ("Linux", "Darwin"):
        return os.path.join(installation_folder, "bicep")

    raise ValidationError(f'The platform "{system}" is not supported.')
Example #27
def validate_ase_create(cmd, namespace):
    # Validate the ASE Name availability
    client = web_client_factory(cmd.cli_ctx)
    resource_type = 'Microsoft.Web/hostingEnvironments'
    if isinstance(namespace.name, str):
        name_validation = client.check_name_availability(
            namespace.name, resource_type)
        if not name_validation.name_available:
            raise ValidationError(name_validation.message)
Example #28
def validate_public_access_server(cmd, client, resource_group_name, server_name):
    if isinstance(client, MySqlFirewallRulesOperations):
        server_operations_client = cf_mysql_flexible_servers(cmd.cli_ctx, '_')
    else:
        server_operations_client = cf_postgres_flexible_servers(cmd.cli_ctx, '_')

    server = server_operations_client.get(resource_group_name, server_name)
    if server.network.public_network_access == 'Disabled':
        raise ValidationError("Firewall rule operations cannot be requested for a private access enabled server.")
Example #29
def _ensure_app_not_exist(client, resource_group, service, name):
    app = None
    try:
        app = client.apps.get(resource_group, service, name)
    except Exception:
        # ignore
        return
    if app:
        raise ValidationError('App {} already exists.'.format(app.id))
Example #30
def await_github_action(cmd, token, repo, branch, name, resource_group_name, timeout_secs=1200):
    from .custom import show_github_action
    from ._clients import PollingAnimation

    start = datetime.utcnow()
    animation = PollingAnimation()
    animation.tick()

    github_repo = get_github_repo(token, repo)

    gh_action_status = "InProgress"
    while gh_action_status == "InProgress":
        time.sleep(SHORT_POLLING_INTERVAL_SECS)
        animation.tick()
        gh_action_status = safe_get(show_github_action(cmd, name, resource_group_name), "properties", "operationState")
        if (datetime.utcnow() - start).seconds >= timeout_secs:
            raise CLIInternalError("Timed out while waiting for the Github action to be created.")
        animation.flush()
    if gh_action_status == "Failed":
        raise CLIInternalError("The Github Action creation failed.")  # TODO ask backend team for a status url / message

    workflow = None
    while workflow is None:
        animation.tick()
        time.sleep(SHORT_POLLING_INTERVAL_SECS)
        workflow = get_workflow(github_repo, name)
        animation.flush()

        if (datetime.utcnow() - start).seconds >= timeout_secs:
            raise CLIInternalError("Timed out while waiting for the Github action to start.")

    runs = workflow.get_runs()
    while runs is None or not [r for r in runs if r.status in ('queued', 'in_progress')]:
        time.sleep(SHORT_POLLING_INTERVAL_SECS)
        runs = workflow.get_runs()
        if (datetime.utcnow() - start).seconds >= timeout_secs:
            raise CLIInternalError("Timed out while waiting for the Github action to be started.")
    runs = [r for r in runs if r.status in ('queued', 'in_progress')]
    runs.sort(key=lambda r: r.created_at, reverse=True)
    run = runs[0]  # run with the latest created_at date that's either in progress or queued
    logger.warning(f"Github action run: https://github.com/{repo}/actions/runs/{run.id}")
    logger.warning("Waiting for deployment to complete...")
    run_id = run.id
    status = run.status
    while status in ('queued', 'in_progress'):
        time.sleep(LONG_POLLING_INTERVAL_SECS)
        animation.tick()
        status = github_repo.get_workflow_run(run_id).status
        animation.flush()
        if (datetime.utcnow() - start).seconds >= timeout_secs:
            raise CLIInternalError("Timed out while waiting for the Github action to complete.")

    animation.flush()  # needed to clear the animation from the terminal
    run = github_repo.get_workflow_run(run_id)
    if run.status != "completed" or run.conclusion != "success":
        raise ValidationError("Github action build or deployment failed. "
                              f"Please see https://github.com/{repo}/actions/runs/{run.id} for more details")