Example #1
    def Run(self, args):
        dataproc = dp.Dataproc(self.ReleaseTrack())
        msgs = dataproc.messages

        template_ref = args.CONCEPTS.template.Parse()
        # TODO(b/109837200) make the dataproc discovery doc parameters consistent
        # Parent() fails for the collection because of projectId/projectsId and
        # regionId/regionsId inconsistencies.
        # parent = template_ref.Parent().RelativePath()
        parent = '/'.join(template_ref.RelativeName().split('/')[0:4])

        if args.source:
            with files.FileReader(args.source) as stream:
                template = util.ReadYaml(message_type=msgs.WorkflowTemplate,
                                         stream=stream,
                                         schema_path=SCHEMA_PATH)
        else:
            template = util.ReadYaml(message_type=msgs.WorkflowTemplate,
                                     stream=sys.stdin,
                                     schema_path=SCHEMA_PATH)

        # Populate id field.
        template.id = template_ref.Name()

        try:
            old_template = dataproc.GetRegionsWorkflowTemplate(template_ref)
        except apitools_exceptions.HttpError as error:
            if error.status_code != 404:
                raise error
            # Template does not exist. Create a new one.
            request = msgs.DataprocProjectsRegionsWorkflowTemplatesCreateRequest(
                parent=parent, workflowTemplate=template)
            return dataproc.client.projects_regions_workflowTemplates.Create(
                request)
        # Update the existing template.
        console_io.PromptContinue(
            message=('Workflow template [{0}] will be overwritten.').format(
                template.id),
            cancel_on_no=True)
        # Populate version field and name field.
        template.version = old_template.version
        template.name = template_ref.RelativeName()
        return dataproc.client.projects_regions_workflowTemplates.Update(
            template)
Example #2
 def _GetTemplateFromFile(self, args, messages):
     if not os.path.exists(args.file_name):
         raise exceptions.BadFileException('No such file [{0}]'.format(
             args.file_name))
     if os.path.isdir(args.file_name):
         raise exceptions.BadFileException('[{0}] is a directory'.format(
             args.file_name))
     try:
         with files.FileReader(args.file_name) as import_file:
             if args.file_format == 'json':
                 return security_policies_utils.SecurityPolicyFromFile(
                     import_file, messages, 'json')
             return security_policies_utils.SecurityPolicyFromFile(
                 import_file, messages, 'yaml')
     except Exception as exp:
         exp_msg = getattr(exp, 'message', six.text_type(exp))
         msg = ('Unable to read security policy config from specified file '
                '[{0}] because [{1}]'.format(args.file_name, exp_msg))
         raise exceptions.BadFileException(msg)
Example #3
def _FindMain(filename):
  """Check filename for 'package main' and 'func main'.

  Args:
    filename: (str) File name to check.

  Returns:
    (bool) True if main is found in filename.
  """
  with files.FileReader(filename) as f:
    found_package = False
    found_func = False
    for line in f:
      if re.match('^package main', line):
        found_package = True
      elif re.match('^func main', line):
        found_func = True
      if found_package and found_func:
        return True
  return False
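For a quick standalone check of the same scan, here is a minimal sketch that writes a throwaway Go file and applies the same two regexes, using a plain open() where the snippet above uses files.FileReader (assumed equivalent here for reading text):

import re
import tempfile

def find_main(filename):
  # Same scan as _FindMain above, with a plain open() standing in for
  # files.FileReader.
  with open(filename) as f:
    found_package = False
    found_func = False
    for line in f:
      if re.match('^package main', line):
        found_package = True
      elif re.match('^func main', line):
        found_func = True
      if found_package and found_func:
        return True
  return False

with tempfile.NamedTemporaryFile('w', suffix='.go', delete=False) as tmp:
  tmp.write('package main\n\nfunc main() {}\n')
print(find_main(tmp.name))  # True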
def LoadBuildArtifactFile(path):
  """Load images from a file containing JSON build data.

  Args:
    path: str, build artifacts file path.

  Returns:
    Docker image name and tag dictionary.
  """
  with files.FileReader(path) as f:  # Returns user-friendly error messages
    try:
      structured_data = yaml.load(f, file_hint=path)
    except yaml.Error as e:
      raise exceptions.ParserError(path, e.inner_error)
    images = {}
    for build in structured_data['builds']:
      # For b/191063894. Supporting both name for now.
      images[build.get('image', build.get('imageName'))] = build['tag']

    return images
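For reference, a minimal sketch of the build-artifacts shape this function expects, with hypothetical image names and tags, parsed here with the standard json module rather than the SDK's yaml wrapper:

import json

# Hypothetical build-artifacts contents; both the 'image' key and the legacy
# 'imageName' key are accepted by the lookup below.
artifacts_json = """
{
  "builds": [
    {"image": "gcr.io/my-project/app", "tag": "v1"},
    {"imageName": "gcr.io/my-project/worker", "tag": "v2"}
  ]
}
"""

structured_data = json.loads(artifacts_json)
images = {}
for build in structured_data['builds']:
  images[build.get('image', build.get('imageName'))] = build['tag']
print(images)
# {'gcr.io/my-project/app': 'v1', 'gcr.io/my-project/worker': 'v2'}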
Example #5
def get_disk_counters():
  """Retrieves disk I/O statistics for all disks.

  Adapted from the psutil module's psutil._pslinux.disk_io_counters:
    http://code.google.com/p/psutil/source/browse/trunk/psutil/_pslinux.py

  Originally distributed under a BSD license.
  Original Copyright (c) 2009, Jay Loden, Dave Daeschler, Giampaolo Rodola.

  Returns:
    A dictionary containing disk names mapped to the disk counters from
    /proc/diskstats.
  """
  # iostat documentation states that sectors are equivalent to blocks and
  # have a size of 512 bytes since 2.4 kernels. This value is needed to
  # calculate the amount of disk I/O in bytes.
  sector_size = 512

  partitions = _get_partitions()

  retdict = {}
  try:
    with files.FileReader('/proc/diskstats') as f:
      lines = f.readlines()
      for line in lines:
        values = line.split()[:11]
        _, _, name, reads, _, rbytes, rtime, writes, _, wbytes, wtime = values
        if name in partitions:
          rbytes = int(rbytes) * sector_size
          wbytes = int(wbytes) * sector_size
          reads = int(reads)
          writes = int(writes)
          rtime = int(rtime)
          wtime = int(wtime)
          retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime)
  # This will catch access denied and file not found errors, which is expected
  # on non-Linux/limited access systems. All other errors will raise as normal.
  except files.Error:
    pass

  return retdict
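A small sketch of how the returned mapping might be consumed; the partition name and counter values below are fabricated, and the field order follows the tuple assembled above:

def summarize_disk_io(counters):
  # counters maps a partition name to the tuple built above:
  # (reads, writes, rbytes, wbytes, rtime, wtime).
  for name, (reads, writes, rbytes, wbytes, rtime, wtime) in sorted(
      counters.items()):
    print('{}: {} reads ({:.1f} MiB), {} writes ({:.1f} MiB)'.format(
        name, reads, rbytes / 2**20, writes, wbytes / 2**20))

# Fabricated entry for a hypothetical 'sda1' partition.
summarize_disk_io({'sda1': (1200, 300, 52428800, 10485760, 4000, 1500)})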
Example #6
    def Run(self, args):
        dataproc = dp.Dataproc(self.ReleaseTrack())
        msgs = dataproc.messages

        if args.source:
            with files.FileReader(args.source) as stream:
                cluster = util.ReadYaml(message_type=msgs.Cluster,
                                        stream=stream,
                                        schema_path=SCHEMA_PATH)
        else:
            cluster = util.ReadYaml(message_type=msgs.Cluster,
                                    stream=sys.stdin,
                                    schema_path=SCHEMA_PATH)

        cluster_ref = util.ParseCluster(args.name, dataproc)
        cluster.clusterName = cluster_ref.clusterName
        cluster.projectId = cluster_ref.projectId

        # Import only supports create, not update (for now).
        return clusters.CreateCluster(dataproc, cluster, args.async_,
                                      args.timeout)
Example #7
def load_path(path, round_trip=False):
    # type: (str, typing.Optional[bool]) -> typing.Any
    """Loads YAML from the given file path.

  Args:
    path: str, A file path to open and read from.
    round_trip: bool, True to use the RoundTripLoader which preserves ordering
      and line numbers.

  Raises:
    YAMLParseError: If the data could not be parsed.
    FileLoadError: If the file could not be opened or read.

  Returns:
    The parsed YAML data.
  """
    try:
        with files.FileReader(path) as fp:
            return load(fp, file_hint=path, round_trip=round_trip)
    except files.Error as e:
        raise FileLoadError(e, f=path)
Example #8
def load_all_path(path):
  """Loads multiple YAML documents from the given file path.

  Args:
    path: str, A file path to open and read from.

  Raises:
    YAMLParseError: If the data could not be parsed.
    FileLoadError: If the file could not be opened or read.

  Yields:
    The parsed YAML data.
  """
  try:
    with files.FileReader(path) as fp:
      for x in load_all(fp, file_hint=path):
        yield x
  except files.Error as e:
    # EnvironmentError is parent of IOError, OSError and WindowsError.
    # Raised when file does not exist or can't be opened/read.
    raise FileLoadError(e, f=path)
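A rough standalone equivalent of iterating multiple YAML documents, using PyYAML's safe_load_all as a stand-in for the SDK's load_all wrapper (which adds file hints and its own error types):

import io
import yaml  # PyYAML, standing in for googlecloudsdk.core.yaml used above.

multi_doc = io.StringIO('name: first\n---\nname: second\n')
for doc in yaml.safe_load_all(multi_doc):
  print(doc)
# {'name': 'first'}
# {'name': 'second'}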
Example #9
def _get_partitions():
  """Retrieves a list of disk partitions.

  Returns:
    An array of partition names as strings.
  """
  partitions = []

  try:
    with files.FileReader('/proc/partitions') as f:
      lines = f.readlines()[2:]
      for line in lines:
        _, _, _, name = line.split()
        if name[-1].isdigit():
          partitions.append(name)
  # This will catch access denied and file not found errors, which is expected
  # on non-Linux/limited access systems. All other errors will raise as normal.
  except files.Error:
    pass

  return partitions
Example #10
def ParseRequirementsFile(requirements_file_path):
    """Parses the given requirements file into a requirements dictionary.

  Expects the file to have one requirement specifier per line. Only performs
  lightweight parsing of the file in order to form an API call. The Composer
  frontend handles validation.

  Args:
    requirements_file_path: Filepath to the requirements file.

  Returns:
    {string: string}, dict mapping from PyPI package name to extras and version
    specifier, if provided.

  Raises:
    Error: if requirements file cannot be read.
  """
    requirements = {}
    try:
        with files.FileReader(requirements_file_path) as requirements_file:
            for requirement_specifier in requirements_file:
                requirement_specifier = requirement_specifier.strip()
                if not requirement_specifier:
                    continue
                package, version = SplitRequirementSpecifier(
                    requirement_specifier)
                # Ensure package not already in entry list.
                if package in requirements:
                    raise Error(
                        'Duplicate package in requirements file: {0}'.format(
                            package))
                requirements[package] = version
            return requirements
    except files.Error:
        # EnvironmentError is parent of IOError, OSError and WindowsError.
        # Raised when file does not exist or can't be opened/read.
        raise Error('Unable to read requirements file {0}'.format(
            requirements_file_path))
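SplitRequirementSpecifier is not shown above; the sketch below uses a simplified stand-in for it to illustrate the kind of mapping the parser produces for a few sample requirement lines:

import re

def split_requirement_specifier(specifier):
  # Simplified stand-in for SplitRequirementSpecifier: separate the package
  # name from whatever follows (extras and/or version specifier).
  match = re.match(r'([A-Za-z0-9._-]+)(.*)', specifier)
  return match.group(1), match.group(2).strip()

requirements = {}
for line in ['numpy==1.24.0', 'scipy', 'requests[security]>=2.0']:
  package, version = split_requirement_specifier(line)
  requirements[package] = version
print(requirements)
# {'numpy': '==1.24.0', 'scipy': '', 'requests': '[security]>=2.0'}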
Example #11
def _ReadTagMapping(file_name):
    """Imports legacy to secure tag mapping from a JSON file."""
    try:
        with files.FileReader(file_name) as f:
            data = json.load(f)
    except FileNotFoundError:
        log.status.Print(
            'File \'{file}\' was not found. Tag mapping was not imported.'.
            format(file=file_name))
        return None
    except OSError:
        log.status.Print(
            'OS error occurred when opening the file \'{file}\'. Tag mapping was not imported.'
            .format(file=file_name))
        return None
    except Exception as e:  # pylint: disable=broad-except
        log.status.Print(
            'Unexpected error occurred when reading the JSON file \'{file}\'. Tag mapping was not imported.'
            .format(file=file_name))
        log.status.Print(repr(e))
        return None

    return data
Example #12
def get_values_for_keys_from_file(file_path, keys):
    """Reads JSON or INI file and returns dict with values for requested keys.

  JSON file keys should be top level.
  INI file sections will be flattened.

  Args:
    file_path (str): Path of JSON or INI file to read.
    keys (list[str]): Search for these keys to return from file.

  Returns:
    Dict[cred_key: cred_value].

  Raises:
    ValueError: The file was the incorrect format.
    KeyError: Duplicate key found.
  """
    result = {}
    real_path = os.path.realpath(os.path.expanduser(file_path))
    with files.FileReader(real_path) as file_reader:
        try:
            file_dict = json.loads(file_reader.read())
            _extract_keys(keys, file_dict, result)
        except json.JSONDecodeError:
            # More file formats to try before raising error.
            config = configparser.ConfigParser()
            try:
                config.read(real_path)
            except configparser.ParsingError:
                raise ValueError(
                    'Source creds file must be JSON or INI format.')
            # Parse all sections of INI file into dict.
            for section in config:
                section_dict = dict(config[section])
                _extract_keys(keys, section_dict, result)

    return result
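_extract_keys is not defined in this snippet; based on the docstring, a plausible minimal sketch (hypothetical implementation, raising on duplicate keys as documented) might look like:

def _extract_keys(keys, source_dict, result):
  # Hypothetical helper: copy each requested key found in source_dict into
  # result, raising on duplicates as the docstring above describes.
  for key in keys:
    if key in source_dict:
      if key in result:
        raise KeyError('Duplicate key found: {}'.format(key))
      result[key] = source_dict[key]

result = {}
_extract_keys(['client_id', 'client_secret'],
              {'client_id': 'abc', 'unrelated': 'x'}, result)
print(result)  # {'client_id': 'abc'}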
Example #13
    def Run(self, args):
        if not os.path.exists(args.file_name):
            raise exceptions.BadFileException('No such file [{0}]'.format(
                args.file_name))
        if os.path.isdir(args.file_name):
            raise exceptions.BadFileException('[{0}] is a directory'.format(
                args.file_name))

        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        ref = self.SECURITY_POLICY_ARG.ResolveAsResource(
            args, holder.resources)

        # Get the imported security policy config.
        try:
            with files.FileReader(args.file_name) as import_file:
                if args.file_format == 'json':
                    imported = security_policies_utils.SecurityPolicyFromFile(
                        import_file, holder.client.messages, 'json')
                else:
                    imported = security_policies_utils.SecurityPolicyFromFile(
                        import_file, holder.client.messages, 'yaml')
        except Exception as exp:
            exp_msg = getattr(exp, 'message', six.text_type(exp))
            msg = (
                'Unable to read security policy config from specified file [{0}] '
                'because [{1}]'.format(args.file_name, exp_msg))
            raise exceptions.BadFileException(msg)

        # Send the change to the service.
        security_policy = client.SecurityPolicy(ref,
                                                compute_client=holder.client)
        security_policy.Patch(security_policy=imported)

        msg = 'Updated [{0}] with config from [{1}].'.format(
            ref.Name(), args.file_name)
        log.status.Print(msg)
Example #14
def LoadMessageFromPath(path, msg_type, msg_friendly_name,
                        skip_camel_case=None):
  """Load a proto message from a file containing JSON or YAML text.

  Args:
    path: The path to a file containing the JSON or YAML data to be decoded.
    msg_type: The protobuf message type to create.
    msg_friendly_name: A readable name for the message type, for use in error
      messages.
    skip_camel_case: Contains proto field names or map keys whose values should
      not have camel case applied.

  Raises:
    files.MissingFileError: If the file does not exist.
    ParserError: If there was a problem parsing the file as a dict.
    ParseProtoException: If there was a problem interpreting the file as the
    given message type.

  Returns:
    Proto message, The message that got decoded.
  """
  with files.FileReader(path) as f:  # Returns user-friendly error messages
    return LoadMessageFromStream(f, msg_type, msg_friendly_name,
                                 skip_camel_case, path)
 def FromJson(cls, path):
     with files.FileReader(path) as f:
         return cls(json.load(f, object_pairs_hook=OrderedDict))
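The object_pairs_hook argument makes json.load return OrderedDicts, preserving the key order found in the file; a standalone sketch:

import io
import json
from collections import OrderedDict

stream = io.StringIO('{"b": 1, "a": 2}')
print(json.load(stream, object_pairs_hook=OrderedDict))
# OrderedDict([('b', 1), ('a', 2)])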
Example #16
    def Run(self, args):
        message = (
            'A personal authentication session will propagate your personal '
            'credentials to the cluster, so make sure you trust the cluster '
            'and the user who created it.')
        console_io.PromptContinue(
            message=message,
            cancel_on_no=True,
            cancel_string='Enabling session aborted by user')
        dataproc = dp.Dataproc(self.ReleaseTrack())

        cluster_ref = args.CONCEPTS.cluster.Parse()
        project = cluster_ref.projectId
        region = cluster_ref.region
        cluster_name = cluster_ref.clusterName
        get_request = dataproc.messages.DataprocProjectsRegionsClustersGetRequest(
            projectId=project, region=region, clusterName=cluster_name)
        cluster = dataproc.client.projects_regions_clusters.Get(get_request)
        cluster_uuid = cluster.clusterUuid

        if args.access_boundary:
            with files.FileReader(args.access_boundary, mode='r') as abf:
                access_boundary_json = abf.read()
        else:
            access_boundary_json = flags.ProjectGcsObjectsAccessBoundary(
                project)

        openssl_executable = args.openssl_command
        if not openssl_executable:
            try:
                openssl_executable = files.FindExecutableOnPath('openssl')
            except ValueError:
                log.fatal(
                    'Could not find openssl on your system. The enable-session '
                    'command requires openssl to be installed.')

        operation_poller = waiter.CloudOperationPollerNoResources(
            dataproc.client.projects_regions_operations,
            lambda operation: operation.name)
        try:
            cluster_key = clusters.ClusterKey(cluster)
            if not cluster_key:
                raise exceptions.PersonalAuthError(
                    'The cluster {} does not support personal auth.'.format(
                        cluster_name))

            with progress_tracker.ProgressTracker(
                    'Injecting initial credentials into the cluster {}'.format(
                        cluster_name),
                    autotick=True):
                self.inject_credentials(dataproc, project, region,
                                        cluster_name, cluster_uuid,
                                        cluster_key, access_boundary_json,
                                        openssl_executable, operation_poller)

            if not args.refresh_credentials:
                return

            update_message = (
                'Periodically refreshing credentials for cluster {}. This'
                ' will continue running until the command is interrupted'
            ).format(cluster_name)

            with progress_tracker.ProgressTracker(update_message,
                                                  autotick=True):
                try:
                    # Cluster keys are periodically regenerated, so fetch the latest
                    # each time we inject credentials.
                    cluster = dataproc.client.projects_regions_clusters.Get(
                        get_request)
                    cluster_key = clusters.ClusterKey(cluster)
                    if not cluster_key:
                        raise exceptions.PersonalAuthError(
                            'The cluster {} does not support personal auth.'.
                            format(cluster_name))

                    failure_count = 0
                    while failure_count < 3:
                        try:
                            time.sleep(30)
                            self.inject_credentials(dataproc, project, region,
                                                    cluster_name, cluster_uuid,
                                                    cluster_key,
                                                    access_boundary_json,
                                                    openssl_executable,
                                                    operation_poller)
                            failure_count = 0
                        except ValueError as err:
                            log.error(err)
                            failure_count += 1
                    raise exceptions.PersonalAuthError(
                        'Credential injection failed three times in a row, giving up...'
                    )
                except (console_io.OperationCancelledError, KeyboardInterrupt):
                    return
        except exceptions.PersonalAuthError as err:
            log.error(err)
            return
Example #17
def GetServices():
    with files.FileReader(_SERVICE_CATALOG_PATH) as f:
        catalog = json.load(f)
        return catalog['services']
Example #18
 def FromJson(cls, path):
     with files.FileReader(path) as f:
         return cls(json.load(f))
Example #19
def read_or_create_download_tracker_file(source_object_resource,
                                         destination_url,
                                         slice_start_byte=0,
                                         existing_file_size=0,
                                         component_number=None,
                                         create=True):
    """Checks for a download tracker file and creates one if it does not exist.

  For normal downloads, if the tracker file exists, the existing_file_size
  in bytes is presumed to have already been downloaded from the server.
  Therefore, existing_file_size becomes the download start point.

  For sliced downloads, the number of bytes previously retrieved from the server
  cannot be determined from existing_file_size. Therefore, it is retrieved
  from the tracker file.

  Args:
    source_object_resource (resource_reference.ObjectResource): Needed for
      object etag and generation.
    destination_url (storage_url.StorageUrl): Destination URL for tracker file.
    slice_start_byte (int): Start byte to use if we cannot find a
      matching tracker file for a download slice.
    existing_file_size (int): Amount of file on disk that already exists.
    component_number (int?): The download component number to find the start
      point for.
    create (bool): Creates tracker file if one could not be found.

  Returns:
    tracker_file_path (str?): The path to the tracker file, if one was used.
    download_start_byte (int): The first byte that still needs to be downloaded.

  Raises:
    ValueCannotBeDeterminedError: Source object resource does not have
      necessary metadata to decide on download start byte.
  """
    if not source_object_resource.etag:
        raise errors.ValueCannotBeDeterminedError(
            'Source object resource is missing etag.')

    tracker_file_path = None
    if (not source_object_resource.size
            or (source_object_resource.size <
                properties.VALUES.storage.resumable_threshold.GetInt())):
        # There is no tracker file for small downloads, so start from scratch.
        return tracker_file_path, slice_start_byte

    if component_number is None:
        tracker_file_type = TrackerFileType.DOWNLOAD
        download_name_for_logger = destination_url.object_name
    else:
        tracker_file_type = TrackerFileType.DOWNLOAD_COMPONENT
        download_name_for_logger = '{} component {}'.format(
            destination_url.object_name, component_number)

    tracker_file_path = get_tracker_file_path(
        destination_url, tracker_file_type, component_number=component_number)
    tracker_file = None
    # Check to see if we already have a matching tracker file.
    try:
        tracker_file = files.FileReader(tracker_file_path)
        if tracker_file_type is TrackerFileType.DOWNLOAD:
            etag_value = tracker_file.readline().rstrip('\n')
            if etag_value == source_object_resource.etag:
                log.debug(
                    'Found tracker file starting at byte {} for {}.'.format(
                        existing_file_size, download_name_for_logger))
                return tracker_file_path, existing_file_size
        elif tracker_file_type is TrackerFileType.DOWNLOAD_COMPONENT:
            component_data = json.loads(tracker_file.read())
            if (component_data['etag'] == source_object_resource.etag
                    and component_data['generation']
                    == source_object_resource.generation):
                start_byte = int(component_data['download_start_byte'])
                log.debug(
                    'Found tracker file starting at byte {} for {}.'.format(
                        start_byte, download_name_for_logger))
                return tracker_file_path, start_byte

    except files.MissingFileError:
        # Cannot read from file.
        pass

    finally:
        if tracker_file:
            tracker_file.close()

    if create:
        log.debug('No matching tracker file for {}.'.format(
            download_name_for_logger))
        if tracker_file_type is TrackerFileType.DOWNLOAD:
            _write_tracker_file(tracker_file_path,
                                source_object_resource.etag + '\n')
        elif tracker_file_type is TrackerFileType.DOWNLOAD_COMPONENT:
            write_download_component_tracker_file(tracker_file_path,
                                                  source_object_resource,
                                                  slice_start_byte)

    # No matching tracker file, so starting point is slice_start_byte.
    return tracker_file_path, slice_start_byte
Example #20
def _CreateGoogleAuthClientConfig(client_id_file=None):
    """Creates a client config from a client id file or gcloud's properties."""
    if client_id_file:
        with files.FileReader(client_id_file) as f:
            return json.load(f)
    return _CreateGoogleAuthClientConfigFromProperties()
  def Run(self, args):
    client = apis.GetClientInstance('storagetransfer', 'v1')
    messages = apis.GetMessagesModule('storagetransfer', 'v1')

    if args.creds_file:
      expanded_file_path = os.path.abspath(os.path.expanduser(args.creds_file))
      with files.FileReader(expanded_file_path) as file_reader:
        try:
          parsed_creds_file = json.load(file_reader)
          account_email = parsed_creds_file['client_email']
          is_service_account = parsed_creds_file['type'] == 'service_account'
        except (ValueError, KeyError) as e:
          log.error(e)
          raise ValueError('Invalid creds file format.'
                           ' Run command with "--help" flag for more details.')
        prefixed_account_email = _get_iam_prefixed_email(
            account_email, is_service_account)
    else:
      account_email = properties.VALUES.core.account.Get()
      is_service_account = creds.IsServiceAccountCredentials(creds_store.Load())
      prefixed_account_email = _get_iam_prefixed_email(account_email,
                                                       is_service_account)

    project_id = properties.VALUES.core.project.Get()
    parsed_project_id = projects_util.ParseProject(project_id)
    project_iam_policy = projects_api.GetIamPolicy(parsed_project_id)

    existing_user_roles = _get_existing_transfer_roles_for_account(
        project_iam_policy, prefixed_account_email, EXPECTED_USER_ROLES)
    log.status.Print('User {} has roles:\n{}'.format(account_email,
                                                     list(existing_user_roles)))
    missing_user_roles = EXPECTED_USER_ROLES - existing_user_roles
    log.status.Print('Missing roles:\n{}'.format(list(missing_user_roles)))

    all_missing_role_tuples = [
        (prefixed_account_email, role) for role in missing_user_roles
    ]

    log.status.Print('***')

    transfer_p4sa_email = client.googleServiceAccounts.Get(
        messages.StoragetransferGoogleServiceAccountsGetRequest(
            projectId=project_id)).accountEmail
    prefixed_transfer_p4sa_email = _get_iam_prefixed_email(
        transfer_p4sa_email, is_service_account=True)

    existing_p4sa_roles = _get_existing_transfer_roles_for_account(
        project_iam_policy, prefixed_transfer_p4sa_email, EXPECTED_P4SA_ROLES)
    log.status.Print('Google-managed transfer account {} has roles:\n{}'.format(
        transfer_p4sa_email, list(existing_p4sa_roles)))
    missing_p4sa_roles = EXPECTED_P4SA_ROLES - existing_p4sa_roles
    log.status.Print('Missing roles:\n{}'.format(list(missing_p4sa_roles)))

    all_missing_role_tuples += [
        (prefixed_transfer_p4sa_email, role) for role in missing_p4sa_roles
    ]

    if args.add_missing or all_missing_role_tuples:
      log.status.Print('***')
      if args.add_missing:
        if all_missing_role_tuples:
          log.status.Print('Adding roles:\n{}'.format(all_missing_role_tuples))
          projects_api.AddIamPolicyBindings(parsed_project_id,
                                            all_missing_role_tuples)
          log.status.Print('***')
          # Source:
          # https://cloud.google.com/iam/docs/granting-changing-revoking-access
          log.status.Print(
              'Done. Permissions typically take seconds to propagate, but,'
              ' in some cases, it can take up to seven minutes.')
        else:
          log.status.Print('No missing roles to add.')
      else:
        log.status.Print('Rerun with --add-missing to add missing roles.')
Example #22
    def Run(self, args):
        api_version = 'v1'
        # If in the future there are differences between API versions, do NOT
        # use this pattern of checking ReleaseTrack. Break this into multiple
        # classes.
        if self.ReleaseTrack() == base.ReleaseTrack.BETA:
            api_version = 'v1beta2'

        if not os.path.exists(args.records_file):
            raise exceptions.ToolException('no such file [{0}]'.format(
                args.records_file))
        if os.path.isdir(args.records_file):
            raise exceptions.ToolException('[{0}] is a directory'.format(
                args.records_file))

        dns = apis.GetClientInstance('dns', api_version)

        # Get the managed-zone.
        zone_ref = util.GetRegistry(api_version).Parse(
            args.zone,
            params={
                'project': properties.VALUES.core.project.GetOrFail,
            },
            collection='dns.managedZones')

        try:
            zone = dns.managedZones.Get(
                dns.MESSAGES_MODULE.DnsManagedZonesGetRequest(
                    project=zone_ref.project,
                    managedZone=zone_ref.managedZone))
        except apitools_exceptions.HttpError as error:
            raise exceptions.HttpException(error)

        # Get the current record-sets.
        current = {}
        for record in list_pager.YieldFromList(
                dns.resourceRecordSets,
                dns.MESSAGES_MODULE.DnsResourceRecordSetsListRequest(
                    project=zone_ref.project, managedZone=zone_ref.Name()),
                field='rrsets'):
            current[(record.name, record.type)] = record

        # Get the imported record-sets.
        try:
            with files.FileReader(args.records_file) as import_file:
                if args.zone_file_format:
                    imported = import_util.RecordSetsFromZoneFile(
                        import_file, zone.dnsName, api_version=api_version)
                else:
                    imported = import_util.RecordSetsFromYamlFile(
                        import_file, api_version=api_version)
        except Exception as exp:
            msg = (
                'unable to read record-sets from specified records-file [{0}] '
                'because [{1}]')
            msg = msg.format(args.records_file, six.text_type(exp))
            raise exceptions.ToolException(msg)

        # Get the change resulting from the imported record-sets.
        change = import_util.ComputeChange(current,
                                           imported,
                                           args.delete_all_existing,
                                           zone.dnsName,
                                           args.replace_origin_ns,
                                           api_version=api_version)
        if not change:
            msg = 'Nothing to do, all the records in [{0}] already exist.'.format(
                args.records_file)
            log.status.Print(msg)
            return None

        # Send the change to the service.
        result = dns.changes.Create(
            dns.MESSAGES_MODULE.DnsChangesCreateRequest(
                change=change, managedZone=zone.name,
                project=zone_ref.project))
        change_ref = util.GetRegistry(api_version).Create(
            collection='dns.changes',
            project=zone_ref.project,
            managedZone=zone.name,
            changeId=result.id)
        msg = 'Imported record-sets from [{0}] into managed-zone [{1}].'.format(
            args.records_file, zone_ref.Name())
        log.status.Print(msg)
        log.CreatedResource(change_ref)
        return result
def read_or_create_download_tracker_file(source_object_resource,
                                         destination_url,
                                         slice_start_byte=None,
                                         component_number=None,
                                         total_components=None):
    """Checks for a download tracker file and creates one if it does not exist.

  Args:
    source_object_resource (resource_reference.ObjectResource): Needed for
      object etag and generation.
    destination_url (storage_url.StorageUrl): Destination URL for tracker file.
    slice_start_byte (int|None): Start byte to use if we cannot find a
      matching tracker file for a download slice.
    component_number (int|None): The download component number to find the start
      point for. Indicates part of a multi-component download.
    total_components (int|None): The number of components in a sliced download.
      Indicates this is the parent tracker for a multi-component operation.

  Returns:
    tracker_file_path (str): The path to the tracker file (found or created).
    found_tracker_file (bool): False if tracker file had to be created.

  Raises:
    ValueCannotBeDeterminedError: Source object resource does not have
      necessary metadata to decide on download start byte.
  """
    if not source_object_resource.etag:
        raise errors.ValueCannotBeDeterminedError(
            'Source object resource is missing etag.')
    if total_components and (slice_start_byte is not None
                             or component_number is not None):
        raise ValueError(
            'total_components indicates this is the parent tracker file for a'
            ' multi-component operation. slice_start_byte and component_number'
            ' cannot be present since this is not for an individual component.'
        )

    if component_number is not None:
        download_name_for_logger = '{} component {}'.format(
            destination_url.object_name, component_number)
        tracker_file_type = TrackerFileType.DOWNLOAD_COMPONENT
    else:
        download_name_for_logger = destination_url.object_name
        if total_components is not None:
            tracker_file_type = TrackerFileType.SLICED_DOWNLOAD
        else:
            tracker_file_type = TrackerFileType.DOWNLOAD

    tracker_file_path = get_tracker_file_path(
        destination_url, tracker_file_type, component_number=component_number)
    log.debug('Searching for tracker file at {}.'.format(tracker_file_path))
    tracker_file = None
    does_tracker_file_match = False
    # Check to see if we already have a matching tracker file.
    try:
        tracker_file = files.FileReader(tracker_file_path)
        if tracker_file_type is TrackerFileType.DOWNLOAD:
            etag_value = tracker_file.readline().rstrip('\n')
            if etag_value == source_object_resource.etag:
                does_tracker_file_match = True
        else:
            component_data = json.loads(tracker_file.read())
            if (component_data['etag'] == source_object_resource.etag
                    and component_data['generation']
                    == source_object_resource.generation):
                if (tracker_file_type is TrackerFileType.SLICED_DOWNLOAD
                        and component_data['total_components']
                        == total_components):
                    does_tracker_file_match = True
                elif tracker_file_type is TrackerFileType.DOWNLOAD_COMPONENT and component_data[
                        'slice_start_byte'] == slice_start_byte:
                    does_tracker_file_match = True

        if does_tracker_file_match:
            log.debug(
                'Found tracker file for {}.'.format(download_name_for_logger))
            return tracker_file_path, True

    except files.MissingFileError:
        # Cannot read from file.
        pass

    finally:
        if tracker_file:
            tracker_file.close()

    if tracker_file:
        # The tracker file exists, but it's not valid.
        delete_download_tracker_files(destination_url)

    log.debug(
        'No matching tracker file for {}.'.format(download_name_for_logger))
    if tracker_file_type is TrackerFileType.DOWNLOAD:
        _write_tracker_file(tracker_file_path,
                            source_object_resource.etag + '\n')
    elif tracker_file_type is TrackerFileType.DOWNLOAD_COMPONENT:
        write_tracker_file_with_component_data(
            tracker_file_path,
            source_object_resource,
            slice_start_byte=slice_start_byte)
    elif tracker_file_type is TrackerFileType.SLICED_DOWNLOAD:
        write_tracker_file_with_component_data(
            tracker_file_path,
            source_object_resource,
            total_components=total_components)
    return tracker_file_path, False
    def Run(self, args):
        api_version = util.GetApiFromTrackAndArgs(self.ReleaseTrack(), args)

        if not os.path.exists(args.records_file):
            raise import_util.RecordsFileNotFound(
                'Specified record file [{0}] not found.'.format(
                    args.records_file))
        if os.path.isdir(args.records_file):
            raise import_util.RecordsFileIsADirectory(
                'Specified record file [{0}] is a directory'.format(
                    args.records_file))

        dns = util.GetApiClient(api_version)

        # Get the managed-zone.
        zone_ref = util.GetRegistry(api_version).Parse(
            args.zone,
            params=util.GetParamsForRegistry(api_version, args),
            collection='dns.managedZones')

        try:
            get_request = dns.MESSAGES_MODULE.DnsManagedZonesGetRequest(
                project=zone_ref.project, managedZone=zone_ref.managedZone)

            if api_version == 'v2' and self._IsBetaOrAlpha():
                get_request.location = args.location

            zone = dns.managedZones.Get(get_request)
        except apitools_exceptions.HttpError as error:
            raise calliope_exceptions.HttpException(error)

        # Get the current record-sets.
        current = {}
        list_request = dns.MESSAGES_MODULE.DnsResourceRecordSetsListRequest(
            project=zone_ref.project, managedZone=zone_ref.Name())

        if api_version == 'v2':
            list_request.location = args.location

        for record in list_pager.YieldFromList(dns.resourceRecordSets,
                                               list_request,
                                               field='rrsets'):
            current[(record.name, record.type)] = record

        # Get the imported record-sets.
        try:
            with files.FileReader(args.records_file) as import_file:
                if args.zone_file_format:
                    imported = import_util.RecordSetsFromZoneFile(
                        import_file, zone.dnsName, api_version=api_version)
                else:
                    imported = import_util.RecordSetsFromYamlFile(
                        import_file,
                        include_extended_records=self._IsAlpha(),
                        api_version=api_version)
        except Exception as exp:
            msg = (
                'Unable to read record-sets from specified records-file [{0}] '
                'because [{1}]')
            msg = msg.format(args.records_file, six.text_type(exp))
            raise import_util.UnableToReadRecordsFile(msg)

        # Get the change resulting from the imported record-sets.
        change = import_util.ComputeChange(current,
                                           imported,
                                           args.delete_all_existing,
                                           zone.dnsName,
                                           args.replace_origin_ns,
                                           api_version=api_version)
        if not change:
            msg = 'Nothing to do, all the records in [{0}] already exist.'.format(
                args.records_file)
            log.status.Print(msg)
            return None

        # Send the change to the service.
        create_request = dns.MESSAGES_MODULE.DnsChangesCreateRequest(
            change=change, managedZone=zone.name, project=zone_ref.project)

        if api_version == 'v2' and self._IsBetaOrAlpha():
            create_request.location = args.location

        result = dns.changes.Create(create_request)
        param = util.GetParamsForRegistry(api_version,
                                          args,
                                          parent='managedZones')
        param['changeId'] = result.id
        change_ref = util.GetRegistry(api_version).Parse(
            line=None, collection='dns.changes', params=param)
        msg = 'Imported record-sets from [{0}] into managed-zone [{1}].'.format(
            args.records_file, zone_ref.Name())
        log.status.Print(msg)
        log.CreatedResource(change_ref)
        return result
def cached_read_json_file(file_path):
    """Convert JSON file to an in-memory dict."""
    with files.FileReader(file_path) as file_reader:
        return json.load(file_reader)
Example #26
def GetADCAsJson():
    """Reads ADC from disk and converts it to a json object."""
    if not os.path.isfile(config.ADCFilePath()):
        return None
    with files.FileReader(config.ADCFilePath()) as f:
        return json.load(f)
Example #27
def read_or_create_download_tracker_file(source_object_resource,
                                         destination_url,
                                         existing_file_size=None,
                                         slice_start_byte=None,
                                         component_number=None,
                                         total_components=None,
                                         create=True):
    """Checks for a download tracker file and creates one if it does not exist.

  For normal downloads, if the tracker file exists, the existing_file_size
  in bytes is presumed to have already been downloaded from the server.
  Therefore, existing_file_size becomes the download start point.

  For sliced downloads, the number of bytes previously retrieved from the server
  cannot be determined from existing_file_size. Therefore, it is retrieved
  from the tracker file.

  Args:
    source_object_resource (resource_reference.ObjectResource): Needed for
      object etag and generation.
    destination_url (storage_url.StorageUrl): Destination URL for tracker file.
    existing_file_size (int): Amount of file on disk that already exists.
    slice_start_byte (int|None): Start byte to use if we cannot find a
      matching tracker file for a download slice.
    component_number (int|None): The download component number to find the start
      point for. Indicates part of a multi-component download.
    total_components (int|None): The number of components in a sliced download.
      Indicates this is the master tracker for a multi-component operation.
    create (bool): Creates tracker file if one could not be found.

  Returns:
    tracker_file_path (str|None): The path to the tracker file, if one was used.
    download_start_byte (int|None): The first byte that still needs to be
      downloaded, if not a sliced download.

  Raises:
    ValueCannotBeDeterminedError: Source object resource does not have
      necessary metadata to decide on download start byte.
  """
    if not source_object_resource.etag:
        raise errors.ValueCannotBeDeterminedError(
            'Source object resource is missing etag.')
    if total_components and (slice_start_byte is not None
                             or component_number is not None):
        raise ValueError(
            'total_components indicates this is the master tracker file for a'
            ' multi-component operation. slice_start_byte and component_number'
            ' cannot be present since this is not for an individual component.'
        )

    if component_number:
        download_name_for_logger = '{} component {}'.format(
            destination_url.object_name, component_number)
        tracker_file_type = TrackerFileType.DOWNLOAD_COMPONENT
    else:
        download_name_for_logger = destination_url.object_name
        if total_components:
            tracker_file_type = TrackerFileType.SLICED_DOWNLOAD
        else:
            tracker_file_type = TrackerFileType.DOWNLOAD

    tracker_file_path = get_tracker_file_path(
        destination_url, tracker_file_type, component_number=component_number)
    tracker_file = None
    # Check to see if we already have a matching tracker file.
    try:
        tracker_file = files.FileReader(tracker_file_path)
        if tracker_file_type is TrackerFileType.DOWNLOAD:
            etag_value = tracker_file.readline().rstrip('\n')
            if etag_value == source_object_resource.etag:
                log.debug(
                    'Found tracker file starting at byte {} for {}.'.format(
                        existing_file_size, download_name_for_logger))
                return tracker_file_path, existing_file_size
        else:
            component_data = json.loads(tracker_file.read())
            if (component_data['etag'] == source_object_resource.etag
                    and component_data['generation']
                    == source_object_resource.generation):
                if (tracker_file_type is TrackerFileType.SLICED_DOWNLOAD
                        and component_data['total_components']
                        == total_components):
                    log.debug(
                        'Found tracker file for sliced download {}.'.format(
                            download_name_for_logger))
                    return tracker_file_path, None
                elif tracker_file_type is TrackerFileType.DOWNLOAD_COMPONENT:
                    # Normal resumable download.
                    start_byte = int(component_data['download_start_byte'])
                    log.debug('Found tracker file starting at byte {} for {}.'.
                              format(start_byte, download_name_for_logger))
                    return tracker_file_path, start_byte

    except files.MissingFileError:
        # Cannot read from file.
        pass

    finally:
        if tracker_file:
            tracker_file.close()

    log.debug(
        'No matching tracker file for {}.'.format(download_name_for_logger))

    start_byte = 0
    if create:
        if tracker_file_type is TrackerFileType.DOWNLOAD:
            _write_tracker_file(tracker_file_path,
                                source_object_resource.etag + '\n')
        elif tracker_file_type is TrackerFileType.DOWNLOAD_COMPONENT:
            write_tracker_file_with_component_data(
                tracker_file_path,
                source_object_resource,
                download_start_byte=slice_start_byte)
            start_byte = slice_start_byte
        elif tracker_file_type is TrackerFileType.SLICED_DOWNLOAD:
            # Delete component tracker files to reset full sliced download.
            delete_download_tracker_files(destination_url)
            write_tracker_file_with_component_data(
                tracker_file_path,
                source_object_resource,
                total_components=total_components)
            start_byte = None

    return tracker_file_path, start_byte