Ejemplo n.º 1
0
    def run_job(self):
        """Upload the raw image file to Azure blob storage via a SAS URL.

        The storage account, container and SAS token are extracted from
        raw_image_upload_location using the sas_url_match pattern
        (groups 1, 2 and 3 respectively, matching the keyword call in
        the credential-based Azure upload job).

        Raises:
            MashUploadException: if raw_image_upload_location does not
                match the expected SAS URL format.
        """
        self.status = SUCCESS
        self.log_callback.info('Uploading image.')

        if self.cloud_image_name:
            # An explicit image name was configured; expand any date
            # placeholder and derive the blob name from it.
            self.cloud_image_name = format_string_with_date(
                self.cloud_image_name)
            self.blob_name = ''.join([self.cloud_image_name, '.vhd'])
        else:
            # Fall back to the names produced by a previous job step.
            self.cloud_image_name = self.status_msg['cloud_image_name']
            self.blob_name = self.status_msg['blob_name']

        build = re.search(sas_url_match, self.raw_image_upload_location)

        if not build:
            # Fail with a clear message instead of an AttributeError
            # on build.group() when the SAS URL cannot be parsed.
            raise MashUploadException(
                'Unable to parse SAS URL: {location}'.format(
                    location=self.raw_image_upload_location))

        upload_azure_file(
            self.blob_name,
            build.group(2),
            self.status_msg['image_file'],
            build.group(1),
            max_retry_attempts=self.config.get_azure_max_retry_attempts(),
            max_workers=self.config.get_azure_max_workers(),
            sas_token=build.group(3),
            is_page_blob=True)
        self.log_callback.info('Uploaded blob: {blob} using sas token.'.format(
            blob=self.blob_name))
Ejemplo n.º 2
0
    def run_job(self):
        """Upload the qcow2 image to an OCI object storage bucket.

        The image is streamed into the configured bucket through the
        OCI UploadManager with parallel uploads enabled; the resulting
        object name and namespace are recorded in status_msg.

        Raises:
            MashUploadException: if use_build_time is set but no build
                time is available in status_msg.
        """
        self.status = SUCCESS
        self.log_callback.info('Uploading image.')

        build_time = self.status_msg.get('build_time', 'unknown')
        timestamp = None

        if self.use_build_time:
            if build_time == 'unknown':
                raise MashUploadException(
                    'use_build_time set for job but build time is unknown.'
                )
            timestamp = timestamp_from_epoch(build_time)

        self.cloud_image_name = format_string_with_date(
            self.base_cloud_image_name,
            timestamp=timestamp
        )

        self.request_credentials([self.account])
        account_credentials = self.credentials[self.account]

        client_config = {
            'user': self.oci_user_id,
            'key_content': account_credentials['signing_key'],
            'fingerprint': account_credentials['fingerprint'],
            'tenancy': self.tenancy,
            'region': self.region
        }
        storage_client = ObjectStorageClient(client_config)
        namespace = storage_client.get_namespace().data
        manager = UploadManager(
            storage_client,
            allow_parallel_uploads=True,
            parallel_process_count=self.upload_process_count
        )

        object_name = self.cloud_image_name + '.qcow2'
        image_path = self.status_msg['image_file']
        # Size is tracked for progress reporting via _progress_callback.
        self._image_size = stat(image_path).st_size

        with open(image_path, 'rb') as image_stream:
            manager.upload_stream(
                namespace,
                self.bucket,
                object_name,
                image_stream,
                progress_callback=self._progress_callback
            )

        self.status_msg['cloud_image_name'] = self.cloud_image_name
        self.status_msg['object_name'] = object_name
        self.status_msg['namespace'] = namespace

        self.log_callback.info(
            'Uploaded image: {0}, to the bucket named: {1}'.format(
                object_name,
                self.bucket
            )
        )
Ejemplo n.º 3
0
    def run_job(self):
        """Upload the image tarball to an Aliyun OSS bucket.

        An existing object with the same name is replaced only when
        force_replace_image is set; otherwise the job fails.

        Raises:
            MashUploadException: if use_build_time is set but the build
                time is unknown, or the object exists and
                force_replace_image is not set.
        """
        self.status = SUCCESS
        self.percent_uploaded = 0
        self.progress_log = {}
        self.log_callback.info('Uploading image.')

        build_time = self.status_msg.get('build_time', 'unknown')
        timestamp = None

        if self.use_build_time:
            if build_time == 'unknown':
                raise MashUploadException(
                    'use_build_time set for job but build time is unknown.')
            timestamp = timestamp_from_epoch(build_time)

        self.cloud_image_name = format_string_with_date(
            self.base_cloud_image_name, timestamp=timestamp)

        self.request_credentials([self.account])
        account_credentials = self.credentials[self.account]

        aliyun_image = AliyunImage(
            account_credentials['access_key'],
            account_credentials['access_secret'],
            self.region,
            self.bucket,
            log_callback=self.log_callback
        )

        object_name = self.cloud_image_name + '.qcow2'

        if aliyun_image.image_tarball_exists(object_name):
            if not self.force_replace_image:
                raise MashUploadException(
                    'Image: {object_name} already exists '
                    'in bucket: {bucket}. Use force_replace_image '
                    'to replace the existing tarball.'.format(
                        object_name=object_name, bucket=self.bucket))
            # Replacement requested: remove the stale object first.
            self.log_callback.info(
                'Deleting image file: {0}, in the bucket named: {1}'.format(
                    object_name, self.bucket))
            aliyun_image.delete_storage_blob(object_name)

        aliyun_image.upload_image_tarball(
            self.status_msg['image_file'],
            blob_name=object_name,
            progress_callback=self.progress_callback)

        self.status_msg['cloud_image_name'] = self.cloud_image_name
        self.status_msg['object_name'] = object_name
        self.log_callback.info(
            'Uploaded image: {0}, to the bucket named: {1}'.format(
                object_name, self.bucket))
Ejemplo n.º 4
0
    def run_job(self):
        """Upload the image file as a page blob to Azure storage.

        An existing blob with the same name is replaced only when
        force_replace_image is set; otherwise the job fails.

        Raises:
            MashUploadException: if use_build_time is set but the build
                time is unknown, or the blob exists and
                force_replace_image is not set.
        """
        self.status = SUCCESS
        self.log_callback.info('Uploading image.')

        build_time = self.status_msg.get('build_time', 'unknown')
        timestamp = None

        if self.use_build_time:
            if build_time == 'unknown':
                raise MashUploadException(
                    'use_build_time set for job but build time is unknown.')
            timestamp = timestamp_from_epoch(build_time)

        self.cloud_image_name = format_string_with_date(
            self.base_cloud_image_name, timestamp=timestamp)
        blob_name = self.cloud_image_name + '.vhd'

        self.request_credentials([self.account])
        account_credentials = self.credentials[self.account]

        blob_found = blob_exists(
            account_credentials,
            blob_name,
            self.container,
            self.resource_group,
            self.storage_account
        )

        if blob_found:
            if not self.force_replace_image:
                raise MashUploadException(
                    'Image tarball: {blob_name} already exists '
                    'in container: {container}. Use force_replace_image '
                    'to replace the existing tarball.'.format(
                        blob_name=blob_name, container=self.container))
            # Replacement requested: remove the stale blob first.
            self.log_callback.info(
                'Deleting tarball: {0}, in the container named: '
                '{1}.'.format(blob_name, self.container))
            delete_blob(
                account_credentials,
                blob_name,
                self.container,
                self.resource_group,
                self.storage_account
            )

        upload_azure_file(
            blob_name,
            self.container,
            self.status_msg['image_file'],
            self.storage_account,
            max_retry_attempts=self.config.get_azure_max_retry_attempts(),
            max_workers=self.config.get_azure_max_workers(),
            credentials=account_credentials,
            resource_group=self.resource_group,
            is_page_blob=True)

        self.status_msg['cloud_image_name'] = self.cloud_image_name
        self.status_msg['blob_name'] = blob_name
        self.log_callback.info(
            'Uploaded image: {0}, to the container: {1}'.format(
                blob_name, self.container))
Ejemplo n.º 5
0
    def run_job(self):
        """Upload the image tarball to a GCE storage bucket.

        An existing tarball with the same name is replaced only when
        force_replace_image is set; otherwise the job fails.

        Raises:
            MashUploadException: if use_build_time is set but the build
                time is unknown, or the tarball exists and
                force_replace_image is not set.
        """
        self.status = SUCCESS
        self.log_callback.info('Uploading image.')

        build_time = self.status_msg.get('build_time', 'unknown')
        timestamp = None

        if self.use_build_time:
            if build_time == 'unknown':
                raise MashUploadException(
                    'use_build_time set for job but build time is unknown.')
            timestamp = timestamp_from_epoch(build_time)

        self.cloud_image_name = format_string_with_date(
            self.base_cloud_image_name, timestamp=timestamp)

        self.request_credentials([self.account])
        account_credentials = self.credentials[self.account]
        driver = get_gce_storage_driver(account_credentials)

        object_name = self.cloud_image_name + '.tar.gz'

        if blob_exists(driver, object_name, self.bucket):
            if not self.force_replace_image:
                raise MashUploadException(
                    'Image tarball: {object_name} already exists '
                    'in bucket: {bucket}. Use force_replace_image '
                    'to replace the existing tarball.'.format(
                        object_name=object_name, bucket=self.bucket))
            # Replacement requested: remove the stale tarball first.
            self.log_callback.info(
                'Deleting tarball: {0}, in the bucket named: {1}'.format(
                    object_name, self.bucket))
            delete_image_tarball(driver, object_name, self.bucket)

        upload_image_tarball(
            driver, object_name, self.status_msg['image_file'], self.bucket)

        self.status_msg['cloud_image_name'] = self.cloud_image_name
        self.status_msg['object_name'] = object_name
        self.log_callback.info(
            'Uploaded image: {0}, to the bucket named: {1}'.format(
                object_name, self.bucket))
Ejemplo n.º 6
0
    def run_job(self):
        """Create a GCE image from the previously uploaded tarball.

        An existing image with the same name is deleted and replaced,
        and a rollout policy is attached to the new image.
        """
        self.status = SUCCESS
        self.log_callback.info('Creating image.')

        self.cloud_image_name = self.status_msg['cloud_image_name']
        object_name = self.status_msg['object_name']

        # Pull the first 8-digit sequence out of the image name to use
        # as the description timestamp. NOTE(review): raises IndexError
        # if the name carries no such sequence — verify upstream naming.
        timestamp = re.findall(r'\d{8}', self.cloud_image_name)[0]
        self.cloud_image_description = format_string_with_date(
            self.base_cloud_image_description, timestamp=timestamp)

        self.request_credentials([self.account])
        account_credentials = self.credentials[self.account]

        project = account_credentials.get('project_id')
        compute_driver = get_gce_compute_driver(
            account_credentials, version='alpha')

        uri = (
            'https://www.googleapis.com/storage/v1/b/' + self.bucket +
            '/o/' + object_name
        )

        if get_gce_image(compute_driver, project, self.cloud_image_name):
            self.log_callback.info(
                'Replacing existing image with the same name.')
            delete_gce_image(compute_driver, project, self.cloud_image_name)

        rollout = create_gce_rollout(compute_driver, project)

        create_gce_image(
            compute_driver,
            project,
            self.cloud_image_name,
            self.cloud_image_description,
            uri,
            family=self.family,
            guest_os_features=self.guest_os_features,
            rollout=rollout
        )

        self.log_callback.info('Created image has ID: {0}'.format(
            self.cloud_image_name))
Ejemplo n.º 7
0
def test_format_string_with_date_error():
    """Call format_string_with_date with an unfilled {timestamp} key.

    NOTE(review): no assertion here — presumably an expected-exception
    decorator sits above this function outside the visible chunk; verify.
    """
    format_string_with_date('Name with a {timestamp}')
Ejemplo n.º 8
0
    def run_job(self):
        """Create the image in every configured EC2 target region.

        For each region: builds an AMI from status_msg['image_file']
        with EC2ImageUploader, using a temporary ssh key pair and (when
        no subnet is configured) a temporary vpc/subnet/security group.
        Created AMI ids are recorded in status_msg['source_regions'].
        On any failure the loop stops and images already created in
        other regions are cleaned up.

        Raises:
            MashUploadException: if use_build_time is set but the build
                time is unknown, or an image with the target name exists
                and force_replace_image is not set.
        """
        self.status = SUCCESS
        self.status_msg['source_regions'] = {}
        self.log_callback.info('Creating image.')

        timestamp = None
        build_time = self.status_msg.get('build_time', 'unknown')

        if self.use_build_time and (build_time != 'unknown'):
            timestamp = timestamp_from_epoch(build_time)
        elif self.use_build_time and (build_time == 'unknown'):
            raise MashUploadException(
                'use_build_time set for job but build time is unknown.')

        self.cloud_image_name = format_string_with_date(
            self.base_cloud_image_name, timestamp=timestamp)
        self.status_msg['cloud_image_name'] = self.cloud_image_name

        # Baseline parameters for EC2ImageUploader; the per-region and
        # per-account values (launch ami, keys, subnet, ...) are filled
        # in inside the loop below.
        self.ec2_upload_parameters = {
            'image_name': self.cloud_image_name,
            'image_description': self.cloud_image_description,
            'ssh_key_pair_name': None,
            'image_arch': self.arch,
            'launch_ami': None,
            'use_grub2': True,
            'use_private_ip': False,
            'root_volume_size': 10,
            'image_virt_type': 'hvm',
            'launch_inst_type': 't2.micro',
            'bootkernel': None,
            'inst_user_name': 'ec2-user',
            'ssh_timeout': 300,
            'wait_count': 3,
            'vpc_subnet_id': '',
            'ssh_key_private_key_file': None,
            'security_group_ids': '',
            'sriov_type': 'simple',
            'access_key': None,
            'ena_support': True,
            'backing_store': 'gp3',
            'running_id': None,
            'secret_key': None,
            'billing_codes': None,
            'log_callback': self.log_callback
        }

        # Get all account credentials in one request
        accounts = []
        for region, info in self.target_regions.items():
            accounts.append(info['account'])

        self.request_credentials(accounts)

        for region, info in self.target_regions.items():
            self.status_msg['source_regions'][region] = None
            ssh_key_pair = None
            # Initialized up front so the finally block can safely test
            # it even when an exception fires before EC2Setup is built.
            ec2_setup = None
            account = info['account']
            credentials = self.credentials[account]

            use_root_swap = info['use_root_swap']
            self.ec2_upload_parameters['launch_ami'] = info['helper_image']
            self.ec2_upload_parameters['billing_codes'] = \
                info['billing_codes']

            self.ec2_upload_parameters['access_key'] = \
                credentials['access_key_id']
            self.ec2_upload_parameters['secret_key'] = \
                credentials['secret_access_key']

            try:
                ec2_client = get_client('ec2', credentials['access_key_id'],
                                        credentials['secret_access_key'],
                                        region)

                exists = image_exists(ec2_client, self.cloud_image_name)
                if exists and not self.force_replace_image:
                    raise MashUploadException(
                        '{image_name} already exists. '
                        'Use force_replace_image to '
                        'replace the existing image.'.format(
                            image_name=self.cloud_image_name))
                elif exists and self.force_replace_image:
                    cleanup_all_ec2_images(credentials['access_key_id'],
                                           credentials['secret_access_key'],
                                           self.log_callback, info['regions'],
                                           self.cloud_image_name)

                # NOTE: Temporary ssh keys:
                # The temporary creation and registration of a ssh key pair
                # is considered a workaround implementation which should be better
                # covered by the EC2ImageUploader code. Due to a lack of
                # development resources in the ec2utils.ec2uploadimg project and
                # other peoples concerns for just using a generic mash ssh key
                # for the upload, the private _create_key_pair and _delete_key_pair
                # methods exists and could be hopefully replaced by a better
                # concept in the near future.
                ssh_key_pair = self._create_key_pair(ec2_client)

                self.ec2_upload_parameters['ssh_key_pair_name'] = \
                    ssh_key_pair.name
                self.ec2_upload_parameters['ssh_key_private_key_file'] = \
                    ssh_key_pair.private_key_file.name

                # Create a temporary vpc, subnet and security group for the
                # helper image, unless a subnet was specified.
                # This provides a security group with an open ssh port.
                ec2_setup = EC2Setup(credentials['access_key_id'],
                                     region,
                                     credentials['secret_access_key'],
                                     None,
                                     log_callback=self.log_callback)

                subnet_id = info.get('subnet')
                if subnet_id:
                    vpc_id = get_vpc_id_from_subnet(ec2_client, subnet_id)
                    security_group_id = ec2_setup.create_security_group(
                        vpc_id=vpc_id)
                else:
                    subnet_id = ec2_setup.create_vpc_subnet()
                    security_group_id = ec2_setup.create_security_group()

                self.ec2_upload_parameters['vpc_subnet_id'] = subnet_id
                self.ec2_upload_parameters['security_group_ids'] = \
                    security_group_id

                ec2_upload = EC2ImageUploader(**self.ec2_upload_parameters)

                ec2_upload.set_region(region)

                if use_root_swap:
                    ami_id = ec2_upload.create_image_use_root_swap(
                        self.status_msg['image_file'])
                else:
                    ami_id = ec2_upload.create_image(
                        self.status_msg['image_file'])

                self.status_msg['source_regions'][region] = ami_id
                self.log_callback.info(
                    'Created image has ID: {0} in region {1}'.format(
                        ami_id, region))
            except Exception as error:
                self.status = FAILED
                msg = 'Image creation in account {0} failed with: {1}'.format(
                    account, error)
                self.add_error_msg(msg)
                self.log_callback.error(msg)
                break  # No need to continue if one account fails
            finally:
                # Always tear down the temporary resources created for
                # this region. ssh_key_pair implies ec2_client exists;
                # ec2_setup is guarded separately because it may not
                # have been created yet when an exception fired.
                if ssh_key_pair:
                    self._delete_key_pair(ec2_client, ssh_key_pair)
                if ec2_setup:
                    ec2_setup.clean_up()

        if self.status != SUCCESS:
            # Roll back AMIs already created in regions that succeeded
            # before the failure; cleanup failures are logged, not raised.
            for region, info in self.target_regions.items():
                credentials = self.credentials[info['account']]

                if self.status_msg['source_regions'].get(region):
                    # Only cleanup regions that passed

                    try:
                        cleanup_ec2_image(
                            credentials['access_key_id'],
                            credentials['secret_access_key'],
                            self.log_callback,
                            region,
                            image_id=self.status_msg['source_regions'][region])
                    except Exception as error:
                        self.log_callback.warning(
                            'Failed to cleanup image: {0} in region {1}.'
                            ' {2}'.format(
                                self.status_msg['source_regions'][region],
                                region, error))