def replace_disk(self, index, disk):
    '''
    :param index: RAID disk index. Starts from 0
    :type index: int
    :param disk: Replacement disk
    :type disk: dict/Volume
    '''
    # Look up the disk to replace before doing anything else, so the
    # cleanup handler below can always reference it
    disk_find = self.disks[index]
    disk_replace = storage2.volume(disk)
    replace_is_new = not disk_replace.id
    try:
        disk_replace.ensure()
        mdadm.mdadm('manage', self.raid_pv, '--fail', disk_find.device)
        mdadm.mdadm('manage', self.raid_pv, '--remove', disk_find.device)
        mdadm.mdadm('manage', self.raid_pv, '--add', disk_replace.device)
        self.disks[index] = disk_replace
    except:
        with util.capture_exception(logger=LOG):
            if replace_is_new:
                disk_replace.destroy(force=True)
            else:
                disk_find.destroy(force=True)
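# Hedged usage sketch (not part of the original source). The raid volume
# configuration keys (type, level, vg, disks) are assumptions inferred from
# _ensure() below; the disk specs are purely illustrative.
#
#   raid = storage2.volume(
#           type='raid', level=1, vg='dbstorage',
#           disks=[{'type': 'loop', 'size': 1}, {'type': 'loop', 'size': 1}])
#   raid.ensure()
#   # Swap member 0 for a freshly created volume of the same spec
#   raid.replace_disk(0, {'type': 'loop', 'size': 1})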
def _ensure(self):
    self._v1_compat = self.snap and len(self.snap['disks']) and \
            isinstance(self.snap['disks'][0], dict) and \
            'snapshot' in self.snap['disks'][0]
    if self.snap:
        disks = []
        snaps = []
        try:
            # @todo: create disks concurrently
            for disk_snap in self.snap['disks']:
                if self._v1_compat:
                    disk_snap = disk_snap['snapshot']
                snap = storage2.snapshot(disk_snap)
                snaps.append(snap)

            if self.disks:
                if len(self.disks) != len(snaps):
                    raise storage2.StorageError(
                            'Volume disks count is not equal to '
                            'snapshot disks count')
                self.disks = map(storage2.volume, self.disks)

            # Mixing snapshots into self.disks (if they exist) or empty volumes
            disks = self.disks or [storage2.volume(type=s['type']) for s in snaps]
            for disk, snap in zip(disks, snaps):
                disk.snap = snap
        except:
            with util.capture_exception(logger=LOG):
                for disk in disks:
                    disk.destroy()

        self.disks = disks

        if self._v1_compat:
            # In some old snapshots /dev/vgname occurred
            self.vg = os.path.basename(self.snap['vg'])
        else:
            self.vg = self.snap['vg']
        self.level = int(self.snap['level'])
        self.pv_uuid = self.snap['pv_uuid']
        self.lvm_group_cfg = self.snap['lvm_group_cfg']
        self.snap = None

    self._check_attr('level')
    self._check_attr('vg')
    self._check_attr('disks')
    assert int(self.level) in (0, 1, 5, 10), \
            'Unknown raid level: %s' % self.level

    # Make sure autoassembly is disabled before attaching disks
    self._disable_autoassembly()

    disks = []
    for disk in self.disks:
        disk = storage2.volume(disk)
        disk.ensure()
        disks.append(disk)
    self.disks = disks

    disks_devices = [disk.device for disk in self.disks]

    if self.lvm_group_cfg:
        time.sleep(2)  # Give the device manager some time
        try:
            raid_device = mdadm.mdfind(*disks_devices)
        except storage2.StorageError:
            raid_device = mdadm.findname()
            """
            if self.level in (1, 10):
                for disk in disks_devices:
                    mdadm.mdadm('misc', None, disk,
                                zero_superblock=True, force=True)
                try:
                    kwargs = dict(force=True, metadata='default',
                                  level=self.level, assume_clean=True,
                                  raid_devices=len(disks_devices))
                    mdadm.mdadm('create', raid_device, *disks_devices, **kwargs)
                except:
                    if self.level == 10 and self._v1_compat:
                        self._v1_repair_raid10(raid_device)
                    else:
                        raise
            else:
            """
            mdadm.mdadm('assemble', raid_device, *disks_devices)
            mdadm.mdadm('misc', None, raid_device, wait=True, raise_exc=False)

        # Restore vg config
        vg_restore_file = tempfile.mktemp()
        with open(vg_restore_file, 'w') as f:
            f.write(base64.b64decode(self.lvm_group_cfg))

        # Ensure RAID physical volume
        try:
            lvm2.pvs(raid_device)
        except:
            lvm2.pvcreate(raid_device,
                          uuid=self.pv_uuid,
                          restorefile=vg_restore_file)
        finally:
            lvm2.vgcfgrestore(self.vg, file=vg_restore_file)
            os.remove(vg_restore_file)

        # Check that a logical volume exists
        lv_infos = lvm2.lvs(self.vg)
        if not lv_infos:
            raise storage2.StorageError(
                    'No logical volumes found in %s vol. group' % self.vg)
        lv_name = lv_infos.popitem()[1].lv_name
        self.device = lvm2.lvpath(self.vg, lv_name)

        # Activate volume group
        lvm2.vgchange(self.vg, available='y')

        # Wait for the logical volume device file
        util.wait_until(lambda: os.path.exists(self.device),
                        timeout=120, logger=LOG,
                        error_text='Logical volume %s not found' % self.device)
    else:
        raid_device = mdadm.findname()
        kwargs = dict(force=True, level=self.level, assume_clean=True,
                      raid_devices=len(disks_devices), metadata='default')
        mdadm.mdadm('create', raid_device, *disks_devices, **kwargs)
        mdadm.mdadm('misc', None, raid_device, wait=True, raise_exc=False)

        lvm2.pvcreate(raid_device, force=True)
        self.pv_uuid = lvm2.pvs(raid_device)[raid_device].pv_uuid

        lvm2.vgcreate(self.vg, raid_device)
        out, err = lvm2.lvcreate(self.vg, extents='100%FREE')[:2]
        try:
            clean_out = out.strip().split('\n')[-1].strip()
            vol = re.match(self.lv_re, clean_out).group(1)
            self.device = lvm2.lvpath(self.vg, vol)
        except:
            e = 'Logical volume creation failed: %s\n%s' % (out, err)
            raise Exception(e)

        self.lvm_group_cfg = lvm2.backup_vg_config(self.vg)

    self.raid_pv = raid_device
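# For reference, the lvm_group_cfg restore branch above corresponds roughly
# to this shell session (device and VG names are illustrative):
#
#   mdadm --assemble /dev/md0 /dev/sdb /dev/sdc
#   mdadm --misc --wait /dev/md0
#   pvcreate --uuid <pv_uuid> --restorefile /tmp/vg_backup /dev/md0
#   vgcfgrestore --file /tmp/vg_backup dbstorage
#   vgchange -a y dbstorage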
def rebundle(self):
    rebundle_dir = tempfile.mkdtemp()
    try:
        pl = bus.platform
        proj_id = pl.get_numeric_project_id()
        proj_name = pl.get_project_id()
        cloudstorage = pl.new_storage_client()

        root_part_path = os.path.realpath('/dev/root')
        root_part_sysblock_path = glob.glob(
                '/sys/block/*/%s' % os.path.basename(root_part_path))[0]
        root_device = '/dev/%s' % os.path.basename(
                os.path.dirname(root_part_sysblock_path))

        arch_name = '%s.tar.gz' % self._role_name.lower()
        arch_path = os.path.join(rebundle_dir, arch_name)

        # Update gcimagebundle
        try:
            pkgmgr.latest(self.gcimagebundle_pkg_name)
        except:
            e = sys.exc_info()[1]
            LOG.warn('Gcimagebundle update failed: %s' % e)

        if os_dist.redhat_family:
            semanage = software.which('semanage')
            if not semanage:
                pkgmgr.installed('policycoreutils-python')
                semanage = software.which('semanage')
            util.system2((semanage, 'permissive', '-a', 'rsync_t'))

        gc_img_bundle_bin = software.which('gcimagebundle')
        o, e, p = util.system2(
                (gc_img_bundle_bin,
                 '-d', root_device,
                 '-e', ','.join(self.exclude_dirs),
                 '-o', rebundle_dir,
                 '--output_file_name', arch_name),
                raise_exc=False)
        if p:
            raise HandlerError(
                    'Gcimagebundle util returned non-zero code %s. '
                    'Stderr: %s' % (p, e))

        try:
            LOG.info('Uploading compressed image to cloud storage')
            tmp_bucket_name = 'scalr-images-%s-%s' % (
                    random.randint(1, 1000000), int(time.time()))
            remote_path = 'gcs://%s/%s' % (tmp_bucket_name, arch_name)
            arch_size = os.stat(arch_path).st_size
            uploader = FileTransfer(src=arch_path, dst=remote_path)

            try:
                upload_result = uploader.run()
                if upload_result['failed']:
                    errors = [str(failed['exc_info'][1])
                              for failed in upload_result['failed']]
                    raise HandlerError('Image upload failed. Errors:\n%s' %
                                       '\n'.join(errors))
                assert arch_size == upload_result['completed'][0]['size']
            except:
                with util.capture_exception(LOG):
                    objs = cloudstorage.objects()
                    objs.delete(bucket=tmp_bucket_name,
                                object=arch_name).execute()
                    cloudstorage.buckets().delete(
                            bucket=tmp_bucket_name).execute()
        finally:
            os.unlink(arch_path)
    finally:
        shutil.rmtree(rebundle_dir)

    goog_image_name = self._role_name.lower().replace('_', '-') + \
            '-' + str(int(time.time()))
    try:
        LOG.info('Registering new image %s' % goog_image_name)
        compute = pl.new_compute_client()

        image_url = 'http://storage.googleapis.com/%s/%s' % (
                tmp_bucket_name, arch_name)

        req_body = dict(
                name=goog_image_name,
                sourceType='RAW',
                rawDisk=dict(source=image_url))

        req = compute.images().insert(project=proj_id, body=req_body)
        operation = req.execute()['name']

        LOG.info('Waiting for image to register')
        def image_is_ready():
            req = compute.globalOperations().get(project=proj_id,
                                                 operation=operation)
            res = req.execute()
            if res['status'] == 'DONE':
                if res.get('error'):
                    errors = []
                    for e in res['error']['errors']:
                        err_text = '%s: %s' % (e['code'], e['message'])
                        errors.append(err_text)
                    raise Exception('\n'.join(errors))
                return True
            return False
        util.wait_until(image_is_ready, logger=LOG, timeout=600)
    finally:
        try:
            objs = cloudstorage.objects()
            objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
            cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()
        except:
            e = sys.exc_info()[1]
            LOG.error('Failed to remove image compressed source: %s' % e)

    return '%s/images/%s' % (proj_name, goog_image_name)
def rebundle(self):
    rebundle_dir = tempfile.mkdtemp()
    try:
        pl = bus.platform
        proj_id = pl.get_numeric_project_id()
        proj_name = pl.get_project_id()
        cloudstorage = pl.new_storage_client()

        tmp_mount_dir = os.path.join(rebundle_dir, 'root')
        os.makedirs(tmp_mount_dir)

        image_name = 'disk.raw'
        image_path = os.path.join(rebundle_dir, image_name)

        root_size = coreutils.statvfs('/')['size']
        LOG.debug('Creating image file %s' % image_path)
        with open(image_path, 'w') as f:
            f.truncate(root_size + 1*1024)

        try:
            LOG.debug('Creating partition table on image')
            system(('parted', image_path, 'mklabel', 'msdos'))
            system(('parted', image_path, 'mkpart', 'primary', 'ext2', 1,
                    str(root_size / (1024*1024))))

            # Map disk image
            out = system(('kpartx', '-av', image_path))[0]
            try:
                loop = re.search(r'(/dev/loop\d+)', out).group(1)
                root_dev_name = '/dev/mapper/%sp1' % loop.split('/')[-1]

                LOG.info('Creating filesystem')
                storage2.filesystem('ext4').mkfs(root_dev_name)
                dev_uuid = uuid.uuid4()
                system(('tune2fs', '-U', str(dev_uuid), root_dev_name))

                mount.mount(root_dev_name, tmp_mount_dir)
                try:
                    lines = system(('/bin/mount', '-l'))[0].splitlines()
                    exclude_dirs = set()
                    for line in lines:
                        mpoint = line.split()[2]
                        if mpoint != '/':
                            exclude_dirs.add(mpoint)
                    exclude_dirs.update(self.exclude_dirs)

                    excludes = [os.path.join(ex, '**') for ex in exclude_dirs]
                    excludes.extend(self.exclude_files)
                    excludes.extend(self._excludes)

                    LOG.info('Copying root filesystem to image')
                    rsync('/', tmp_mount_dir, archive=True,
                          hard_links=True, times=True, sparse=True,
                          exclude=excludes)

                    LOG.info('Cleanup image')
                    self._create_spec_devices(tmp_mount_dir)

                    LOG.debug('Removing roles-builder user')
                    sh = pexpect.spawn('/bin/sh')
                    try:
                        sh.sendline('chroot %s' % tmp_mount_dir)
                        sh.expect('#')
                        sh.sendline('userdel -rf %s' % ROLEBUILDER_USER)
                        sh.expect('#')
                    finally:
                        sh.close()

                    # Patch fstab
                    fstab_path = os.path.join(tmp_mount_dir, 'etc/fstab')
                    if os.path.exists(fstab_path):
                        with open(fstab_path) as f:
                            fstab = f.read()
                        new_fstab = re.sub(r'UUID=\S+\s+/\s+(.*)',
                                           'UUID=%s / \\1' % dev_uuid, fstab)
                        with open(fstab_path, 'w') as f:
                            f.write(new_fstab)
                finally:
                    mount.umount(root_dev_name)
            finally:
                system(('kpartx', '-d', image_path))

            LOG.info('Compressing image.')
            arch_name = '%s.tar.gz' % self._role_name.lower()
            arch_path = os.path.join(rebundle_dir, arch_name)
            tar = Tar()
            tar.create().gzip().sparse()
            tar.archive(arch_path)
            tar.add(image_name, rebundle_dir)
            system(str(tar), shell=True)
        finally:
            os.unlink(image_path)

        try:
            LOG.info('Uploading compressed image to cloud storage')
            tmp_bucket_name = 'scalr-images-%s-%s' % (
                    random.randint(1, 1000000), int(time.time()))
            remote_path = 'gcs://%s/%s' % (tmp_bucket_name, arch_name)
            arch_size = os.stat(arch_path).st_size
            uploader = FileTransfer(src=arch_path, dst=remote_path)

            try:
                upload_result = uploader.run()
                if upload_result['failed']:
                    errors = [str(failed['exc_info'][1])
                              for failed in upload_result['failed']]
                    raise HandlerError('Image upload failed. Errors:\n%s' %
                                       '\n'.join(errors))
                assert arch_size == upload_result['completed'][0]['size']
            except:
                with capture_exception(LOG):
                    objs = cloudstorage.objects()
                    objs.delete(bucket=tmp_bucket_name,
                                object=arch_name).execute()
                    cloudstorage.buckets().delete(
                            bucket=tmp_bucket_name).execute()
        finally:
            os.unlink(arch_path)
    finally:
        shutil.rmtree(rebundle_dir)

    try:
        goog_image_name = self._role_name.lower().replace('_', '-')
        LOG.info('Registering new image %s' % goog_image_name)
        # TODO: check duplicate names
        compute = pl.new_compute_client()

        # Get this instance's kernel
        instance_id = pl.get_instance_id()
        zone = os.path.basename(pl.get_zone())
        all_instances = compute.instances().list(
                project=proj_id, zone=zone,
                fields="items(kernel,id)").execute()['items']
        try:
            kernel = filter(lambda inst: inst['id'] == instance_id,
                            all_instances)[0]['kernel']
        except KeyError:
            # Looks like this instance was started from an image,
            # get the kernel from the image
            try:
                current_image = pl.get_image()
                current_image_fq = current_image.split('/')
                current_img_project = current_image_fq[1]
                current_img_name = current_image_fq[3]
                current_img_obj = compute.images().get(
                        project=current_img_project,
                        image=current_img_name).execute()
                kernel = current_img_obj['preferredKernel']
            except:
                raise HandlerError('Could not obtain kernel for this instance')

        image_url = 'http://storage.googleapis.com/%s/%s' % (
                tmp_bucket_name, arch_name)

        req_body = dict(
                name=goog_image_name,
                sourceType='RAW',
                preferredKernel=kernel,
                rawDisk=dict(
                        containerType='TAR',
                        source=image_url))

        req = compute.images().insert(project=proj_id, body=req_body)
        operation = req.execute()['name']

        LOG.info('Waiting for image to register')
        def image_is_ready():
            req = compute.globalOperations().get(project=proj_id,
                                                 operation=operation)
            res = req.execute()
            if res['status'] == 'DONE':
                if res.get('error'):
                    errors = []
                    for e in res['error']['errors']:
                        err_text = '%s: %s' % (e['code'], e['message'])
                        errors.append(err_text)
                    raise Exception('\n'.join(errors))
                return True
            return False
        wait_until(image_is_ready, logger=LOG, timeout=600)
    finally:
        try:
            objs = cloudstorage.objects()
            objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
            cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()
        except:
            e = sys.exc_info()[1]
            LOG.error('Failed to remove image compressed source: %s' % e)

    return '%s/images/%s' % (proj_name, goog_image_name)
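# For reference, the image preparation above maps roughly onto this shell
# session (paths, sizes, and device names are illustrative):
#
#   parted disk.raw mklabel msdos
#   parted disk.raw mkpart primary ext2 1 <root_size_mb>
#   kpartx -av disk.raw                     # maps /dev/mapper/loop0p1
#   mkfs.ext4 /dev/mapper/loop0p1
#   tune2fs -U <uuid> /dev/mapper/loop0p1
#   mount /dev/mapper/loop0p1 /mnt/root
#   rsync -a --hard-links --times --sparse --exclude=... / /mnt/root
#   umount /mnt/root
#   kpartx -d disk.raw
#   tar --gzip --sparse -cf role.tar.gz disk.raw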
def _remove_bucket(self, bucket_name, object, cloudstorage):
    with util.capture_exception(LOG):
        objs = cloudstorage.objects()
        objs.delete(bucket=bucket_name, object=object).execute()
        cloudstorage.buckets().delete(bucket=bucket_name).execute()
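# Hedged usage sketch (not from the original source): the rebundle()
# variants above inline this cleanup in their failure and finally paths;
# with the helper, each of those paths reduces to:
#
#   except:
#       self._remove_bucket(tmp_bucket_name, arch_name, cloudstorage)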
def _ensure(self):
    self._v1_compat = self.snap and len(self.snap['disks']) and \
            isinstance(self.snap['disks'][0], dict) and \
            'snapshot' in self.snap['disks'][0]
    if self.snap:
        disks = []
        snaps = []
        try:
            # @todo: create disks concurrently
            for disk_snap in self.snap['disks']:
                if self._v1_compat:
                    disk_snap = disk_snap['snapshot']
                snap = storage2.snapshot(disk_snap)
                snaps.append(snap)

            if self.disks:
                if len(self.disks) != len(snaps):
                    raise storage2.StorageError(
                            'Volume disks count is not equal to '
                            'snapshot disks count')
                self.disks = map(storage2.volume, self.disks)

            # Mixing snapshots into self.disks (if they exist) or empty volumes
            disks = self.disks or [storage2.volume(type=s['type']) for s in snaps]
            for disk, snap in zip(disks, snaps):
                disk.snap = snap
        except:
            with util.capture_exception(logger=LOG):
                for disk in disks:
                    disk.destroy()

        self.disks = disks

        if self._v1_compat:
            # In some old snapshots /dev/vgname occurred
            self.vg = os.path.basename(self.snap['vg'])
        else:
            self.vg = self.snap['vg']
        self.level = int(self.snap['level'])
        self.pv_uuid = self.snap['pv_uuid']
        self.lvm_group_cfg = self.snap['lvm_group_cfg']
        self.snap = None

    self._check_attr('level')
    self._check_attr('vg')
    self._check_attr('disks')
    assert int(self.level) in (0, 1, 5, 10), \
            'Unknown raid level: %s' % self.level

    disks = []
    for disk in self.disks:
        disk = storage2.volume(disk)
        disk.ensure()
        disks.append(disk)
    self.disks = disks

    disks_devices = [disk.device for disk in self.disks]

    if self.lvm_group_cfg:
        try:
            raid_device = mdadm.mdfind(*disks_devices)
        except storage2.StorageError:
            raid_device = mdadm.findname()
            """
            if self.level in (1, 10):
                for disk in disks_devices:
                    mdadm.mdadm('misc', None, disk,
                                zero_superblock=True, force=True)
                try:
                    kwargs = dict(force=True, metadata='default',
                                  level=self.level, assume_clean=True,
                                  raid_devices=len(disks_devices))
                    mdadm.mdadm('create', raid_device, *disks_devices, **kwargs)
                except:
                    if self.level == 10 and self._v1_compat:
                        self._v1_repair_raid10(raid_device)
                    else:
                        raise
            else:
            """
            mdadm.mdadm('assemble', raid_device, *disks_devices)
            mdadm.mdadm('misc', None, raid_device, wait=True, raise_exc=False)

        # Restore vg config
        vg_restore_file = tempfile.mktemp()
        with open(vg_restore_file, 'w') as f:
            f.write(base64.b64decode(self.lvm_group_cfg))

        # Ensure RAID physical volume
        try:
            lvm2.pvs(raid_device)
        except:
            lvm2.pvcreate(raid_device,
                          uuid=self.pv_uuid,
                          restorefile=vg_restore_file)
        finally:
            lvm2.vgcfgrestore(self.vg, file=vg_restore_file)
            os.remove(vg_restore_file)

        # Check that a logical volume exists
        lv_infos = lvm2.lvs(self.vg)
        if not lv_infos:
            raise storage2.StorageError(
                    'No logical volumes found in %s vol. group' % self.vg)
        lv_name = lv_infos.popitem()[1].lv_name
        self.device = lvm2.lvpath(self.vg, lv_name)

        # Activate volume group
        lvm2.vgchange(self.vg, available='y')

        # Wait for the logical volume device file
        util.wait_until(lambda: os.path.exists(self.device),
                        timeout=120, logger=LOG,
                        error_text='Logical volume %s not found' % self.device)
    else:
        raid_device = mdadm.findname()
        kwargs = dict(force=True, level=self.level, assume_clean=True,
                      raid_devices=len(disks_devices), metadata='default')
        mdadm.mdadm('create', raid_device, *disks_devices, **kwargs)
        mdadm.mdadm('misc', None, raid_device, wait=True, raise_exc=False)

        lvm2.pvcreate(raid_device, force=True)
        self.pv_uuid = lvm2.pvs(raid_device)[raid_device].pv_uuid

        lvm2.vgcreate(self.vg, raid_device)
        out, err = lvm2.lvcreate(self.vg, extents='100%FREE')[:2]
        try:
            clean_out = out.strip().split('\n')[-1].strip()
            vol = re.match(self.lv_re, clean_out).group(1)
            self.device = lvm2.lvpath(self.vg, vol)
        except:
            e = 'Logical volume creation failed: %s\n%s' % (out, err)
            raise Exception(e)

        self.lvm_group_cfg = lvm2.backup_vg_config(self.vg)

    self.raid_pv = raid_device
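# For reference, the create branch above corresponds roughly to this shell
# session (device and VG names are illustrative):
#
#   mdadm --create /dev/md0 --force --metadata=default --assume-clean \
#         --level=1 --raid-devices=2 /dev/sdb /dev/sdc
#   pvcreate -ff /dev/md0
#   vgcreate dbstorage /dev/md0
#   lvcreate -l 100%FREE dbstorage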
def rebundle(self):
    rebundle_dir = tempfile.mkdtemp()
    try:
        pl = bus.platform
        proj_id = pl.get_numeric_project_id()
        proj_name = pl.get_project_id()
        cloudstorage = pl.get_storage_conn()

        # Determine the root filesystem size
        devices = coreutils.df()
        root_disk = firstmatched(lambda x: x.mpoint == '/', devices)
        if not root_disk:
            raise HandlerError("Can't find root device")
        # In bytes, rounded down to a whole number of 512-byte blocks
        fssize = (root_disk.size * 1000 / 512) * 512

        # Old code. Should be reworked
        if os.path.exists('/dev/root'):
            root_part_path = os.path.realpath('/dev/root')
        else:
            rootfs_stat = os.stat('/')
            root_device_minor = os.minor(rootfs_stat.st_dev)
            root_device_major = os.major(rootfs_stat.st_dev)
            root_part_path = os.path.realpath('/dev/block/{0}:{1}'.format(
                    root_device_major, root_device_minor))

        root_part_sysblock_path = glob.glob(
                '/sys/block/*/%s' % os.path.basename(root_part_path))[0]
        root_device = '/dev/%s' % os.path.basename(
                os.path.dirname(root_part_sysblock_path))

        arch_name = '%s.tar.gz' % self._role_name.lower()
        arch_path = os.path.join(rebundle_dir, arch_name)

        # Update gcimagebundle
        try:
            pkgmgr.latest(self.gcimagebundle_pkg_name)
        except:
            e = sys.exc_info()[1]
            LOG.warn('Gcimagebundle update failed: %s' % e)

        if os_dist.redhat_family:
            semanage = software.which('semanage')
            if not semanage:
                pkgmgr.installed('policycoreutils-python')
                semanage = software.which('semanage')
            util.system2((semanage, 'permissive', '-a', 'rsync_t'))

        gc_img_bundle_bin = software.which('gcimagebundle')
        o, e, p = util.system2(
                (gc_img_bundle_bin,
                 '-d', root_device,
                 '-e', ','.join(self.exclude_dirs),
                 '--fssize', str(fssize),
                 '-o', rebundle_dir,
                 '--output_file_name', arch_name),
                raise_exc=False)
        if p:
            raise HandlerError(
                    'Gcimagebundle util returned non-zero code %s. '
                    'Stderr: %s' % (p, e))

        try:
            LOG.info('Uploading compressed image to cloud storage')
            tmp_bucket_name = 'scalr-images-%s-%s' % (
                    random.randint(1, 1000000), int(time.time()))
            remote_dir = 'gcs://%s' % tmp_bucket_name

            def progress_cb(progress):
                # progress is in bytes; report it as a percentage
                LOG.debug('Uploading {perc}%'.format(
                        perc=100 * progress / os.path.getsize(arch_path)))

            uploader = largetransfer.Upload(arch_path, remote_dir,
                                            simple=True,
                                            progress_cb=progress_cb)
            uploader.apply_async()
            try:
                try:
                    uploader.join()
                except:
                    if uploader.error:
                        error = uploader.error[1]
                    else:
                        error = sys.exc_info()[1]
                    msg = 'Image upload failed. Error:\n{error}'
                    msg = msg.format(error=error)
                    raise HandlerError(msg)
            except:
                with util.capture_exception(LOG):
                    objs = cloudstorage.objects()
                    objs.delete(bucket=tmp_bucket_name,
                                object=arch_name).execute()
                    cloudstorage.buckets().delete(
                            bucket=tmp_bucket_name).execute()
        finally:
            os.unlink(arch_path)
    finally:
        shutil.rmtree(rebundle_dir)

    goog_image_name = self._role_name.lower().replace('_', '-') + \
            '-' + str(int(time.time()))
    try:
        LOG.info('Registering new image %s' % goog_image_name)
        compute = pl.get_compute_conn()

        image_url = 'http://storage.googleapis.com/%s/%s' % (
                tmp_bucket_name, arch_name)

        req_body = dict(
                name=goog_image_name,
                sourceType='RAW',
                rawDisk=dict(source=image_url))

        req = compute.images().insert(project=proj_id, body=req_body)
        operation = req.execute()['name']

        LOG.info('Waiting for image to register')
        def image_is_ready():
            req = compute.globalOperations().get(project=proj_id,
                                                 operation=operation)
            res = req.execute()
            if res['status'] == 'DONE':
                if res.get('error'):
                    errors = []
                    for e in res['error']['errors']:
                        err_text = '%s: %s' % (e['code'], e['message'])
                        errors.append(err_text)
                    raise Exception('\n'.join(errors))
                return True
            return False
        util.wait_until(image_is_ready, logger=LOG, timeout=600)
    finally:
        try:
            objs = cloudstorage.objects()
            objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
            cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()
        except:
            e = sys.exc_info()[1]
            LOG.error('Failed to remove image compressed source: %s' % e)

    return '%s/images/%s' % (proj_name, goog_image_name)
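# Note: the value returned by rebundle() is a partial image resource path,
# e.g. 'my-project/images/myrole-1384161115' (illustrative name), which GCE
# accepts when configuring an instance's boot disk.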