def snapshot(self, op, name):
    rebundle_dir = tempfile.mkdtemp()
    archive_path = ''
    try:
        pl = __node__['platform']
        proj_id = pl.get_numeric_project_id()
        proj_name = pl.get_project_id()
        cloudstorage = pl.new_storage_client()

        # Resolve the block device that backs the root partition
        root_part_path = os.path.realpath('/dev/root')
        root_part_sysblock_path = glob.glob('/sys/block/*/%s' % os.path.basename(root_part_path))[0]
        root_device = '/dev/%s' % os.path.basename(os.path.dirname(root_part_sysblock_path))

        archive_name = '%s.tar.gz' % name.lower()
        archive_path = os.path.join(rebundle_dir, archive_name)

        self._prepare_software()

        gcimagebundle_bin = software.which('gcimagebundle')
        out, err, code = util.system2((gcimagebundle_bin,
                                       '-d', root_device,
                                       '-e', ','.join(self.exclude_dirs),
                                       '-o', rebundle_dir,
                                       '--output_file_name', archive_name),
                                      raise_exc=False)
        if code:
            raise ImageAPIError('Gcimagebundle util returned non-zero code %s. Stderr: %s' % (code, err))

        LOG.info('Uploading compressed image to cloud storage')
        tmp_bucket_name = 'scalr-images-%s-%s' % (random.randint(1, 1000000), int(time.time()))
        remote_path = 'gcs://%s/%s' % (tmp_bucket_name, archive_name)
        arch_size = os.stat(archive_path).st_size
        uploader = FileTransfer(src=archive_path, dst=remote_path)

        try:
            upload_result = uploader.run()
            if upload_result['failed']:
                errors = [str(failed['exc_info'][1]) for failed in upload_result['failed']]
                raise ImageAPIError('Image upload failed. Errors:\n%s' % '\n'.join(errors))
            assert arch_size == upload_result['completed'][0]['size']
        except:
            self._remove_bucket(tmp_bucket_name, archive_name, cloudstorage)
            raise
    finally:
        shutil.rmtree(rebundle_dir)
        if os.path.exists(archive_path):
            os.remove(archive_path)

    image_name = name.lower().replace('_', '-') + '-' + str(int(time.time()))
    self._register_image(image_name, tmp_bucket_name, archive_name, cloudstorage)

    return '%s/images/%s' % (proj_name, image_name)
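
# NOTE: _remove_bucket() is called by snapshot() above but is not defined in
# this section. A minimal sketch of what it presumably does, mirroring the
# inline cleanup in rebundle() below (delete the uploaded archive object, then
# the temporary bucket); the name and signature come from the call site, the
# body is an assumption.
def _remove_bucket(self, bucket_name, object_name, cloudstorage):
    try:
        objs = cloudstorage.objects()
        objs.delete(bucket=bucket_name, object=object_name).execute()
        cloudstorage.buckets().delete(bucket=bucket_name).execute()
    except:
        e = sys.exc_info()[1]
        LOG.error('Failed to remove image compressed source: %s' % e)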

def rebundle(self):
    rebundle_dir = tempfile.mkdtemp()
    try:
        pl = bus.platform
        proj_id = pl.get_numeric_project_id()
        proj_name = pl.get_project_id()
        cloudstorage = pl.new_storage_client()

        # Resolve the block device that backs the root partition
        root_part_path = os.path.realpath('/dev/root')
        root_part_sysblock_path = glob.glob('/sys/block/*/%s' % os.path.basename(root_part_path))[0]
        root_device = '/dev/%s' % os.path.basename(os.path.dirname(root_part_sysblock_path))

        arch_name = '%s.tar.gz' % self._role_name.lower()
        arch_path = os.path.join(rebundle_dir, arch_name)

        # Update gcimagebundle
        try:
            pkgmgr.latest(self.gcimagebundle_pkg_name)
        except:
            e = sys.exc_info()[1]
            LOG.warn('Gcimagebundle update failed: %s' % e)

        # gcimagebundle relies on rsync; allow it under SELinux on RHEL-family distros
        if os_dist.redhat_family:
            semanage = software.which('semanage')
            if not semanage:
                pkgmgr.installed('policycoreutils-python')
                semanage = software.which('semanage')
            util.system2((semanage, 'permissive', '-a', 'rsync_t'))

        gc_img_bundle_bin = software.which('gcimagebundle')
        o, e, p = util.system2((gc_img_bundle_bin,
                                '-d', root_device,
                                '-e', ','.join(self.exclude_dirs),
                                '-o', rebundle_dir,
                                '--output_file_name', arch_name),
                               raise_exc=False)
        if p:
            raise HandlerError('Gcimagebundle util returned non-zero code %s. Stderr: %s' % (p, e))

        try:
            LOG.info('Uploading compressed image to cloud storage')
            tmp_bucket_name = 'scalr-images-%s-%s' % (random.randint(1, 1000000), int(time.time()))
            remote_path = 'gcs://%s/%s' % (tmp_bucket_name, arch_name)
            arch_size = os.stat(arch_path).st_size
            uploader = FileTransfer(src=arch_path, dst=remote_path)

            try:
                upload_result = uploader.run()
                if upload_result['failed']:
                    errors = [str(failed['exc_info'][1]) for failed in upload_result['failed']]
                    raise HandlerError('Image upload failed. Errors:\n%s' % '\n'.join(errors))
                assert arch_size == upload_result['completed'][0]['size']
            except:
                with util.capture_exception(LOG):
                    objs = cloudstorage.objects()
                    objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
                    cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()
        finally:
            os.unlink(arch_path)
    finally:
        shutil.rmtree(rebundle_dir)

    goog_image_name = self._role_name.lower().replace('_', '-') + '-' + str(int(time.time()))

    try:
        LOG.info('Registering new image %s' % goog_image_name)
        compute = pl.new_compute_client()

        image_url = 'http://storage.googleapis.com/%s/%s' % (tmp_bucket_name, arch_name)

        req_body = dict(
            name=goog_image_name,
            sourceType='RAW',
            rawDisk=dict(source=image_url)
        )

        req = compute.images().insert(project=proj_id, body=req_body)
        operation = req.execute()['name']

        LOG.info('Waiting for image to register')

        def image_is_ready():
            req = compute.globalOperations().get(project=proj_id, operation=operation)
            res = req.execute()
            if res['status'] == 'DONE':
                if res.get('error'):
                    errors = []
                    for e in res['error']['errors']:
                        err_text = '%s: %s' % (e['code'], e['message'])
                        errors.append(err_text)
                    raise Exception('\n'.join(errors))
                return True
            return False

        util.wait_until(image_is_ready, logger=LOG, timeout=600)
    finally:
        try:
            objs = cloudstorage.objects()
            objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
            cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()
        except:
            e = sys.exc_info()[1]
            LOG.error('Failed to remove image compressed source: %s' % e)

    return '%s/images/%s' % (proj_name, goog_image_name)

def rebundle(self):
    rebundle_dir = tempfile.mkdtemp()
    try:
        pl = bus.platform
        proj_id = pl.get_numeric_project_id()
        proj_name = pl.get_project_id()
        cloudstorage = pl.new_storage_client()

        tmp_mount_dir = os.path.join(rebundle_dir, 'root')
        os.makedirs(tmp_mount_dir)

        image_name = 'disk.raw'
        image_path = os.path.join(rebundle_dir, image_name)

        # Create a sparse raw image file slightly larger than the root filesystem
        root_size = coreutils.statvfs('/')['size']
        LOG.debug('Creating image file %s' % image_path)
        with open(image_path, 'w') as f:
            f.truncate(root_size + 1*1024)

        try:
            LOG.debug('Creating partition table on image')
            system(('parted', image_path, 'mklabel', 'msdos'))
            system(('parted', image_path, 'mkpart', 'primary', 'ext2', 1,
                    str(root_size/(1024*1024))))

            # Map disk image
            out = system(('kpartx', '-av', image_path))[0]
            try:
                loop = re.search(r'(/dev/loop\d+)', out).group(1)
                root_dev_name = '/dev/mapper/%sp1' % loop.split('/')[-1]

                LOG.info('Creating filesystem')
                storage2.filesystem('ext4').mkfs(root_dev_name)
                dev_uuid = uuid.uuid4()
                system(('tune2fs', '-U', str(dev_uuid), root_dev_name))

                mount.mount(root_dev_name, tmp_mount_dir)
                try:
                    # Exclude every mount point other than / from the copy
                    lines = system(('/bin/mount', '-l'))[0].splitlines()
                    exclude_dirs = set()
                    for line in lines:
                        mpoint = line.split()[2]
                        if mpoint != '/':
                            exclude_dirs.add(mpoint)
                    exclude_dirs.update(self.exclude_dirs)

                    excludes = [os.path.join(ex, '**') for ex in exclude_dirs]
                    excludes.extend(self.exclude_files)
                    excludes.extend(self._excludes)

                    LOG.info('Copying root filesystem to image')
                    rsync('/', tmp_mount_dir, archive=True,
                          hard_links=True, times=True,
                          sparse=True, exclude=excludes)

                    LOG.info('Cleanup image')
                    self._create_spec_devices(tmp_mount_dir)

                    LOG.debug('Removing roles-builder user')
                    sh = pexpect.spawn('/bin/sh')
                    try:
                        sh.sendline('chroot %s' % tmp_mount_dir)
                        sh.expect('#')
                        sh.sendline('userdel -rf %s' % ROLEBUILDER_USER)
                        sh.expect('#')
                    finally:
                        sh.close()

                    # Patch fstab so the image mounts its root by the new UUID
                    fstab_path = os.path.join(tmp_mount_dir, 'etc/fstab')
                    if os.path.exists(fstab_path):
                        with open(fstab_path) as f:
                            fstab = f.read()
                        new_fstab = re.sub(r'UUID=\S+\s+/\s+(.*)',
                                           'UUID=%s / \\1' % dev_uuid, fstab)
                        with open(fstab_path, 'w') as f:
                            f.write(new_fstab)
                finally:
                    mount.umount(root_dev_name)
            finally:
                system(('kpartx', '-d', image_path))

            LOG.info('Compressing image.')
            arch_name = '%s.tar.gz' % self._role_name.lower()
            arch_path = os.path.join(rebundle_dir, arch_name)

            tar = Tar()
            tar.create().gzip().sparse()
            tar.archive(arch_path)
            tar.add(image_name, rebundle_dir)
            system(str(tar), shell=True)
        finally:
            os.unlink(image_path)

        try:
            LOG.info('Uploading compressed image to cloud storage')
            tmp_bucket_name = 'scalr-images-%s-%s' % (random.randint(1, 1000000), int(time.time()))
            remote_path = 'gcs://%s/%s' % (tmp_bucket_name, arch_name)
            arch_size = os.stat(arch_path).st_size
            uploader = FileTransfer(src=arch_path, dst=remote_path)

            try:
                upload_result = uploader.run()
                if upload_result['failed']:
                    errors = [str(failed['exc_info'][1]) for failed in upload_result['failed']]
                    raise HandlerError('Image upload failed. Errors:\n%s' % '\n'.join(errors))
                assert arch_size == upload_result['completed'][0]['size']
            except:
                with capture_exception(LOG):
                    objs = cloudstorage.objects()
                    objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
                    cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()
        finally:
            os.unlink(arch_path)
    finally:
        shutil.rmtree(rebundle_dir)

    try:
        goog_image_name = self._role_name.lower().replace('_', '-')
        LOG.info('Registering new image %s' % goog_image_name)
        # TODO: check duplicate names
        compute = pl.new_compute_client()

        # Getting this instance's kernel
        instance_id = pl.get_instance_id()
        zone = os.path.basename(pl.get_zone())
        all_instances = compute.instances().list(project=proj_id, zone=zone,
                                                 fields="items(kernel,id)").execute()['items']
        try:
            kernel = filter(lambda inst: inst['id'] == instance_id, all_instances)[0]['kernel']
        except KeyError:
            # Looks like this instance was started from image, getting kernel from image
            try:
                current_image = pl.get_image()
                current_image_fq = current_image.split('/')
                current_img_project = current_image_fq[1]
                current_img_name = current_image_fq[3]
                current_img_obj = compute.images().get(project=current_img_project,
                                                       image=current_img_name).execute()
                kernel = current_img_obj['preferredKernel']
            except:
                raise HandlerError('Could not obtain kernel for this instance')

        image_url = 'http://storage.googleapis.com/%s/%s' % (tmp_bucket_name, arch_name)

        req_body = dict(
            name=goog_image_name,
            sourceType='RAW',
            preferredKernel=kernel,
            rawDisk=dict(
                containerType='TAR',
                source=image_url
            )
        )

        req = compute.images().insert(project=proj_id, body=req_body)
        operation = req.execute()['name']

        LOG.info('Waiting for image to register')

        def image_is_ready():
            req = compute.globalOperations().get(project=proj_id, operation=operation)
            res = req.execute()
            if res['status'] == 'DONE':
                if res.get('error'):
                    errors = []
                    for e in res['error']['errors']:
                        err_text = '%s: %s' % (e['code'], e['message'])
                        errors.append(err_text)
                    raise Exception('\n'.join(errors))
                return True
            return False

        wait_until(image_is_ready, logger=LOG, timeout=600)
    finally:
        try:
            objs = cloudstorage.objects()
            objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
            cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()
        except:
            e = sys.exc_info()[1]
            LOG.error('Failed to remove image compressed source: %s' % e)

    return '%s/images/%s' % (proj_name, goog_image_name)
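
# NOTE: _create_spec_devices() is called by the rebundle() variant above but is
# not shown in this section. A hypothetical sketch, assuming it recreates the
# minimal /dev nodes that the rsync excludes leave out of the image root; the
# exact device set used by the original implementation is an assumption.
def _create_spec_devices(self, root_dir):
    import stat
    # Conventional (major, minor) numbers for basic character devices
    devices = {
        'console': (5, 1),
        'null': (1, 3),
        'zero': (1, 5),
    }
    dev_dir = os.path.join(root_dir, 'dev')
    if not os.path.exists(dev_dir):
        os.makedirs(dev_dir)
    for name, (major, minor) in devices.items():
        path = os.path.join(dev_dir, name)
        if not os.path.exists(path):
            # Create a character device node with restrictive permissions
            os.mknod(path, 0o600 | stat.S_IFCHR, os.makedev(major, minor))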