# Assumes module-level imports of os, sys and zipfile, plus the project's
# PackagingUtil, table_def, DBConnector and DIR_NEPHELE_IMAGES definitions.
def x_import_base(self, args):
    source = args.path
    if not os.path.exists(source) or not os.access(source, os.R_OK):
        print "Cannot read file: %s" % source
        return 1
    (base_hashvalue, disk_name, _, _, _) = \
        PackagingUtil._get_basevm_attribute(source)
    disk_image_path = DIR_NEPHELE_IMAGES + disk_name

    # check if this filename already exists and warn before overwriting
    if os.path.exists(disk_image_path):
        if not raw_input(
                "An image with this filename already exists.\n"
                "Are you sure you wish to overwrite the following base "
                "image: %s? (y/N): " % disk_image_path
                ).lower().strip().startswith("y"):
            sys.exit(1)
        if not raw_input(
                "This will render any snapshots based on this image "
                "unusable. Are you certain? (y/N): "
                ).lower().strip().startswith("y"):
            sys.exit(1)

    print "Decompressing image to %s..." % DIR_NEPHELE_IMAGES
    zipbase = zipfile.ZipFile(source, 'r')
    zipbase.extractall(DIR_NEPHELE_IMAGES)
    print "Extracted image files to %s." % disk_image_path

    # register the imported base VM in the local database
    new_basevm = table_def.BaseVM(disk_image_path, base_hashvalue, source)
    dbconn = DBConnector()
    dbconn.add_item(new_basevm)

    # restart the stream-server so it reloads the list of images
    os.system('service stream-server restart')
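# A minimal usage sketch (not from the original source): wiring the handler
# above into an argparse-based CLI. The "import-base" subcommand name and the
# `shell` object holding the method are assumptions for illustration; only the
# args.path attribute is required by x_import_base.
import argparse

def build_import_parser(shell):
    # `shell` is assumed to be the object exposing x_import_base
    parser = argparse.ArgumentParser(prog="nephele")
    subparsers = parser.add_subparsers()
    import_cmd = subparsers.add_parser(
        "import-base", help="import a packaged base VM image")
    import_cmd.add_argument("path", help="path to the base VM zip package")
    import_cmd.set_defaults(func=shell.x_import_base)
    return parser

# Example invocation:
#   args = build_import_parser(shell).parse_args(["import-base", "base.zip"])
#   sys.exit(args.func(args) or 0)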
# Assumes module-level imports of os, sys, math, shutil, zipfile and
# tempfile.mkdtemp, the glance client, and the project's CLOUDLET_TYPE,
# PackagingUtil, elijah_memory_util, get_list, get_resource_size,
# find_matching_flavor, create_flavor, _FileFile and CloudletClientError.
def request_import_basevm(server_address, token, endpoint, glance_endpoint,
                          import_filepath, basevm_name):
    def _create_param(filepath, image_name, image_type, disk_size, mem_size):
        # Glance image properties identifying this file as part of a base VM
        properties = {
            "image_type": "snapshot",
            "image_location": "snapshot",
            CLOUDLET_TYPE.PROPERTY_KEY_CLOUDLET: "True",
            CLOUDLET_TYPE.PROPERTY_KEY_CLOUDLET_TYPE: image_type,
            CLOUDLET_TYPE.PROPERTY_KEY_BASE_UUID: base_hashvalue,
        }
        param = {
            "name": "%s" % image_name,
            "data": open(filepath, "rb"),
            "size": os.path.getsize(filepath),
            "is_public": True,
            "disk_format": "raw",
            "container_format": "bare",
            "min_disk": disk_size,
            "min_ram": mem_size,
            "properties": properties,
        }
        return param

    (base_hashvalue, disk_name, memory_name, diskhash_name, memoryhash_name) = \
        PackagingUtil._get_basevm_attribute(import_filepath)

    # check for a duplicated base VM
    image_list = get_list(server_address, token, endpoint, "images")
    for image in image_list:
        properties = image.get("metadata", None)
        if properties is None or len(properties) == 0:
            continue
        if properties.get(CLOUDLET_TYPE.PROPERTY_KEY_CLOUDLET_TYPE) != \
                CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK:
            continue
        base_sha256_uuid = properties.get(CLOUDLET_TYPE.PROPERTY_KEY_BASE_UUID)
        if base_sha256_uuid == base_hashvalue:
            msg = "A duplicated base VM already exists on the system\n"
            msg += "Image UUID of duplicated base VM: %s\n" % image['id']
            raise CloudletClientError(msg)

    # decompress files
    temp_dir = mkdtemp(prefix="cloudlet-base-")
    sys.stdout.write("Decompressing zipfile(%s) to temp dir(%s)\n" %
                     (import_filepath, temp_dir))
    zipbase = zipfile.ZipFile(
        _FileFile("file:///%s" % os.path.abspath(import_filepath)), 'r')
    zipbase.extractall(temp_dir)
    disk_path = os.path.join(temp_dir, disk_name)
    memory_path = os.path.join(temp_dir, memory_name)
    diskhash_path = os.path.join(temp_dir, diskhash_name)
    memoryhash_path = os.path.join(temp_dir, memoryhash_name)

    # create a new flavor if nothing matches
    memory_header = elijah_memory_util._QemuMemoryHeader(open(memory_path))
    libvirt_xml_str = memory_header.xml
    cpu_count, memory_size_mb = get_resource_size(libvirt_xml_str)
    disk_gb = int(math.ceil(os.path.getsize(disk_path) / 1024 / 1024 / 1024))
    flavor_list = get_list(server_address, token, endpoint, "flavors")
    flavor_ref, flavor_id = find_matching_flavor(flavor_list, cpu_count,
                                                 memory_size_mb, disk_gb)
    if flavor_id is None:
        flavor_name = "cloudlet-flavor-%s" % basevm_name
        flavor_ref, flavor_id = create_flavor(server_address, token, endpoint,
                                              cpu_count, memory_size_mb,
                                              disk_gb, flavor_name)
        sys.stdout.write("Created new flavor for the base VM\n")

    # upload the base VM image files
    disk_param = _create_param(disk_path, basevm_name + "-disk",
                               CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK,
                               disk_gb, memory_size_mb)
    memory_param = _create_param(memory_path, basevm_name + "-memory",
                                 CLOUDLET_TYPE.IMAGE_TYPE_BASE_MEM,
                                 disk_gb, memory_size_mb)
    diskhash_param = _create_param(diskhash_path, basevm_name + "-diskhash",
                                   CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK_HASH,
                                   disk_gb, memory_size_mb)
    memoryhash_param = _create_param(memoryhash_path, basevm_name + "-memhash",
                                     CLOUDLET_TYPE.IMAGE_TYPE_BASE_MEM_HASH,
                                     disk_gb, memory_size_mb)

    url = "://".join((glance_endpoint.scheme, glance_endpoint.netloc))
    gclient = glance_client.Client('1', url, token=token, insecure=True)
    sys.stdout.write("upload base memory to glance\n")
    glance_memory = gclient.images.create(**memory_param)
    sys.stdout.write("upload base disk hash to glance\n")
    glance_diskhash = gclient.images.create(**diskhash_param)
    sys.stdout.write("upload base memory hash to glance\n")
    glance_memoryhash = gclient.images.create(**memoryhash_param)

    # upload the base disk last so its properties can reference the other
    # image files
    glance_ref = {
        CLOUDLET_TYPE.IMAGE_TYPE_BASE_MEM: glance_memory.id,
        CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK_HASH: glance_diskhash.id,
        CLOUDLET_TYPE.IMAGE_TYPE_BASE_MEM_HASH: glance_memoryhash.id,
        CLOUDLET_TYPE.PROPERTY_KEY_BASE_RESOURCE:
            libvirt_xml_str.replace("\n", "")  # API cannot send '\n'
    }
    disk_param['properties'].update(glance_ref)
    sys.stdout.write("upload base disk to glance\n")
    glance_disk = gclient.images.create(**disk_param)

    # delete temp dir
    if os.path.exists(temp_dir):
        shutil.rmtree(temp_dir)
    return glance_disk
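# A minimal usage sketch (not from the original source) of calling
# request_import_basevm. The keystone token, service addresses, and the file
# name are placeholder assumptions; `endpoint` and `glance_endpoint` are
# assumed to be parsed URLs (urlparse results), matching how the function
# reads glance_endpoint.scheme and glance_endpoint.netloc above.
from urlparse import urlparse

def example_import():
    token = "<keystone-token>"                      # placeholder
    nova_endpoint = urlparse("http://192.168.1.10:8774/v2/<tenant-id>")
    glance_endpoint = urlparse("http://192.168.1.10:9292")
    glance_disk = request_import_basevm("192.168.1.10", token,
                                        nova_endpoint, glance_endpoint,
                                        "precise-hadoop.zip", "precise-hadoop")
    sys.stdout.write("imported base disk image uuid: %s\n" % glance_disk.id)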