Example #1
    def x_export_base(self, args):
        _, base = self.find_base(args.id)

        output_path = args.dest
        if base is not None:
            PackagingUtil.export_basevm(output_path, base.disk_path,
                                        base.hash_value)
        else:
            print "Failed to find matching image with id: %s" % args.id
Example #2
 def test_export_base(self):
     disk_path = self.base_vm_path
     dbconn, matching_basevm = PackagingUtil._get_matching_basevm(disk_path)
     self.assertTrue(matching_basevm is not None,
                     "Cannot find the requested base VM")
     try:
         PackagingUtil.export_basevm(self.export_outpath,
                                     matching_basevm.disk_path,
                                     matching_basevm.hash_value)
     except Exception as e:
         self.assertTrue(False, "Failed to export base VM: %s" % str(e))
     else:
         # the export should have produced an archive at the requested path
         self.assertTrue(os.path.exists(self.export_outpath))
Example #4
    def x_synthesize(self, args):
        err = None
        path = None
        overlay_meta = args.snapshot
        is_zip_contained, url_path = PackagingUtil.is_zip_contained(
            overlay_meta)
        if is_zip_contained is True:
            overlay_meta = url_path
        LOG.info("Beginning synthesis of: %s", args.snapshot)
        try:
            path = synthesis.generate_pidfile(args.title, args.ports)
            synthesis.synthesize(None,
                                 overlay_meta,
                                 disk_only=args.disk_only,
                                 handoff_url=None,
                                 zip_container=is_zip_contained,
                                 title=args.title,
                                 fwd_ports=args.ports)
        except Exception as e:
            LOG.error("Failed to synthesize: %s", str(e))
            err = e
        finally:
            # remove the pidfile even on failure; path can be unset if
            # generate_pidfile itself raised
            if path is not None and os.path.exists(path):
                os.unlink(path)

        if err is not None:
            raise Exception(err)
Example #5
    def x_import_base(self, args):
        source = args.path
        if os.path.exists(source) is False or os.access(source,
                                                        os.R_OK) is False:
            print "Cannot read file: %s" % source
            return 1

        (base_hashvalue, disk_name, _, _, _) = \
                PackagingUtil._get_basevm_attribute(source)
        disk_image_path = DIR_NEPHELE_IMAGES + disk_name

        # check if this filename already exists and warn
        if os.path.exists(disk_image_path):
            if not raw_input(
                    "An image with this filename already exists.\nAre you sure you wish to overwrite the following base image: %s? (y/N): "
                    % (disk_image_path)).lower().strip().startswith("y"):
                sys.exit(1)
            if not raw_input(
                    "This will render any snapshots based on this image unusable. Are you certain? (y/N): "
            ).lower().strip().startswith("y"):
                sys.exit(1)
        print "Decompressing image to %s..." % DIR_NEPHELE_IMAGES
        zipbase = zipfile.ZipFile(source, 'r')
        zipbase.extractall(DIR_NEPHELE_IMAGES)
        print "Extracted image files to %s." % (disk_image_path)

        # add to DB
        new_basevm = table_def.BaseVM(disk_image_path, base_hashvalue, source)
        dbconn = DBConnector()
        dbconn.add_item(new_basevm)

        #restart the stream-server to reload list of images
        os.system('service stream-server restart')
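One detail worth noting in the snippet above: building disk_image_path by plain string concatenation only works if DIR_NEPHELE_IMAGES ends with a path separator. A behavior-equivalent sketch using os.path.join avoids that assumption:

        # joins the images directory and the extracted disk file name safely,
        # with or without a trailing separator on DIR_NEPHELE_IMAGES
        disk_image_path = os.path.join(DIR_NEPHELE_IMAGES, disk_name)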
Example #6
 def test_import_base(self):
     try:
         self.base_vm_path, self.base_hashvalue = PackagingUtil.import_basevm(
             self.base_vm_cirros_filepath)
         self.assertTrue(os.path.exists(self.base_vm_path))
         self.assertIsInstance(self.base_hashvalue, str)
     except Exception as e:
         self.assertTrue(False, str(e))
 def delete_basevm(base_path, base_hashvalue):
     if base_path is not None and base_hashvalue is not None:
         dbconn, matching_basevm = PackagingUtil._get_matching_basevm(base_path)
         if matching_basevm:
             dbconn.del_item(matching_basevm)
             # remove the directory holding the base VM image files
             base_dir = os.path.dirname(base_path)
             shutil.rmtree(base_dir)
 def setUp(self):
     super(TestCreatingVMOverlay, self).setUp()
     self.temp_dir = mkdtemp(prefix="cloudlet-test-vmoverlay-")
     self.base_vm_cirros_filepath = os.path.join(
         self.temp_dir, os.path.basename(Const.base_vm_cirros_url))
     VMUtility.download_baseVM(Const.base_vm_cirros_url,
                               self.base_vm_cirros_filepath)
     self.base_vm_path, self.base_hashvalue = PackagingUtil.import_basevm(
         self.base_vm_cirros_filepath)
     self.overlay_filepath = None
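The fixture above only shows setUp; a matching tearDown is not part of this listing. A minimal sketch, assuming the test should release what setUp created (the generated overlay, the imported base VM, and the temp directory), could look like this:

 def tearDown(self):
     super(TestCreatingVMOverlay, self).tearDown()
     # hypothetical cleanup, not taken from the original test suite
     if self.overlay_filepath and os.path.exists(self.overlay_filepath):
         os.remove(self.overlay_filepath)
     delete_basevm(self.base_vm_path, self.base_hashvalue)
     if os.path.exists(self.temp_dir):
         shutil.rmtree(self.temp_dir)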
Example #10
 def delete_basevm(base_path, base_hashvalue):
     if base_path is not None and base_hashvalue is not None:
         dbconn, matching_basevm = PackagingUtil._get_matching_basevm(base_path)
         if matching_basevm:
             dbconn.del_item(matching_basevm)
             # remove the directory holding the base VM image files
             base_dir = os.path.dirname(base_path)
             shutil.rmtree(base_dir)
    def setUp(self):
        super(TestBaseExport, self).setUp()
        # import base VM to export it
        self.temp_dir = mkdtemp(prefix="cloudlet-test-basevm-")
        self.base_vm_cirros_filepath = os.path.join(
            self.temp_dir, os.path.basename(Const.base_vm_cirros_url))
        VMUtility.download_baseVM(Const.base_vm_cirros_url,
                                  self.base_vm_cirros_filepath)
        self.base_vm_path, self.base_hashvalue = PackagingUtil.import_basevm(
            self.base_vm_cirros_filepath)

        # path for exported base VM
        self.export_outpath = os.path.join(self.temp_dir, "exported-base.zip")
Example #12
    def setUp(self):
        super(TestBaseExport, self).setUp()
        # import base VM to export it
        self.temp_dir = mkdtemp(prefix="cloudlet-test-basevm-")
        self.base_vm_cirros_filepath = os.path.join(
            self.temp_dir, os.path.basename(Const.base_vm_cirros_url))
        VMUtility.download_baseVM(Const.base_vm_cirros_url,
                                  self.base_vm_cirros_filepath)
        self.base_vm_path, self.base_hashvalue = PackagingUtil.import_basevm(
            self.base_vm_cirros_filepath)

        # path for exported base VM
        self.export_outpath = os.path.join(self.temp_dir, "exported-base.zip")
Example #13
    def setUp(self):
        super(TestSynthesis, self).setUp()
        # check parameters
        self.overlay_url = Const.overlay_url_cirros
        try:
            urllib2.urlopen(self.overlay_url)
        except Exception as e:
            self.assertTrue(
                False,
                "Invalid overlay URL: %s\n%s" % (self.overlay_url, str(e))
            )

        # import base VM
        self.temp_dir = mkdtemp(prefix="cloudlet-test-vmoverlay-")
        self.base_vm_cirros_filepath = os.path.join(
            self.temp_dir, os.path.basename(Const.base_vm_cirros_url))
        VMUtility.download_baseVM(Const.base_vm_cirros_url,
                                  self.base_vm_cirros_filepath)
        self.base_vm_path, self.base_hashvalue = PackagingUtil.import_basevm(
            self.base_vm_cirros_filepath)
    def test_create_vm_overlay(self):
        disk_path = self.base_vm_path
        dbconn, matching_basevm = PackagingUtil._get_matching_basevm(disk_path)
        base_diskpath = matching_basevm.disk_path
        options = Options()
        options.TRIM_SUPPORT = True
        options.ZIP_CONTAINER = True
        options.FREE_SUPPORT = False
        options.DISK_ONLY = False
        try:
            vm_overlay = synthesis.VM_Overlay(base_diskpath, options)
            machine = vm_overlay.resume_basevm()
            VM_status = VMUtility.get_VM_status(machine)
            self.assertEqual(VM_status, libvirt.VIR_DOMAIN_RUNNING)

            # wait for VM running
            time.sleep(10)
            vm_overlay.create_overlay()
            self.overlay_filepath = vm_overlay.overlay_zipfile
            self.assertTrue(os.path.exists(self.overlay_filepath))
        except Exception as e:
            self.assertTrue(False, "cannot create VM overlay: %s" % str(e))
    num_core = 1
    bandwidth = [5, 10, 15, 20, 25, 30, 30]
    bandwidth.reverse()
    #num_cores_list = [4,4,3,2,1]; network_bw = 10

    for (base_path, overlay_path) in workloads:
        for network_bw in bandwidth:
        #for num_core in num_cores_list:
            # configure network using TC
            cmd = "sudo %s restart %d" % (os.path.abspath("./traffic_shaping"), network_bw)
            LOG.debug(cmd)
            LOG.debug(subprocess.check_output(cmd.split(" ")))
            #VMOverlayCreationMode.USE_STATIC_NETWORK_BANDWIDTH = network_bw

            # generate mode
            VMOverlayCreationMode.LIVE_MIGRATION_STOP = VMOverlayCreationMode.LIVE_MIGRATION_FINISH_USE_SNAPSHOT_SIZE
            overlay_mode = VMOverlayCreationMode.get_pipelined_multi_process_finite_queue(num_cores=num_core)
            overlay_mode.COMPRESSION_ALGORITHM_TYPE = Const.COMPRESSION_GZIP
            overlay_mode.COMPRESSION_ALGORITHM_SPEED = 1
            overlay_mode.MEMORY_DIFF_ALGORITHM = "none"
            overlay_mode.DISK_DIFF_ALGORITHM = "none"

            LOG.debug("network-test\t%s-%s (Mbps)" % (network_bw, num_core))
            is_url, overlay_url = PackagingUtil.is_zip_contained(overlay_path)
            #run_file(base_path, overlay_url, overlay_mode)
            run_network(base_path, overlay_url, overlay_mode)

            time.sleep(30)

Example #16
def request_import_basevm(server_address, token, endpoint, glance_endpoint,
                          import_filepath, basevm_name):
    def _create_param(filepath, image_name, image_type, disk_size, mem_size):
        properties = {
            "image_type": "snapshot",
            "image_location": "snapshot",
            CLOUDLET_TYPE.PROPERTY_KEY_CLOUDLET: "True",
            CLOUDLET_TYPE.PROPERTY_KEY_CLOUDLET_TYPE: image_type,
            CLOUDLET_TYPE.PROPERTY_KEY_BASE_UUID: base_hashvalue,
        }
        param = {
            "name": "%s" % image_name,
            "data": open(filepath, "rb"),
            "size": os.path.getsize(filepath),
            "is_public": True,
            "disk_format": "raw",
            "container_format": "bare",
            "min_disk": disk_size,
            "min_ram": mem_size,
            "properties": properties,
        }
        return param
    (base_hashvalue, disk_name, memory_name, diskhash_name, memoryhash_name) = \
        PackagingUtil._get_basevm_attribute(import_filepath)

    # check duplicated base VM
    image_list = get_list(server_address, token, endpoint, "images")
    for image in image_list:
        properties = image.get("metadata", None)
        if properties is None or len(properties) == 0:
            continue
        if properties.get(CLOUDLET_TYPE.PROPERTY_KEY_CLOUDLET_TYPE) != \
                CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK:
            continue
        base_sha256_uuid = properties.get(CLOUDLET_TYPE.PROPERTY_KEY_BASE_UUID)
        if base_sha256_uuid == base_hashvalue:
            msg = "A duplicated base VM already exists on the system\n"
            msg += "Image UUID of duplicated Base VM: %s\n" % image['id']
            raise CloudletClientError(msg)

    # decompress files
    temp_dir = mkdtemp(prefix="cloudlet-base-")
    sys.stdout.write("Decompressing zipfile(%s) to temp dir(%s)\n" %
                     (import_filepath, temp_dir))
    zipbase = zipfile.ZipFile(
        _FileFile("file:///%s" % os.path.abspath(import_filepath)), 'r')
    zipbase.extractall(temp_dir)
    disk_path = os.path.join(temp_dir, disk_name)
    memory_path = os.path.join(temp_dir, memory_name)
    diskhash_path = os.path.join(temp_dir, diskhash_name)
    memoryhash_path = os.path.join(temp_dir, memoryhash_name)

    # create new flavor if nothing matches
    memory_header = elijah_memory_util._QemuMemoryHeader(open(memory_path))
    libvirt_xml_str = memory_header.xml
    cpu_count, memory_size_mb = get_resource_size(libvirt_xml_str)
    disk_gb = int(math.ceil(os.path.getsize(disk_path) / 1024 / 1024 / 1024))
    flavor_list = get_list(server_address, token, endpoint, "flavors")
    flavor_ref, flavor_id = find_matching_flavor(flavor_list, cpu_count,
                                                 memory_size_mb, disk_gb)
    if flavor_id is None:
        flavor_name = "cloudlet-flavor-%s" % basevm_name
        flavor_ref, flavor_id = create_flavor(server_address, token, endpoint,
                                              cpu_count, memory_size_mb,
                                              disk_gb, flavor_name)
        sys.stdout.write("Created a new flavor for the base VM\n")

    # upload Base VM
    disk_param = _create_param(disk_path, basevm_name + "-disk",
                               CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK, disk_gb,
                               memory_size_mb)
    memory_param = _create_param(memory_path, basevm_name + "-memory",
                                 CLOUDLET_TYPE.IMAGE_TYPE_BASE_MEM, disk_gb,
                                 memory_size_mb)
    diskhash_param = _create_param(diskhash_path, basevm_name + "-diskhash",
                                   CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK_HASH,
                                   disk_gb, memory_size_mb)
    memoryhash_param = _create_param(memoryhash_path, basevm_name + "-memhash",
                                     CLOUDLET_TYPE.IMAGE_TYPE_BASE_MEM_HASH,
                                     disk_gb, memory_size_mb)
    url = "://".join((glance_endpoint.scheme, glance_endpoint.netloc))
    gclient = glance_client.Client('1', url, token=token, insecure=True)
    sys.stdout.write("upload base memory to glance\n")
    glance_memory = gclient.images.create(**memory_param)
    sys.stdout.write("upload base disk hash to glance\n")
    glance_diskhash = gclient.images.create(**diskhash_param)
    sys.stdout.write("upload base memory hash to glance\n")
    glance_memoryhash = gclient.images.create(**memoryhash_param)

    # upload Base disk at the last to have references for other image files
    glance_ref = {
        CLOUDLET_TYPE.IMAGE_TYPE_BASE_MEM: glance_memory.id,
        CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK_HASH: glance_diskhash.id,
        CLOUDLET_TYPE.IMAGE_TYPE_BASE_MEM_HASH: glance_memoryhash.id,
        CLOUDLET_TYPE.PROPERTY_KEY_BASE_RESOURCE:
        libvirt_xml_str.replace("\n", "")  # API cannot send '\n'
    }
    disk_param['properties'].update(glance_ref)
    sys.stdout.write("upload base disk to glance\n")
    glance_disk = gclient.images.create(**disk_param)

    # delete temp dir
    if os.path.exists(temp_dir):
        shutil.rmtree(temp_dir)

    return glance_disk
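For orientation, here is a hypothetical call site for request_import_basevm. The token and endpoint values below are placeholders: in practice they come from a Keystone authentication step not shown in this listing, and the exact types expected for the two endpoint arguments (assumed here to be urlparse results) depend on that caller code.

from urlparse import urlparse  # Python 2, matching the rest of the listing

token = "<keystone-token>"                                    # placeholder
nova_endpoint = urlparse("http://controller:8774/v2/TENANT")  # placeholder
glance_endpoint = urlparse("http://controller:9292")          # placeholder
glance_disk = request_import_basevm("controller", token,
                                    nova_endpoint, glance_endpoint,
                                    "/path/to/base-vm.zip", "my-base-vm")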
        (windows_base_path, mar),
        #(windows_base_path, face),
        #(linux_base_path, moped),
        #(linux_base_path, speech),
        #(linux_base_path, random),
        #(linux_base_path, fluid),
    ]
    for (base_path, overlay_path) in workloads:
        if not os.path.exists(base_path):
            raise ProfilingError("Invalid path to %s" % base_path)
        if not os.path.exists(overlay_path):
            raise ProfilingError("Invalid path to %s" % overlay_path)

    num_core = 1
    mode_list = profiling_workload(num_core)
    VMOverlayCreationMode.LIVE_MIGRATION_STOP = VMOverlayCreationMode.LIVE_MIGRATION_FINISH_USE_SNAPSHOT_SIZE
    for (base_path, overlay_path) in workloads:
        for each_mode in mode_list:
            # start BW control
            bw_control = NetworkBWcontrol()
            bw_control.start()

            LOG.debug("network-test\tvarying-%s (Mbps)" % (num_core))
            is_url, overlay_url = PackagingUtil.is_zip_contained(overlay_path)
            run_network(base_path, overlay_url, each_mode)

            bw_control.terminate()
            bw_control.join()
            time.sleep(30)

def request_import_basevm(server_address, token, 
                          endpoint, glance_endpoint,
                          import_filepath, basevm_name):
    def _create_param(filepath, image_name, image_type, disk_size, mem_size):
        properties = {
            "image_type": "snapshot",
            "image_location": "snapshot",
            CLOUDLET_TYPE.PROPERTY_KEY_CLOUDLET: "True",
            CLOUDLET_TYPE.PROPERTY_KEY_CLOUDLET_TYPE: image_type,
            CLOUDLET_TYPE.PROPERTY_KEY_BASE_UUID: base_hashvalue,
            }
        param = {
            "name": "%s" % image_name,
            "data": open(filepath, "rb"),
            "size": os.path.getsize(filepath),
            "is_public": True,
            "disk_format": "raw",
            "container_format": "bare",
            "min_disk": disk_size,
            "min_ram": mem_size,
            "properties": properties,
            }
        return param
    (base_hashvalue, disk_name, memory_name, diskhash_name, memoryhash_name) = \
        PackagingUtil._get_basevm_attribute(import_filepath)

    # check duplicated base VM
    image_list = get_list(server_address, token, endpoint, "images")
    for image in image_list:
        properties = image.get("metadata", None)
        if properties is None or len(properties) == 0:
            continue
        if properties.get(CLOUDLET_TYPE.PROPERTY_KEY_CLOUDLET_TYPE) != \
                CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK:
            continue
        base_sha256_uuid = properties.get(CLOUDLET_TYPE.PROPERTY_KEY_BASE_UUID)
        if base_sha256_uuid == base_hashvalue:
            msg = "A duplicated base VM already exists on the system\n"
            msg += "Image UUID of duplicated Base VM: %s\n" % image['id']
            raise CloudletClientError(msg)

    # decompress files
    temp_dir = mkdtemp(prefix="cloudlet-base-")
    sys.stdout.write(
        "Decompressing zipfile(%s) to temp dir(%s)\n" %
        (import_filepath, temp_dir))
    zipbase = zipfile.ZipFile(_FileFile("file:///%s" % import_filepath), 'r')
    zipbase.extractall(temp_dir)
    disk_path = os.path.join(temp_dir, disk_name)
    memory_path = os.path.join(temp_dir, memory_name)
    diskhash_path = os.path.join(temp_dir, diskhash_name)
    memoryhash_path = os.path.join(temp_dir, memoryhash_name)

    # create new flavor if nothing matches
    memory_header = elijah_memory_util._QemuMemoryHeader(open(memory_path))
    libvirt_xml_str = memory_header.xml
    cpu_count, memory_size_mb = get_resource_size(libvirt_xml_str)
    disk_gb = int(math.ceil(os.path.getsize(disk_path)/1024/1024/1024))
    flavor_list = get_list(server_address, token, endpoint, "flavors")
    flavor_ref, flavor_id = find_matching_flavor(flavor_list, cpu_count,
                                                 memory_size_mb, disk_gb)
    if flavor_id is None:
        flavor_name = "cloudlet-flavor-%s" % basevm_name
        flavor_ref, flavor_id = create_flavor(server_address, token, endpoint,
                                              cpu_count, memory_size_mb,
                                              disk_gb, flavor_name)
        sys.stdout.write("Created a new flavor for the base VM\n")

    # upload Base VM
    disk_param = _create_param(disk_path, basevm_name + "-disk",
                               CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK,
                               disk_gb, memory_size_mb)
    memory_param = _create_param(memory_path, basevm_name + "-memory",
                                 CLOUDLET_TYPE.IMAGE_TYPE_BASE_MEM,
                                 disk_gb, memory_size_mb)
    diskhash_param = _create_param(diskhash_path, basevm_name + "-diskhash",
                                   CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK_HASH,
                                   disk_gb, memory_size_mb)
    memoryhash_param = _create_param(memoryhash_path, basevm_name + "-memhash",
                                     CLOUDLET_TYPE.IMAGE_TYPE_BASE_MEM_HASH,
                                     disk_gb, memory_size_mb)
    url = "://".join((glance_endpoint.scheme, glance_endpoint.netloc))
    gclient = glance_client.Client('1', url, token=token, insecure=True)
    sys.stdout.write("upload base memory to glance\n")
    glance_memory = gclient.images.create(**memory_param)
    sys.stdout.write("upload base disk hash to glance\n")
    glance_diskhash = gclient.images.create(**diskhash_param)
    sys.stdout.write("upload base memory hash to glance\n")
    glance_memoryhash = gclient.images.create(**memoryhash_param)

    # upload Base disk at the last to have references for other image files
    glance_ref = {
        CLOUDLET_TYPE.IMAGE_TYPE_BASE_MEM: glance_memory.id,
        CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK_HASH: glance_diskhash.id,
        CLOUDLET_TYPE.IMAGE_TYPE_BASE_MEM_HASH: glance_memoryhash.id,
        CLOUDLET_TYPE.PROPERTY_KEY_BASE_RESOURCE:
        libvirt_xml_str.replace("\n", "")  # API cannot send '\n'
        }
    disk_param['properties'].update(glance_ref)
    sys.stdout.write("upload base disk to glance\n")
    glance_disk = gclient.images.create(**disk_param)

    # delete temp dir
    if os.path.exists(temp_dir):
        shutil.rmtree(temp_dir)

    return glance_disk