def _delete_image_on_vsan(self, datastore_id, image_id):
    """Delete an image stored on a VSAN datastore.

    VSAN needs special handling: the ddb.deletable flag must be cleared
    from the .vmdk descriptor first, the folder contents must be removed
    next, and only then can the folder (osfs namespace) be deleted.

    :param datastore_id: id of the datastore holding the image
    :param image_id: id of the image to delete
    """
    self._logger.info("_delete_image_on_vsan: datastore_id=%s, image_id=%s" % (datastore_id, image_id))
    # clear ddb.deletable flag in .vmdk file which would otherwise cause
    # Vmacore::File::PermissionDeniedException (PR 1704935)
    vmdk_path = os_vmdk_path(datastore_id, image_id, IMAGE_FOLDER_NAME_PREFIX)
    temp_path = "%s~" % vmdk_path
    pattern = re.compile("^ddb.deletable = ")
    # Copy the descriptor through a temp file, dropping the ddb.deletable
    # line, then rename over the original. Context managers guarantee both
    # handles are closed even if an I/O error occurs mid-copy (the original
    # code leaked both handles and left the temp file behind on error).
    with open(vmdk_path) as disk_file, open(temp_path, "w+") as temp_file:
        for line in disk_file:
            if not pattern.match(line):
                temp_file.write(line)
            else:
                self._logger.info("_delete_image_on_vsan: skip %s" % line)
    os.rename(temp_path, vmdk_path)
    # delete vdisk
    self._host_client.delete_file(vmdk_path)
    # delete folder content which would otherwise cause
    # vim.fault.DirectoryNotEmpty (PR 1721520)
    image_dir = self._image_directory(datastore_id, image_id)
    for entry in os.listdir(image_dir):
        # skip hidden entries, but always remove lock files
        if not entry.startswith('.') or entry.endswith(".lck"):
            entry_full_path = os.path.join(image_dir, entry)
            self._logger.info("_delete_image_on_vsan: delete %s" % entry_full_path)
            if os.path.isdir(entry_full_path):
                rm_rf(entry_full_path)
            else:
                os.unlink(entry_full_path)
    # delete folder (osfs namespace)
    self._host_client.delete_file(image_dir)
def _clean_gc_dir(self, datastore_id):
    """ Clean may fail but can be retried later """
    gc_root = os_datastore_path(datastore_id, GC_IMAGE_FOLDER)
    # remove every child of the GC folder, leaving the folder itself
    for child in os.listdir(gc_root):
        rm_rf(os.path.join(gc_root, child))
def spawn_agents(self):
    """Spawn one agent subprocess per configured agent, then wait.

    Puts this process in its own process group, resets the shared image
    directory, launches ``self.agent_count`` agents with per-index config,
    log file, port, datastore and network, then blocks on SIGTERM before
    cleaning up and exiting.
    """
    os.setpgrp()
    if os.path.exists(MultiAgent.IMAGE_PATH):
        rm_rf(MultiAgent.IMAGE_PATH)
    mkdir_p(MultiAgent.IMAGE_PATH)
    for index in xrange(self.agent_count):
        # Create config sub-dirs
        config_path = self.create_config(index)
        log_file = self.get_log_file(index)
        # Set up argument list; each agent gets a unique id, port,
        # datastore and network derived from its index.
        args = self.argv[:]
        args.extend([
            "--multi-agent-id", str(index),
            "--config-path", config_path,
            "--logging-file", log_file,
            "--port", str(self.agent_port + index),
            "--datastores", "DataStore-" + str(index).zfill(4),
            "--vm-network", "Network-" + str(index).zfill(4),
        ])
        # NOTE: the original code also built a space-joined `command`
        # string from args but never used it; that dead code is removed.
        proc = subprocess.Popen(args)
        self.procs.append(proc)
    # Block until SIGTERM, then tear everything down.
    signal.signal(signal.SIGTERM, self._signal_handler)
    signal.pause()
    self.cleanup()
    sys.exit(0)
def _image_sweeper_rm_rf(self, directory):
    # Best-effort removal: a failed delete is logged and skipped so the
    # sweeper can move on (and retry the directory on a later pass).
    try:
        rm_rf(directory)
    except Exception as err:
        self._logger.warning("Cannot rm_rf dir: %s, %s" % (directory, err))
def delete_tmp_dir(self, datastore_id, tmp_dir):
    """ Deletes a temp image directory by moving it to a GC directory """
    file_path = os_datastore_path(datastore_id, tmp_dir)
    if not os.path.exists(file_path):
        # fix: original log message was truncated ("Tmp dir %s not")
        self._logger.info("Tmp dir %s not found" % file_path)
        raise DirectoryNotFound("Directory %s not found" % file_path)
    rm_rf(file_path)
def test_rm_rf(self):
    scratch = mkdtemp()
    assert_that(os.path.exists(scratch), is_(True))
    rm_rf(scratch)
    assert_that(os.path.exists(scratch), is_(False))
    # rm_rf a dir that doesn't exist shouldn't raise exception
    rm_rf(scratch)
def send_image_to_host(self, image_id, image_datastore,
                       destination_image_id, destination_datastore,
                       host, port):
    """Stream an image to another host over HTTP NFC disk transfer.

    Downloads the image's disk via a local shadow VM's read lease, pushes
    it to the remote agent, then registers the imported image there.

    :param image_id: id of the source image
    :param image_datastore: datastore holding the source image
    :param destination_image_id: id at destination; defaults to image_id
    :param destination_datastore: datastore id at destination
    :param host: remote agent host
    :param port: remote agent port
    :return: id of the VM created on the remote host for the import
    """
    if destination_image_id is None:
        destination_image_id = image_id
    metadata = self._read_metadata(image_datastore, image_id)
    shadow_vm_id = self._create_shadow_vm()

    # place transfer.vmdk under shadow_vm_path to work around VSAN's
    # restriction on files at datastore top-level
    shadow_vm_path = os_datastore_path(
        self._get_shadow_vm_datastore(),
        compond_path_join(VM_FOLDER_NAME_PREFIX, shadow_vm_id))
    transfer_vmdk_path = os.path.join(shadow_vm_path, "transfer.vmdk")
    self._logger.info("transfer_vmdk_path = %s" % transfer_vmdk_path)

    agent_client = None
    try:
        read_lease, disk_url = self._get_image_stream_from_shadow_vm(
            image_id, image_datastore, shadow_vm_id)
        # Inner try/finally: the NFC lease must be completed even if the
        # download fails, or the disk stays locked on the host.
        try:
            self.download_file(disk_url, transfer_vmdk_path, read_lease)
        finally:
            read_lease.Complete()

        agent_client = DirectClient("Host", Host.Client, host, port)
        agent_client.connect()

        vm_path, vm_id = self._prepare_receive_image(
            agent_client, destination_image_id, destination_datastore)
        spec = self._create_import_vm_spec(vm_id, destination_datastore,
                                           vm_path)
        self._send_image(agent_client, host, transfer_vmdk_path, spec)
        self._register_imported_image_at_host(agent_client,
                                              destination_image_id,
                                              destination_datastore,
                                              vm_id, metadata)
        return vm_id
    finally:
        # Best-effort local cleanup: transfer file, shadow VM, its folder,
        # and the remote client connection — in that order.
        try:
            os.unlink(transfer_vmdk_path)
        except OSError:
            pass
        self._delete_shadow_vm(shadow_vm_id)
        rm_rf(shadow_vm_path)
        if agent_client:
            agent_client.close()
def _ensure_directory_cleanup(self, vm_ds_path):
    # Upon successful destroy of VM, log any stray files still left in
    # the VM directory and delete the directory.
    vm_dir = os.path.dirname(datastore_to_os_path(vm_ds_path))
    if not os.path.isdir(vm_dir):
        return
    for f in os.listdir(vm_dir):
        if f.endswith(".vmdk"):
            self._logger.info("Stray disk "
                              "(possible data leak): %s" % f)
        else:
            self._logger.info("Stray file: %s" % f)
    self._logger.warning("Force delete vm directory %s" % vm_dir)
    rm_rf(vm_dir)
def _move_image(self, image_id, datastore, tmp_dir):
    """
    Atomic move of a tmp folder into the image datastore. Handles
    concurrent moves by locking a well know derivative of the image_id
    while doing the atomic move.
    The exclusive file lock ensures that only one move is successful.
    Has the following side effects:
        a - If the destination image already exists, it is assumed that
        someone else successfully copied the image over and the temp
        directory is deleted.
        b - If we fail to acquire the file lock after retrying 3 times,
        or the atomic move fails, the tmp image directory will be left
        behind and needs to be garbage collected later.

    image_id: String.The image id of the image being moved.
    datastore: String. The datastore id of the datastore.
    tmp_dir: String. The absolute path of the temp image directory.

    raises: OsError if the move fails
            AcquireLockFailure, InvalidFile if we fail to lock the
            destination image.
    """
    ds_type = self._get_datastore_type(datastore)
    # Destination directory that will hold the image's .vmdk.
    image_path = os.path.dirname(
        os_vmdk_path(datastore, image_id, IMAGE_FOLDER_NAME))
    parent_path = os.path.dirname(image_path)
    # Create the parent image directory if it doesn't exist.
    try:
        mkdir_p(parent_path)
    except OSError as e:
        if e.errno == errno.EEXIST and os.path.isdir(parent_path):
            # Parent directory exists nothing to do.
            pass
        else:
            raise
    try:
        # 300 retries x 0.01s wait = up to ~3 seconds for the lock.
        with FileBackedLock(image_path, ds_type, retry=300,
                            wait_secs=0.01):  # wait lock for 3 seconds
            if self._check_image_repair(image_id, datastore):
                raise DiskAlreadyExistException("Image already exists")
            shutil.move(tmp_dir, image_path)
    except (AcquireLockFailure, InvalidFile):
        self._logger.info("Unable to lock %s for atomic move" % image_id)
        raise
    except DiskAlreadyExistException:
        # Lost the race: another copier already placed the image, so
        # discard our tmp copy and propagate.
        self._logger.info("Image %s already copied" % image_id)
        rm_rf(tmp_dir)
        raise
def _move_image(self, image_id, datastore, tmp_dir):
    """
    Atomic move of a tmp folder into the image datastore. Handles
    concurrent moves by locking a well know derivative of the image_id
    while doing the atomic move.
    The exclusive file lock ensures that only one move is successful.
    Has the following side effects:
        a - If the destination image already exists, it is assumed that
        someone else successfully copied the image over and the temp
        directory is deleted.
        b - If we fail to acquire the file lock after retrying 3 times,
        or the atomic move fails, the tmp image directory will be left
        behind and needs to be garbage collected later.

    image_id: String.The image id of the image being moved.
    datastore: String. The datastore id of the datastore.
    tmp_dir: String. The absolute path of the temp image directory.

    raises: OsError if the move fails
            AcquireLockFailure, InvalidFile if we fail to lock the
            destination image.
    """
    ds_type = self._get_datastore_type(datastore)
    # Destination directory that will hold the image's .vmdk.
    image_path = os.path.dirname(os_vmdk_path(datastore, image_id,
                                              IMAGE_FOLDER_NAME))
    parent_path = os.path.dirname(image_path)
    # Create the parent image directory if it doesn't exist.
    try:
        mkdir_p(parent_path)
    except OSError as e:
        if e.errno == errno.EEXIST and os.path.isdir(parent_path):
            # Parent directory exists nothing to do.
            pass
        else:
            raise
    try:
        # 300 retries x 0.01s wait = up to ~3 seconds for the lock.
        with FileBackedLock(image_path, ds_type, retry=300,
                            wait_secs=0.01):  # wait lock for 3 seconds
            if self._check_image_repair(image_id, datastore):
                raise DiskAlreadyExistException("Image already exists")
            shutil.move(tmp_dir, image_path)
    except (AcquireLockFailure, InvalidFile):
        self._logger.info("Unable to lock %s for atomic move" % image_id)
        raise
    except DiskAlreadyExistException:
        # Lost the race: another copier already placed the image, so
        # discard our tmp copy and propagate.
        self._logger.info("Image %s already copied" % image_id)
        rm_rf(tmp_dir)
        raise
def send_image_to_host(self, image_id, image_datastore,
                       destination_image_id, destination_datastore,
                       host, port):
    """Stream an image to another host over HTTP NFC disk transfer.

    Downloads the image's disk via a local shadow VM's read lease into a
    temp vmdk at the datastore top level, pushes it to the remote agent,
    then registers the imported image there.

    :param image_id: id of the source image
    :param image_datastore: datastore holding the source image
    :param destination_image_id: id at destination; defaults to image_id
    :param destination_datastore: datastore id at destination
    :param host: remote agent host
    :param port: remote agent port
    :return: id of the VM created on the remote host for the import
    """
    if destination_image_id is None:
        destination_image_id = image_id
    metadata = self._read_metadata(image_datastore, image_id)
    shadow_vm_id = self._create_shadow_vm()
    # Temp transfer file placed at the datastore's top level, named
    # after the shadow VM so concurrent transfers don't collide.
    tmp_path = "/vmfs/volumes/%s/%s_transfer.vmdk" % (
        self._get_shadow_vm_datastore(), shadow_vm_id)
    self._logger.info("http_disk_transfer: tmp_path = %s" % tmp_path)
    agent_client = None
    try:
        read_lease, disk_url = self._get_image_stream_from_shadow_vm(
            image_id, image_datastore, shadow_vm_id)
        # Inner try/finally: the NFC lease must be completed even if the
        # download fails, or the disk stays locked on the host.
        try:
            self.download_file(disk_url, tmp_path, read_lease)
        finally:
            read_lease.Complete()
        agent_client = DirectClient("Host", Host.Client, host, port)
        agent_client.connect()
        vm_path, vm_id = self._prepare_receive_image(
            agent_client, destination_image_id, destination_datastore)
        spec = self._create_import_vm_spec(vm_id, destination_datastore,
                                           vm_path)
        self._send_image(agent_client, host, tmp_path, spec)
        self._register_imported_image_at_host(agent_client,
                                              destination_image_id,
                                              destination_datastore,
                                              vm_id, metadata)
        return vm_id
    finally:
        # Best-effort local cleanup: transfer file, shadow VM, its
        # folder, and the remote client connection — in that order.
        try:
            os.unlink(tmp_path)
        except OSError:
            pass
        self._delete_shadow_vm(shadow_vm_id)
        rm_rf(
            os_datastore_path(
                self._get_shadow_vm_datastore(),
                compond_path_join(VM_FOLDER_NAME_PREFIX, shadow_vm_id)))
        if agent_client:
            agent_client.close()
def test_create_vm_with_ephemeral_disks(self):
    ds_dir = os.path.join(
        "/tmp/images",
        FakeHypervisor.datastore_id(self.get_image_datastore()))
    try:
        mkdir_p(ds_dir)
        with tempfile.NamedTemporaryFile(dir=ds_dir, suffix=".vmdk") as f:
            # The temp file name created is
            # "/tmp/image/<ds>/<uniquepart>.vmdk".
            # This simulates an image being present on the agent;
            # the file is deleted on leaving the context.
            image_id = os.path.basename(f.name)[:-len(".vmdk")]
            self._test_create_vm_with_ephemeral_disks(image_id)
    finally:
        rm_rf(ds_dir)
def provision(self, request):
    """
    Provision an agent for photon controller by providing
    its boostrapping configuration.

    :type request: ProvisionRequest
    :rtype: ProvisionResponse
    """
    # cleanup vibs uploaded by deployer.
    # in multi-vibs scenario, provision is called after all vibs are
    # successfully installed.
    rm_rf("/tmp/photon-controller-vibs")
    try:
        agent_config = common.services.get(ServiceName.AGENT_CONFIG)
        agent_config.update_config(request)
    except InvalidConfig as e:
        return ProvisionResponse(ProvisionResultCode.INVALID_CONFIG,
                                 str(e))
    # fix: replaced Python 2-only "except Exception, e" with the
    # "as" form used by the InvalidConfig handler above.
    except Exception as e:
        self._logger.warning("Unexpected exception", exc_info=True)
        return ProvisionResponse(ProvisionResultCode.SYSTEM_ERROR,
                                 str(e))
def send_image_to_host(self, image_id, image_datastore,
                       destination_image_id, destination_datastore,
                       host, port):
    """Stream an image (with manifest and metadata) to another host.

    Downloads the image's disk via a local shadow VM's read lease into a
    temp vmdk at the datastore top level, then sends it to the remote
    host together with the image's manifest and metadata.

    :param image_id: id of the source image
    :param image_datastore: datastore holding the source image
    :param destination_image_id: id at destination; defaults to image_id
    :param destination_datastore: datastore id at destination
    :param host: remote agent host
    :param port: remote agent port
    :return: name of the VM imported on the remote host
    """
    manifest, metadata = self._read_metadata(image_datastore, image_id)
    shadow_vm_id = self._create_shadow_vm()
    # Temp transfer file placed at the datastore's top level, named
    # after the shadow VM so concurrent transfers don't collide.
    tmp_path = "/vmfs/volumes/%s/%s_transfer.vmdk" % (
        self._get_shadow_vm_datastore(), shadow_vm_id)
    try:
        read_lease, disk_url =\
            self._get_image_stream_from_shadow_vm(
                image_id, image_datastore, shadow_vm_id)
        # Inner try/finally: the NFC lease must be completed even if the
        # download fails, or the disk stays locked on the host.
        try:
            self.download_file(disk_url, tmp_path, read_lease)
        finally:
            read_lease.Complete()
        if destination_image_id is None:
            destination_image_id = image_id
        spec = self._create_import_vm_spec(
            destination_image_id, destination_datastore)
        imported_vm_name = self._send_image(
            tmp_path, manifest, metadata, spec,
            destination_image_id, destination_datastore, host, port)
        return imported_vm_name
    finally:
        # Best-effort local cleanup: transfer file, shadow VM, and the
        # shadow VM's folder.
        try:
            os.unlink(tmp_path)
        except OSError:
            pass
        self._delete_shadow_vm(shadow_vm_id)
        rm_rf(os_datastore_path(self._get_shadow_vm_datastore(),
                                compond_path_join(VM_FOLDER_NAME_PREFIX,
                                                  shadow_vm_id)))