def create(self, disks, ssh_key: Artifact,
           snapshot_type: SnapshotType = SnapshotType.FULL,
           target_version: str = None,
           mem_file_name: str = "vm.mem",
           snapshot_name: str = "vm.vmstate",
           net_ifaces=None):
    """Create a Snapshot object from a microvm and artifacts.

    Pauses the microvm, writes its memory and vmstate files into the
    snapshot directory inside the jail, and returns a Snapshot that
    references the host-side paths of those files.

    :param disks: disk artifacts to record in the Snapshot
    :param ssh_key: ssh key artifact; a copy is stored with the snapshot
    :param snapshot_type: FULL or DIFF snapshot
    :param target_version: optional snapshot target version string
    :param mem_file_name: guest memory file name inside the snapshot dir
    :param snapshot_name: vmstate file name inside the snapshot dir
    :param net_ifaces: network interface configs; defaults to a single
        NetIfaceConfig() when not provided
    :return: Snapshot describing the created artifacts
    """
    # Disable API timeout as the APIs for snapshot related procedures
    # take longer.
    self._microvm.api_session.untime()

    snapshot_dir = self.create_snapshot_dir()
    # Derive the in-jail path from the directory create_snapshot_dir()
    # actually created instead of hard-coding "/snapshot", so the guest
    # paths always match the host-side directory. (The directory is
    # presumably created directly under the jailer chroot — the jailed
    # Firecracker sees it as "/<basename>".)
    jailed_dir = '/' + os.path.basename(snapshot_dir)

    self._microvm.pause_to_snapshot(
        mem_file_path=os.path.join(jailed_dir, mem_file_name),
        snapshot_path=os.path.join(jailed_dir, snapshot_name),
        diff=snapshot_type == SnapshotType.DIFF,
        version=target_version)

    # Create a copy of the ssh_key artifact.
    ssh_key_copy = ssh_key.copy()
    mem_path = os.path.join(snapshot_dir, mem_file_name)
    vmstate_path = os.path.join(snapshot_dir, snapshot_name)
    return Snapshot(
        mem=mem_path,
        vmstate=vmstate_path,
        # TODO: To support more disks we need to figure out a
        # simple and flexible way to store snapshot artifacts
        # in S3. This should be done in a PR where we add tests
        # that resume from S3 snapshot artifacts.
        disks=disks,
        net_ifaces=net_ifaces or [NetIfaceConfig()],
        ssh_key=ssh_key_copy.local_path())
def create(self, disks, ssh_key: Artifact,
           snapshot_type: SnapshotType = SnapshotType.FULL,
           target_version: str = None):
    """Create a Snapshot object from a microvm and artifacts.

    Sets up a "snapshot" directory inside the jailer chroot, pauses the
    microvm into a snapshot there, and returns a Snapshot referencing
    the host-side vm.mem/vm.vmstate files.
    """
    # Snapshot-related API calls can exceed the default timeout, so
    # disable it for this session.
    self._microvm.api_session.untime()

    # Prepare the snapshot directory inside the jailer chroot and hand
    # ownership to the jailed uid/gid so Firecracker can write into it.
    snapshot_dir = os.path.join(self._microvm.jailer.chroot_path(),
                                "snapshot")
    Path(snapshot_dir).mkdir(parents=True, exist_ok=True)
    utils.run_cmd('chown {}:{} {}'.format(self._microvm.jailer.uid,
                                          self._microvm.jailer.gid,
                                          snapshot_dir))

    self._microvm.pause_to_snapshot(
        mem_file_path="/snapshot/vm.mem",
        snapshot_path="/snapshot/vm.vmstate",
        diff=snapshot_type == SnapshotType.DIFF,
        version=target_version)

    # Keep a private copy of the ssh key artifact with the snapshot.
    key_copy = ssh_key.copy()
    return Snapshot(
        mem=os.path.join(snapshot_dir, "vm.mem"),
        vmstate=os.path.join(snapshot_dir, "vm.vmstate"),
        # TODO: To support more disks we need to figure out a
        # simple and flexible way to store snapshot artifacts
        # in S3. This should be done in a PR where we add tests
        # that resume from S3 snapshot artifacts.
        disks=disks,
        ssh_key=key_copy.local_path())
def create(self, disks, ssh_key: Artifact,
           snapshot_type: SnapshotType = SnapshotType.FULL,
           target_version: str = None,
           mem_file_name: str = "vm.mem",
           snapshot_name: str = "vm.vmstate",
           net_ifaces=None,
           use_ramdisk=False):
    """Create a Snapshot object from a microvm and artifacts.

    Chooses either the jailer's ramfs directory (when ``use_ramdisk``
    is set) or a freshly created snapshot directory, pauses the microvm
    into a snapshot there, and returns a Snapshot referencing the
    host-side memory and vmstate files.
    """
    if use_ramdisk:
        snaps_dir = self._microvm.jailer.chroot_ramfs_path()
        mem_full_path = os.path.join(snaps_dir, mem_file_name)
        vmstate_full_path = os.path.join(snaps_dir, snapshot_name)

        # Pre-allocate ram for memfile to eliminate allocation
        # variability: write the full guest-memory size up front.
        guest_mem_mib = \
            self._microvm.machine_cfg.configuration['mem_size_mib']
        utils.run_cmd('dd if=/dev/zero of={} bs=1M count={}'.format(
            mem_full_path, guest_mem_mib
        ))
        # Hand the pre-allocated file to the jailed uid/gid so the
        # jailed Firecracker process can write to it.
        utils.run_cmd('chown {}:{} {}'.format(
            self._microvm.jailer.uid,
            self._microvm.jailer.gid,
            mem_full_path
        ))
    else:
        snaps_dir = self.create_snapshot_dir()
        mem_full_path = os.path.join(snaps_dir, mem_file_name)
        vmstate_full_path = os.path.join(snaps_dir, snapshot_name)

    # Inside the jail the snapshot directory is visible at
    # "/<basename>", so derive the guest-side paths from it.
    jailed_dir = os.path.join('/', os.path.basename(snaps_dir))
    self._microvm.pause_to_snapshot(
        mem_file_path=os.path.join(jailed_dir, mem_file_name),
        snapshot_path=os.path.join(jailed_dir, snapshot_name),
        diff=snapshot_type == SnapshotType.DIFF,
        version=target_version)

    # Keep a private copy of the ssh key artifact with the snapshot.
    key_copy = ssh_key.copy()
    return Snapshot(
        mem=mem_full_path,
        vmstate=vmstate_full_path,
        # TODO: To support more disks we need to figure out a
        # simple and flexible way to store snapshot artifacts
        # in S3. This should be done in a PR where we add tests
        # that resume from S3 snapshot artifacts.
        disks=disks,
        net_ifaces=net_ifaces or [NetIfaceConfig()],
        ssh_key=key_copy.local_path())