def _VerifyNumberOfHardLinksInRawDisk(self, tar, filename, count):
     """Tests if a file on raw disk has a specified number of hard links."""
     tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
     tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
     self.assertEqual(subprocess.call(tar_cmd), 0)
     disk_path = os.path.join(tmp_dir, 'disk.raw')
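     # Map the raw disk image so its partitions show up as block devices (e.g. under /dev/mapper).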
     with utils.LoadDiskImage(disk_path) as devices:
         self.assertEqual(len(devices), 1)
         mnt_dir = tempfile.mkdtemp(dir=self.tmp_root)
         with utils.MountFileSystem(devices[0], mnt_dir):
             self.assertEqual(
                 os.stat(os.path.join(mnt_dir, filename)).st_nlink, count)

def _VerifyFileInRawDiskEndsWith(self, tar, filename, text):
     """Tests if a file on raw disk ends with a specified text."""
     tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
     tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
     self.assertEqual(subprocess.call(tar_cmd), 0)
     disk_path = os.path.join(tmp_dir, 'disk.raw')
     with utils.LoadDiskImage(disk_path) as devices:
         self.assertEqual(len(devices), 1)
         mnt_dir = tempfile.mkdtemp(dir=self.tmp_root)
         with utils.MountFileSystem(devices[0], mnt_dir):
              with open(os.path.join(mnt_dir, filename), 'r') as f:
                  file_content = f.read()
              self.assertTrue(file_content.endswith(text))

def _VerifyImageHas(self, tar, expected):
     """Tests if raw disk contains an expected list of files/directories."""
     tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
     tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
     self.assertEqual(subprocess.call(tar_cmd), 0)
     disk_path = os.path.join(tmp_dir, 'disk.raw')
     with utils.LoadDiskImage(disk_path) as devices:
         self.assertEqual(len(devices), 1)
         mnt_dir = tempfile.mkdtemp(dir=self.tmp_root)
         with utils.MountFileSystem(devices[0], mnt_dir):
             found = []
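              # Walk the mounted filesystem and record every file and directory path with the mount point prefix stripped.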
             for root, dirs, files in os.walk(mnt_dir):
                 root = root.replace(mnt_dir, '')
                 for f in files:
                     found.append(os.path.join(root, f))
                 for d in dirs:
                     found.append(os.path.join(root, d))
     self._AssertListEqual(expected, found)
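
A minimal sketch of how these helpers might be called from a test; the bundle path and the expected contents below are hypothetical:

  # Hypothetical test using the helpers above; paths and expectations are illustrative only.
  def testBundledImageContents(self):
    tar = os.path.join(self.tmp_root, 'image.tar.gz')  # assumed to have been produced earlier in the test
    self._VerifyImageHas(tar, ['/etc', '/etc/fstab', '/home'])
    self._VerifyFileInRawDiskEndsWith(tar, 'etc/fstab', 'UUID=1234 / ext4 defaults 0 0\n')
    self._VerifyNumberOfHardLinksInRawDisk(tar, 'etc/hardlinked_file', 2)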
Example no. 4
  def Bundleup(self):
    """Creates a raw disk copy of OS image and bundles it into gzipped tar.

    Returns:
      A size of a generated raw disk and the SHA1 digest of the the tar archive.

    Raises:
      RawDiskError: If number of partitions in a created image doesn't match
                    expected count.
    """

    # Create sparse file with specified size
    disk_file_path = os.path.join(self._scratch_dir, self._disk_file_name)
    # with open(disk_file_path, 'wb') as _:
    #   pass
    self._excludes.append(exclude_spec.ExcludeSpec(disk_file_path))

    logging.info('Initializing disk file')
    partition_start = None
    uuid = None
    if self._disk:
      # If a disk device has been provided then preserve whatever is there on
      # the disk before the first partition in case there is an MBR present.
      partition_start, uuid = self._InitializeDiskFileFromDevice(disk_file_path)
    else:
      #feoff: our code goes this way

      # User didn't specify a disk device. Initialize a device with a simple
      # partition table.
      logging.info("Setting partiton size: " + str(self._fs_size) )
      self._ResizeFile(disk_file_path, self._fs_size)

      # User didn't specify a disk to copy. Create a new partition table
      utils.MakePartitionTable(disk_file_path)
      # Pass 1MB as start to avoid 'Warning: The resulting partition is not
      # properly aligned for best performance.' from parted.
      partition_start = 1024 * 1024

    # Create a new partition starting at partition_start and ending at
    # self._fs_size - 512. Note that the last argument to MakePartition is the
    # last sector, not the size (feoff).

    # feoff: this is not just a log - we make sure the size has actually changed
    # and that the OS has updated its information about it; this is needed in
    # the FUSE scenario.
    utils.MakePartition(disk_file_path, 'primary', 'ext2', partition_start,
                        self._fs_size - 512)

    logging.info("Preparing file disk size " + str(os.path.getsize(disk_file_path)) + " bytes")

    with utils.LoadDiskImage(disk_file_path) as devices:
      # For now we only support disks with a single partition.
      if len(devices) != 1:
        raise RawDiskError(devices)
      # List contents of /dev/mapper to help with debugging. Contents will
      # be listed in debug log only
      utils.RunCommand(['ls', '/dev/mapper'])
      logging.info('Making filesystem')
      uuid = utils.MakeFileSystem(devices[0], self._fs_type, uuid)
    with utils.LoadDiskImage(disk_file_path) as devices:
      if uuid is None:
        raise Exception('Could not get uuid from MakeFileSystem')
      mount_point = tempfile.mkdtemp(dir=self._scratch_dir)
      with utils.MountFileSystem(devices[0], mount_point, self._fs_type):
        logging.info('Copying contents')
        # feoff: temporarily disable SELinux so that rsync works correctly, see
        # https://github.com/GoogleCloudPlatform/compute-image-packages/issues/132
        selinux_state = self._setSELinux("0")
        self._CopySourceFiles(mount_point)
        self._CopyPlatformSpecialFiles(mount_point)
        self._ProcessOverwriteList(mount_point)
        self._CleanupNetwork(mount_point)
        self._UpdateFstab(mount_point, uuid)
        # feoff: configure eth0 to use DHCP
        self._SetDhcp(mount_point)
        # feoff: install grub
        # TODO(feoff): add_grub should be exposed as a parameter
        add_grub = True
        if add_grub:
          from gcimagebundlelib import grub
          grub.InstallGrub(mount_point, devices[0])
        self._setSELinux(selinux_state)

    tar_entries = []

    manifest_file_path = os.path.join(self._scratch_dir, 'manifest.json')
    manifest_created = self._manifest.CreateIfNeeded(manifest_file_path)
    if manifest_created:
      tar_entries.append(manifest_file_path)

    tar_entries.append(disk_file_path)
    
    # TODO(feoff): make it parameterizable - whether to create the tar archive or not
    # logging.info('Creating tar.gz archive')
    utils.TarAndGzipFile(tar_entries,
                         self._output_tarfile)
    # Delete the intermediate files, but never the output file itself.
    for tar_entry in tar_entries:
      if tar_entry != self._output_tarfile:
        os.remove(tar_entry)

    # TODO(user): It would be better to compute tar.gz file hash during
    # archiving.
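    # Hash the tarball in 8 KB chunks so memory usage stays constant.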
    h = hashlib.sha1()
    with open(self._output_tarfile, 'rb') as tar_file:
      for chunk in iter(lambda: tar_file.read(8192), b''):
        h.update(chunk)
    return (self._fs_size, h.hexdigest())
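
A sketch of how a caller might consume the values Bundleup returns; the 'bundle' object here is a stand-in for a fully configured bundle created elsewhere:

  # Hypothetical caller; 'bundle' stands in for a fully configured bundle object.
  fs_size, digest = bundle.Bundleup()
  logging.info('Bundled image: %d bytes, SHA1 %s', fs_size, digest)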
Example no. 5
    def Bundleup(self):
        """Creates a raw disk copy of OS image and bundles it into gzipped tar.

    Returns:
      A size of a generated raw disk and the SHA1 digest of the the tar archive.

    Raises:
      RawDiskError: If number of partitions in a created image doesn't match
                    expected count.
    """

        # Create sparse file with specified size
        disk_file_path = os.path.join(self._scratch_dir, 'disk.raw')
        with open(disk_file_path, 'wb') as _:
            pass
        self._excludes.append(exclude_spec.ExcludeSpec(disk_file_path))

        logging.info('Initializing disk file')
        partition_start = None
        uuid = None
        if self._disk:
            # If a disk device has been provided then preserve whatever is there on
            # the disk before the first partition in case there is an MBR present.
            partition_start, uuid = self._InitializeDiskFileFromDevice(
                disk_file_path)
        else:
            # User didn't specify a disk device. Initialize a device with a simple
            # partition table.
            self._ResizeFile(disk_file_path, self._fs_size)
            # User didn't specify a disk to copy. Create a new partition table
            utils.MakePartitionTable(disk_file_path)
            # Pass 1MB as start to avoid 'Warning: The resulting partition is not
            # properly aligned for best performance.' from parted.
            partition_start = 1024 * 1024

        # Create a new partition starting at partition_start of size
        # self._fs_size - partition_start
        utils.MakePartition(disk_file_path, 'primary', 'ext2', partition_start,
                            self._fs_size - partition_start)
        with utils.LoadDiskImage(disk_file_path) as devices:
            # For now we only support disks with a single partition.
            if len(devices) != 1:
                raise RawDiskError(devices)
            # List contents of /dev/mapper to help with debugging. Contents will
            # be listed in debug log only
            utils.RunCommand(['ls', '/dev/mapper'])
            logging.info('Making filesystem')
            uuid = utils.MakeFileSystem(devices[0], self._fs_type, uuid)
        with utils.LoadDiskImage(disk_file_path) as devices:
            if uuid is None:
                raise Exception('Could not get uuid from MakeFileSystem')
            mount_point = tempfile.mkdtemp(dir=self._scratch_dir)
            with utils.MountFileSystem(devices[0], mount_point):
                logging.info('Copying contents')
                self._CopySourceFiles(mount_point)
                self._CopyPlatformSpecialFiles(mount_point)
                self._ProcessOverwriteList(mount_point)
                self._CleanupNetwork(mount_point)
                self._UpdateFstab(mount_point, uuid)

        tar_entries = []

        manifest_file_path = os.path.join(self._scratch_dir, 'manifest.json')
        manifest_created = self._manifest.CreateIfNeeded(manifest_file_path)
        if manifest_created:
            tar_entries.append(manifest_file_path)

        tar_entries.append(disk_file_path)
        logging.info('Creating tar.gz archive')
        utils.TarAndGzipFile(tar_entries, self._output_tarfile)
        for tar_entry in tar_entries:
            os.remove(tar_entry)

        # TODO(user): It would be better to compute tar.gz file hash during
        # archiving.
        h = hashlib.sha1()
        with open(self._output_tarfile, 'rb') as tar_file:
            for chunk in iter(lambda: tar_file.read(8192), b''):
                h.update(chunk)
        return (self._fs_size, h.hexdigest())
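
For reference, the chunked SHA1 computation that both Bundleup variants perform can be factored into a small standalone helper; this sketch is illustrative and not part of the original module:

  import hashlib

  def _Sha1OfFile(path, chunk_size=8192):
    """Computes the SHA1 digest of a file without loading it all into memory."""
    h = hashlib.sha1()
    with open(path, 'rb') as f:
      for chunk in iter(lambda: f.read(chunk_size), b''):
        h.update(chunk)
    return h.hexdigest()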