Example #1
	def _copyxattr(src, dest, exclude=None):

		try:
			attrs = _os.listxattr(src)
		except OSError as e:
			if e.errno != OperationNotSupported.errno:
				raise
			attrs = ()
		if attrs:
			if exclude is not None and isinstance(attrs[0], bytes):
				exclude = exclude.encode(_encodings['fs'])
			exclude = _get_xattr_excluder(exclude)

		for attr in attrs:
			if exclude(attr):
				continue
			try:
				_os.setxattr(dest, attr, _os.getxattr(src, attr))
				raise_exception = False
			except OSError:
				raise_exception = True
			if raise_exception:
				raise OperationNotSupported(_("Filesystem containing file '%s' "
					"does not support extended attribute '%s'") %
					(_unicode_decode(dest), _unicode_decode(attr)))
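
The `_get_xattr_excluder` call above returns a predicate that decides which attribute names to skip. Portage's real helper is more involved; a generic stand-in written under that assumption could be as small as:

import fnmatch

def _get_xattr_excluder(patterns):
    """Build a predicate that is true for attribute names matching any pattern."""
    if not patterns:
        return lambda attr: False
    if isinstance(patterns, (str, bytes)):
        patterns = patterns.split()
    # fnmatch accepts bytes as well as str, as long as name and pattern agree
    return lambda attr: any(fnmatch.fnmatch(attr, pat) for pat in patterns)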
Example #2
 def set_file_atts(file: str, file_atts: list):
     for att_ctr in range(Backup.att_number):
         os.setxattr(file,
                     Backup.att_sum + '_' + str(att_ctr),
                     bytes(file_atts[0], 'utf-8'))
         os.setxattr(file,
                     Backup.att_time + '_' + str(att_ctr),
                     bytes(file_atts[1], 'utf-8'))
Example #3
	def _copyxattr(src, dest):
		for attr in _os.listxattr(src):
			try:
				_os.setxattr(dest, attr, _os.getxattr(src, attr))
				raise_exception = False
			except OSError:
				raise_exception = True
			if raise_exception:
				raise OperationNotSupported("Filesystem containing file '%s' does not support extended attributes" % dest)
Example #4
    def copyxattr(src, dest):
        """
        Copy the extended attributes (xattr) from `src` to `dest`.

        NOTE: xattr only available on Linux.
        """
        if not hasattr(os, "listxattr"):
            return
        for name in os.listxattr(src):
            value = os.getxattr(src, name)
            os.setxattr(dest, name, value)
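
A quick round-trip check of the helper above, assuming Linux and a temporary directory on a filesystem with user xattrs enabled (the attribute name is made up for illustration):

import os
import tempfile

if hasattr(os, "listxattr"):
    with tempfile.NamedTemporaryFile() as src, tempfile.NamedTemporaryFile() as dest:
        try:
            # user.* is the namespace unprivileged processes may write to
            os.setxattr(src.name, "user.example", b"hello")
        except OSError:
            pass  # e.g. a tmpfs without user xattr support
        else:
            copyxattr(src.name, dest.name)
            assert os.getxattr(dest.name, "user.example") == b"hello"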
Example #5
File: misc.py Project: grawity/code
def set_file_attr(path, attr, value):
    try:
        if hasattr(path, "fileno"):
            path = path.fileno()
        if hasattr(value, "encode"):
            value = value.encode("utf-8")
        os.setxattr(path, "user.%s" % attr, value)
    except FileNotFoundError:
        raise
    except OSError:
        return
Example #6
 def _copyxattr(src, dst, *, follow_symlinks=True):
     try:
         names = os.listxattr(src, follow_symlinks=follow_symlinks)
     except OSError as e:
         if e.errno not in (errno.ENOTSUP, errno.ENODATA):
             raise
         return
     for name in names:
         try:
             value = os.getxattr(src, name, follow_symlinks=follow_symlinks)
             os.setxattr(dst, name, value, follow_symlinks=follow_symlinks)
         except OSError as e:
             if e.errno not in (errno.EPERM, errno.ENOTSUP, errno.ENODATA):
                 raise
Example #7
	def patch_node(p):
		# getxattr/setxattr work weirdly for broken symlinks
		# glusterd produces these sometimes, seem to be expendable
		try: bugged = p.is_symlink() and not p.exists()
		except OSError: bugged = True
		if bugged:
			if not dry_run: p.unlink()
			return
		for k in os.listxattr(p, follow_symlinks=False):
			if not k.startswith('trusted.'): continue
			k_user = 'user.{}'.format(k[8:])
			v = os.getxattr(p, k, follow_symlinks=False)
			log.debug(f'patch: {p} :: {k} -> {k_user} [{v!r}]')
			if not dry_run: os.setxattr(p, k_user, v, follow_symlinks=False)
Example #8
def set_file_attr(path, attr, value):
    try:
        if hasattr(path, "fileno"):
            path = path.fileno()
        if hasattr(value, "encode"):
            value = value.encode("utf-8")
        if value:
            os.setxattr(path, "user.%s" % attr, value)
        else:
            os.removexattr(path, "user.%s" % attr)
    except FileNotFoundError:
        raise
    except OSError:
        return
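
A read-side counterpart with the same error tolerance is easy to sketch; the name get_file_attr is hypothetical, not part of the original module:

import os

def get_file_attr(path, attr):
    """Return the stored user.<attr> value as bytes, or None if unset/unsupported."""
    try:
        if hasattr(path, "fileno"):
            path = path.fileno()
        return os.getxattr(path, "user.%s" % attr)
    except FileNotFoundError:
        raise
    except OSError:
        return None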
Example #9
def fetch_image(url, cache, label):
    """
    Fetch an image from url into the cache with label.

    Fetches an image from @url into @cache as @label if a file with the
    same name downloaded from the same URL doesn't yet exist. There is
    no need for fancier caching, as image URLs are unique enough.

    Labels are not unique enough, because the URL corresponding to
    the label may get updated. And using a filename derived from URL
    would lead to leftover image files filling up the cache directory,
    as nobody would delete them when the URL changes.

    Returns the full path to the image.
    """

    original_name = os.path.basename(urllib.parse.urlparse(url).path)
    nameroot, suffix = os.path.splitext(original_name)
    image_name = label + suffix
    path = os.path.join(cache, image_name)
    image_last_modified_by_src = get_metadata_from_url(url, "Last-Modified")
    image_last_modified_by_file = image_source_last_modified_by_file_metadata(
        path)

    if (not os.path.exists(path) or url != origurl(path)
            or image_last_modified_by_src != image_last_modified_by_file):
        logging.info("Fetch url %s for %s", url, image_name)

        image_tempfile = tempfile.NamedTemporaryFile(dir=cache, delete=False)
        try:
            request = urllib.request.urlopen(url)  # nosec
            shutil.copyfileobj(request, image_tempfile)
            request.close()
        except Exception:  # pylint: disable=broad-except
            logging.warning(traceback.format_exc())
            os.unlink(image_tempfile.name)
            return None

        os.setxattr(image_tempfile.name, URL_XATTR, os.fsencode(url))
        os.setxattr(
            image_tempfile.name,
            DATE_XATTR,
            os.fsencode(image_last_modified_by_src),
        )
        os.rename(image_tempfile.name, path)
    else:
        logging.info("Using cached image %s for %s", path, image_name)

    return path
Example #10
    def set_path_remote_id(path: Path,
                           remote_id: Union[bytes, str],
                           name: str = "ndrive") -> None:
        if not isinstance(remote_id, bytes):
            remote_id = unicodedata.normalize("NFC", remote_id).encode("utf-8")

        locker = unlock_path(path, False)
        try:
            stat_ = path.stat()
            os.setxattr(path, f"user.{name}", remote_id)  # type: ignore
            os.utime(path, (stat_.st_atime, stat_.st_mtime))
        except FileNotFoundError:
            pass
        finally:
            lock_path(path, locker)
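
The value written above can be read back with os.getxattr; a minimal sketch of such a reader (the function name is assumed, not necessarily the project's actual API):

import os
from pathlib import Path
from typing import Optional

def get_path_remote_id(path: Path, name: str = "ndrive") -> Optional[str]:
    """Return the remote id stored in the user.<name> xattr, or None if absent."""
    try:
        return os.getxattr(path, f"user.{name}").decode("utf-8", errors="ignore")
    except OSError:
        return None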
Example #11
    def setxattr(self, path, name, value, options, position=0):
        if path not in self.files:
            raise FuseOSError(ENOENT)

        tempfile = self.files[path]
        tempfile.setdefault('attrs', {})
        tempfile['attrs'][name] = value
        self.files[path] = tempfile

        real_path = self._realpath(path)
        if os.path.isfile(real_path):
            try:
                os.setxattr(real_path, name, value, options)
            except Exception:
                pass
Example #12
    def _copyxattr(src, dst, *, follow_symlinks=True):
        """Copy extended filesystem attributes from `src` to `dst`.

        Overwrite existing attributes.

        If `follow_symlinks` is false, symlinks won't be followed.

        """

        for name in os.listxattr(src, follow_symlinks=follow_symlinks):
            try:
                value = os.getxattr(src, name, follow_symlinks=follow_symlinks)
                os.setxattr(dst, name, value, follow_symlinks=follow_symlinks)
            except OSError as e:
                if e.errno not in (errno.EPERM, errno.ENOTSUP, errno.ENODATA):
                    raise
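
This is the same tolerant pattern CPython's shutil uses internally, which is why shutil.copy2() carries user xattrs along on Linux when both filesystems support them. A small sketch (only meaningful on an xattr-capable filesystem):

import os
import shutil
import tempfile

with tempfile.TemporaryDirectory() as workdir:
    src = os.path.join(workdir, "a.txt")
    dst = os.path.join(workdir, "b.txt")
    with open(src, "wb") as f:
        f.write(b"payload")
    os.setxattr(src, "user.origin", b"example")
    shutil.copy2(src, dst)  # copystat() copies the xattrs via _copyxattr()
    print(os.getxattr(dst, "user.origin"))  # b'example'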
Example #13
 def scan(self):
     if exists("save.p"):
         self.cache = pickle.load(open( "save.p", "rb" ))
     for root, dirs, files in os.walk('/home/mikael/Music/Ablaze my sorrow'):
         for name in files:
             if name.startswith('.'):
                 continue
             with open(join(root, name),'rb') as fp:
                 print(fp.name)
                 #if 'user.sha256' not in attrs:
                 #    h = hashlib.sha256(fp.read())
                 #    sum = h.digest()
                 #    attrs['user.sha256'] = sum
                 modified = os.stat(fp.fileno()).st_mtime
                 if fp.name in self.cache:
                     if 'modified' in self.cache[fp.name]:
                         print(modified, self.cache[fp.name]['modified'])
                         if modified <= self.cache[fp.name]['modified']:
                             print("Optimized!")
                             continue
                 try:
                     rid = os.getxattr(fp.fileno(),'user.musicbrainz.recordingid').decode('utf-8')
                 except Exception as e:
                     rid = False
                 #    #import pdb;pdb.set_trace()
                 if rid:
                     self.cache["mbid:"+rid] = fp.name
                     self.cache[fp.name] = {}
                     self.cache[fp.name]['modified'] = modified
                     continue
                 else:
                     try:
                         metadata = mutagen.File(fp.name,easy=True)
                     except Exception as e:
                         metadata = False
                 if not metadata:
                     print("Missing!")
                     continue
                 if 'musicbrainz_trackid' in metadata:
                     os.setxattr(fp.fileno(),'user.musicbrainz.recordingid',metadata['musicbrainz_trackid'][0].encode('utf-8'))
                     self.cache[metadata['musicbrainz_trackid'][0]] = fp.name
                     if not fp.name in self.cache:
                         self.cache[fp.name] = {}
                     self.cache[fp.name]['modified'] = int(time.time())
                     #import pdb;pdb.set_trace()
     for key in self.cache:
         self.dht.set(key,"http://185.97.32.250:8468/by-mbid/"+key)
Example #14
    def __setitem__(self, k: str, v: bytes) -> None:
        if not isinstance(k, KEY_TYPES):
            raise TypeError(f"Xattr keys must be str, not {type(k).__name__}")
        if not isinstance(v, bytes):
            raise TypeError(
                f"Xattr values must be bytes, not {type(k).__name__}")

        return setxattr(self.path, k, v, follow_symlinks=self.follow_symlinks)
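
For context, the kind of dict-like wrapper this __setitem__ belongs to can be sketched as below; the class name and attributes are assumptions for illustration, not the original project's API:

import os
from typing import Iterator

class Xattrs:
    """Minimal mapping-style view over a file's extended attributes."""

    def __init__(self, path: str, follow_symlinks: bool = True) -> None:
        self.path = path
        self.follow_symlinks = follow_symlinks

    def __getitem__(self, k: str) -> bytes:
        return os.getxattr(self.path, k, follow_symlinks=self.follow_symlinks)

    def __setitem__(self, k: str, v: bytes) -> None:
        os.setxattr(self.path, k, v, follow_symlinks=self.follow_symlinks)

    def __delitem__(self, k: str) -> None:
        os.removexattr(self.path, k, follow_symlinks=self.follow_symlinks)

    def __iter__(self) -> Iterator[str]:
        return iter(os.listxattr(self.path, follow_symlinks=self.follow_symlinks))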
Example #15
def post_run(self):
	bld = self.generator.bld
	sig = self.signature()
	for node in self.outputs:
		if not node.exists():
			self.hasrun = MISSING
			self.err_msg = '-> missing file: %r' % node.abspath()
			raise Errors.WafError(self.err_msg)
		os.setxattr(node.abspath(), 'user.waf.sig', sig)
	if not self.outputs:
		# only for task with no outputs
		bld.task_sigs[self.uid()] = sig
	if not self.keep_last_cmd:
		try:
			del self.last_cmd
		except AttributeError:
			pass
Example #16
 def setxattr(self, path, name, value, options, position=0):
     full_path = self._full_path(path)
     print("getxattr "+str(self)+" path="+full_path+" name="+name+" value="+str(value)+" options="+str(options))
     if name == "user.owncloud.virtual":
         (uid, gid, pid) = fuse_get_context()	# CAUTION: Thread safe? be in self..., no?
         print("getxattr not impl. uid,gid,pid = ", uid, gid, pid)
         raise FuseOSError(errno.EREMOTE)        # not impl. actually :-)
     return os.setxattr(full_path, name, value, flags=options)
Example #17
def post_run(self):
    bld = self.generator.bld
    sig = self.signature()
    for node in self.outputs:
        if not node.exists():
            self.hasrun = MISSING
            self.err_msg = "-> missing file: %r" % node.abspath()
            raise Errors.WafError(self.err_msg)
        os.setxattr(node.abspath(), "user.waf.sig", sig)
    if not self.outputs:
        # only for task with no outputs
        bld.task_sigs[self.uid()] = sig
    if not self.keep_last_cmd:
        try:
            del self.last_cmd
        except AttributeError:
            pass
Example #18
def test_setxattr(setup_unreliablefs, symlink):
    mnt_dir, src_dir = setup_unreliablefs
    name = name_generator()
    src_name = pjoin(src_dir, name)
    mnt_name = pjoin(mnt_dir, name)
    os_create(mnt_name)
    linkname = name_generator()
    link_path = os.path.join(mnt_dir, linkname)
    os.symlink(mnt_dir, link_path)
    if symlink:
        target = link_path
    else:
        target = mnt_name

    attr_value = b"unreliablefs"
    attr_name = b"user.fsname"

    os.setxattr(target, attr_name, attr_value)
    assert attr_name.decode("utf-8") in os.listxattr(target)
Example #19
def _write_snapcraft_xattr(path: str, snapcraft_key: str, value: str) -> None:
    if sys.platform != "linux":
        raise RuntimeError("xattr support only available for Linux")

    # Extended attributes do not apply to symlinks.
    if os.path.islink(path):
        return

    key = _get_snapcraft_xattr_key(snapcraft_key)

    try:
        os.setxattr(path, key, value.encode())
    except OSError as error:
        # Label is too long for filesystem:
        # OSError: [Errno 7] Argument list too long: b'<path>'
        if error.errno == 7:
            raise XAttributeTooLongError(path=path, key=key, value=value)

        # Chain unknown variants of OSError.
        raise XAttributeError(action="write", key=key, path=path) from error
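
The magic number 7 checked above is errno.E2BIG ("Argument list too long"), which the Linux kernel returns for xattr values larger than its 64 KiB limit. A small demonstration of the symbolic constant (assumes Linux; the attribute name is made up):

import errno
import os
import tempfile

with tempfile.NamedTemporaryFile() as fh:
    try:
        # Values beyond the kernel's XATTR_SIZE_MAX (64 KiB) are rejected up front
        os.setxattr(fh.name, "user.too_big", b"x" * (1 << 20))
    except OSError as err:
        print(err.errno == errno.E2BIG)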
Example #20
def can_xattr():
    import tempfile
    global _can_xattr
    if _can_xattr is not None:
        return _can_xattr
    if not hasattr(os, "setxattr"):
        can = False
    else:
        import platform
        tmp_dir = tempfile.mkdtemp()
        tmp_fp, tmp_name = tempfile.mkstemp(dir=tmp_dir)
        try:
            with open(TESTFN, "wb") as fp:
                try:
                    # TESTFN & tempfile may use different file systems with
                    # different capabilities
                    os.setxattr(tmp_fp, b"user.test", b"")
                    os.setxattr(tmp_name, b"trusted.foo", b"42")
                    os.setxattr(fp.fileno(), b"user.test", b"")
                    # Kernels < 2.6.39 don't respect setxattr flags.
                    kernel_version = platform.release()
                    m = re.match(r"2.6.(\d{1,2})", kernel_version)
                    can = m is None or int(m.group(1)) >= 39
                except OSError:
                    can = False
        finally:
            unlink(TESTFN)
            unlink(tmp_name)
            rmdir(tmp_dir)
    _can_xattr = can
    return can
Example #21
def test_xattr():
    with tempfile.NamedTemporaryFile() as fh:
        key = 'user.new_attribute'
        assert _getxattr_helper(fh.name, key) is None
        value = b'a nice little bytestring'

        try:
            llfuse.setxattr(fh.name, key, value)
        except OSError as exc:
            if exc.errno == errno.ENOTSUP:
                pytest.skip('ACLs not supported for %s' % fh.name)
            raise
        assert _getxattr_helper(fh.name, key) == value

        if not hasattr(os, 'setxattr'):
            return

        key = 'user.another_new_attribute'
        assert _getxattr_helper(fh.name, key) is None
        value = b'a nice little bytestring, but slightly modified'
        os.setxattr(fh.name, key, value)
        assert _getxattr_helper(fh.name, key) == value
Example #22
def test_xattr():
    with tempfile.NamedTemporaryFile() as fh:
        key = 'user.new_attribute'
        assert _getxattr_helper(fh.name, key) is None
        value = b'a nice little bytestring'

        try:
            llfuse.setxattr(fh.name, key, value)
        except OSError as exc:
            if exc.errno == errno.ENOTSUP:
                pytest.skip('ACLs not supported for %s' % fh.name)
            raise
        assert _getxattr_helper(fh.name, key) == value

        if not hasattr(os, 'setxattr'):
            return

        key = 'user.another_new_attribute'
        assert _getxattr_helper(fh.name, key) is None
        value = b'a nice little bytestring, but slightly modified'
        os.setxattr(fh.name, key, value)
        assert _getxattr_helper(fh.name, key) == value
Example #23
def _setxattr(path: str, key: _KT = None):
    """Set file object attributes.

    :param path: File path
    :type path: String
    :param key: Key information
    :type key: String
    :returns: None
    """
    try:
        try:
            os.getxattr(path, "user.birthtime")
        except OSError:
            os.setxattr(
                path,
                "user.birthtime",
                struct.pack(">d", time.time()),
            )
    except OSError:
        pass
    else:
        if key:
            os.setxattr(path, "user.key", key.encode())
Example #24
def setfilecon(path: str, context: str) -> None:
    """
    Set the security context associated with `path`

    Like `setfilecon`(3), but does not attempt to translate
    the context via `selinux_trans_to_raw_context`.
    """

    try:
        os.setxattr(path,
                    XATTR_NAME_SELINUX,
                    context.encode(),
                    follow_symlinks=True)
    except OSError as err:
        # in case we get a not-supported error, check if
        # the context we want to set is already set and
        # ignore the error in that case. This follows the
        # behavior of `setfilecon(3)`.
        if err.errno == errno.ENOTSUP:
            have = getfilecon(path)
            if have == context:
                return
        raise
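
The getfilecon() used in the ENOTSUP fallback can be sketched the same way: read the security.selinux attribute and strip the trailing NUL byte the kernel commonly stores. This is an illustrative stand-in, not the project's actual helper:

import os

XATTR_NAME_SELINUX = "security.selinux"

def getfilecon(path: str) -> str:
    """Return the raw SELinux context stored on `path`."""
    value = os.getxattr(path, XATTR_NAME_SELINUX, follow_symlinks=True)
    return value.decode().rstrip("\x00")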
Example #25
def setxattr(args):
    xattr_name = args.xattr_name.encode("ascii")
    for root, dirs, files in os.walk(args.basedir, topdown=True):
        for filename in sorted(files):
            if not filename.endswith((".py", ".pyc")):
                continue
            filename = os.path.join(root, filename)
            hasher = hashlib.new(args.hash)
            with open(filename, "rb") as f:
                hasher.update(f.read())
                hexdigest = hasher.hexdigest().encode("ascii")
                try:
                    value = os.getxattr(f.fileno(), xattr_name)
                except OSError:
                    value = None
                if value != hexdigest:
                    if args.verbose:
                        if value is None:
                            print(f"Adding spython hash to '{filename}'")
                        else:
                            print(f"Updating spython hash of '{filename}'")
                    # it's likely that the pyc file is also out of sync
                    compileall.compile_file(filename, quiet=2)
                    os.setxattr(filename, xattr_name, hexdigest)
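
A verification pass for the same scheme just recomputes the digest and compares it with the stored attribute; a minimal sketch with hypothetical names:

import hashlib
import os

def verify_xattr_hash(filename: str, xattr_name: bytes, hash_name: str = "sha256") -> bool:
    """Return True if the digest stored in the xattr matches the file contents."""
    hasher = hashlib.new(hash_name)
    with open(filename, "rb") as f:
        hasher.update(f.read())
        try:
            stored = os.getxattr(f.fileno(), xattr_name)
        except OSError:
            return False
    return stored == hasher.hexdigest().encode("ascii")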
Example #26
 def setxattr(self, path, name, value, options, position=0):
     rpath, virt = self._oc_path(path)
     if name == "user.owncloud.virtual" and not self._be_transparent():
         if value == b'0' or value == b'':
             if virt:
                 self._convert_v2p(rpath)
             else:
                 print(
                     "+ setxattr nothing to do. path is already physical: "
                     + rpath,
                     file=sys.stderr)
         else:
             if virt:
                 print(
                     "+ setxattr nothing to do. path is already virtual: " +
                     rpath,
                     file=sys.stderr)
             else:
                 self._convert_p2v(rpath)
         return 0
     return os.setxattr(rpath, name, value, flags=options)
Example #27
def SetMaxRevisions (fname, max_revisions):
    os.setxattr (fname, xattr_max_revisions_name, str (max_revisions).encode ('ASCII'), follow_symlinks=False)
Example #28
import os
from selfdrive.loggerd.uploader import UPLOAD_ATTR_NAME, UPLOAD_ATTR_VALUE

from selfdrive.loggerd.config import ROOT
for folder in os.walk(ROOT):
    for file1 in folder[2]:
        full_path = os.path.join(folder[0], file1)
        os.setxattr(full_path, UPLOAD_ATTR_NAME, UPLOAD_ATTR_VALUE)
Example #29
def build(args):
    context = Path(args.path).resolve()
    dockerfile = Path(args.file)
    if not dockerfile.is_absolute():
        dockerfile = context / dockerfile
    dockerfile = dockerfile.resolve()
    if not dockerfile.is_file():
        raise FileNotFoundError("{} does not exist".format(dockerfile))

    runtime = Path(args.runtime).resolve()
    r = ImageStorage(runtime)

    df = DockerfileParser(dockerfile)
    if not df.build_commands:
        print("Nothing to do")
        return

    if args.env:
        df.add_env_variables(args.env)

    #  Locate base image for this dockerfile
    parent_hash = ""
    if df.from_image != "scratch":
        parent_hash = r.find_last_build_by_name(df.from_image)
        if not parent_hash:
            raise FileNotFoundError("Image with name {} not found".format(
                df.from_image))
        print("Using parent image {}".format(parent_hash[:16]))

    #  Update build hashes based on base image
    df.calc_build_hashes(parent_hash=parent_hash)
    total_build_steps = len(df.build_commands)

    #  Early exit if image is already built
    if not args.no_cache and args.rm:
        if (runtime / df.build_hashes[-1]).exists():
            print("==> Already built {}".format(df.build_hashes[-1][:16]))
            return

    #  Do the build
    for current_build_step, (cmd, cmdargs) in enumerate(df.build_commands):
        build_step_hash = df.build_hashes[current_build_step]

        print("==> Building step {}/{} {}".format(current_build_step + 1,
                                                  total_build_steps,
                                                  build_step_hash[:16]))

        target = runtime / (build_step_hash + "-init")
        final_target = runtime / build_step_hash
        host_env = {"TARGET": str(target), "CONTEXT": str(context)}
        host_env.update(df.env)

        ## parent image checks
        if final_target.exists():
            if args.no_cache:
                btrfs_subvol_delete(final_target)
            else:
                previous_parent_hash = ""
                try:
                    previous_parent_hash = os.getxattr(
                        str(final_target), b"user.parent_hash").decode()
                except:
                    pass
                if parent_hash and parent_hash != previous_parent_hash:
                    print("  -> parent image hash changed")
                    btrfs_subvol_delete(final_target)
                else:
                    print("  -> Using cached image")
                    parent_hash = build_step_hash
                    continue

        if target.exists():
            print("  -> Deleting incomplete image")
            btrfs_subvol_delete(target)

        if parent_hash:
            btrfs_subvol_snapshot(runtime / parent_hash, target)
        else:
            btrfs_subvol_create(target)

        ## Run build step
        if cmd == "host":
            print('  -> HOST {}'.format(cmdargs))
            subprocess.run(cmdargs,
                           cwd=str(context),
                           check=True,
                           shell=True,
                           env=host_env)

        elif cmd == "run":
            print('  -> RUN {}'.format(cmdargs))
            nspawn_cmd = ['systemd-nspawn', '--quiet']
            for key, val in df.env.items():
                nspawn_cmd.extend(('--setenv', '{}={}'.format(key, val)))
            nspawn_cmd.extend(
                ('--register=no', '-D', str(target), '/bin/sh', '-c', cmdargs))
            subprocess.run(nspawn_cmd,
                           cwd=str(target),
                           check=True,
                           shell=False,
                           env=df.env)

        elif cmd == "copy":
            print("  -> COPY {}".format(cmdargs))
            *srcs, dest = shlex.split(cmdargs)
            if Path(dest).is_absolute():
                dest = target / dest[1:]
            else:
                dest = target / dest
            if len(srcs) > 1 and not dest.is_dir():
                raise NotADirectoryError("Destination must be a directory")
            cmd = ['cp', '-rv']
            cmd.extend(srcs)
            cmd.append(str(dest))
            subprocess.run(cmd,
                           cwd=str(context),
                           check=True,
                           shell=False,
                           env=host_env)

        ## Seal build image
        os.setxattr(str(target), b"user.parent_hash", parent_hash.encode())
        for attr in ("user.cmd.host", "user.cmd.run"):
            try:
                os.removexattr(str(target), attr.encode())
            except:
                pass
        os.setxattr(str(target), "user.cmd.{}".format(cmd).encode(),
                    cmdargs.encode())

        btrfs_subvol_snapshot(target, final_target, readonly=True)
        btrfs_subvol_delete(target)

        parent_hash = build_step_hash

    #  After build cleanup
    if args.rm:
        print("==> Cleanup")
        for build_hash in df.build_hashes[:-1]:
            target = runtime / build_hash
            if target.exists():
                print("  -> Remove intermediate image {}".format(
                    build_hash[:16]))
                btrfs_subvol_delete(target)

    print("==> Successfully built {}".format(parent_hash[:16]))

    if args.tag:
        tmp_tag = runtime / ("tag-" + args.tag + "-tmp")
        if tmp_tag.exists():
            os.unlink(str(tmp_tag))
        os.symlink(str(runtime / parent_hash), str(tmp_tag))
        os.replace(str(tmp_tag), str(runtime / ("tag-" + args.tag)))
        print("==> Tagged image {} as {}".format(parent_hash[:16], args.tag))
Example #30
 def set(item, name, value, flags=0, namespace=None):
     return os.setxattr(item, name, value, flags=flags)
Example #31
        dest='verbose',
        default=VERBOSE,
        type=int,
        help='verbosity control (0 = none, 1 = normal)')

    args = parser.parse_args()

    try:
        assert args.shelf_name.startswith('/lfs/'), 'Not an LFS file'
        assert not os.path.isfile(args.shelf_name), '%s exists' % args.shelf_name
        APs = os.getxattr('/lfs', 'user.LFS.AllocationPolicyList').decode().split(',')
    except Exception as e:
        raise SystemExit('Bad initial conditions: %s' % str(e))
    for ap in APs:
        try:
            os.setxattr('/lfs', 'user.LFS.AllocationPolicyDefault', ap.encode())
            with open(args.shelf_name, 'w') as f:
                os.ftruncate(f.fileno(), 1)
            thisap = os.getxattr(args.shelf_name, 'user.LFS.AllocationPolicy')
            assert ap == thisap.decode(), 'Policy mismatch: %s' % ap
            os.unlink(args.shelf_name)
        except Exception as e:
            if isinstance(e, OSError) and e.errno == errno.EINVAL:
                if ap in ('RequestIG', ):
                    continue
            raise SystemExit('Error during policy walkthrough: %s' % str(e))

    try:
        with open(args.shelf_name, 'w') as f:	# need a test file
            os.ftruncate(f.fileno(), 1)
        os.setxattr(args.shelf_name, VERIF_EA_NAME, b'asdfasdfasdfkajsd;flijasd;fiads;fui')
Example #32
def SetMinRevisionsAge (fname, min_revisions_age):
    os.setxattr (fname, xattr_min_revisions_age, str (min_revisions_age).encode ('ASCII'), follow_symlinks=False)
Example #33
 def set_file_attribute(file: str, att_name: str, att_val: bytes):
     os.setxattr(file, att_name, att_val)
Example #34
    def copy_all(self):
        """Core copy process. This is the most important step of this
        stage. It clones live filesystem into a local partition in the
        selected hard disk."""

        self.db.progress('START', 0, 100, 'ubiquity/install/title')
        self.db.progress('INFO', 'ubiquity/install/copying')

        fs_size = os.path.join(self.casper_path, 'filesystem.size')
        if os.path.exists(fs_size):
            with open(fs_size) as total_size_fp:
                total_size = int(total_size_fp.readline())
        else:
            # Fallback in case an Ubuntu derivative forgets to put
            # /casper/filesystem.size on the CD, or to account for things
            # like CD->USB transformation tools that don't copy this file.
            # This is slower than just reading the size from a file, but
            # better than crashing.
            #
            # Obviously doing os.walk() twice is inefficient, but I'd rather
            # not suck the list into ubiquity's memory, and I'm guessing
            # that the kernel's dentry cache will avoid most of the slowness
            # anyway.
            total_size = 0
            for dirpath, dirnames, filenames in os.walk(self.source):
                for name in dirnames + filenames:
                    fqpath = os.path.join(dirpath, name)
                    total_size += os.lstat(fqpath).st_size

        # Progress bar handling:
        # We sample progress every half-second (assuming time.time() gives
        # us sufficiently good granularity) and use the average of progress
        # over the last minute or so to decide how much time remains. We
        # don't bother displaying any progress for the first ten seconds in
        # order to allow things to settle down, and we only update the "time
        # remaining" indicator at most every two seconds after that.

        copy_progress = 0
        copied_size = 0
        directory_times = []
        time_start = time.time()
        times = [(time_start, copied_size)]
        long_enough = False
        time_last_update = time_start
        debug = 'UBIQUITY_DEBUG' in os.environ
        if self.db.get('ubiquity/install/md5_check') == 'false':
            md5_check = False
        else:
            md5_check = True

        # Increase kernel flush times during bulk data copying to make it
        # more likely that small files are packed contiguously, which should
        # speed up initial boot times.
        dirty_writeback_centisecs = None
        dirty_expire_centisecs = None
        if os.path.exists('/proc/sys/vm/dirty_writeback_centisecs'):
            with open('/proc/sys/vm/dirty_writeback_centisecs') as dwc:
                dirty_writeback_centisecs = int(dwc.readline())
            with open('/proc/sys/vm/dirty_writeback_centisecs', 'w') as dwc:
                print('3000\n', file=dwc)
        if os.path.exists('/proc/sys/vm/dirty_expire_centisecs'):
            with open('/proc/sys/vm/dirty_expire_centisecs') as dec:
                dirty_expire_centisecs = int(dec.readline())
            with open('/proc/sys/vm/dirty_expire_centisecs', 'w') as dec:
                print('6000\n', file=dec)

        old_umask = os.umask(0)
        for dirpath, dirnames, filenames in os.walk(self.source):
            sp = dirpath[len(self.source) + 1:]
            for name in dirnames + filenames:
                relpath = os.path.join(sp, name)
                # /etc/fstab was legitimately created by partman, and
                # shouldn't be copied again.  Similarly, /etc/crypttab may
                # have been legitimately created by the user-setup plugin.
                if relpath in ("etc/fstab", "etc/crypttab"):
                    continue
                sourcepath = os.path.join(self.source, relpath)
                targetpath = os.path.join(self.target, relpath)
                st = os.lstat(sourcepath)

                # Is the path blacklisted?
                if (not stat.S_ISDIR(st.st_mode)
                        and '/%s' % relpath in self.blacklist):
                    if debug:
                        syslog.syslog('Not copying %s' % relpath)
                    continue

                # Remove the target if necessary and if we can.
                install_misc.remove_target(self.source, self.target, relpath,
                                           st)

                # Now actually copy source to target.
                mode = stat.S_IMODE(st.st_mode)
                if stat.S_ISLNK(st.st_mode):
                    linkto = os.readlink(sourcepath)
                    os.symlink(linkto, targetpath)
                elif stat.S_ISDIR(st.st_mode):
                    if not os.path.isdir(targetpath):
                        try:
                            os.mkdir(targetpath, mode)
                        except OSError as e:
                            # there is a small window where update-apt-cache
                            # can race with us since it creates
                            # "/target/var/cache/apt/...". Hence, ignore
                            # failure if the directory does now exist where
                            # brief moments before it didn't.
                            if e.errno != errno.EEXIST:
                                raise
                elif stat.S_ISCHR(st.st_mode):
                    os.mknod(targetpath, stat.S_IFCHR | mode, st.st_rdev)
                elif stat.S_ISBLK(st.st_mode):
                    os.mknod(targetpath, stat.S_IFBLK | mode, st.st_rdev)
                elif stat.S_ISFIFO(st.st_mode):
                    os.mknod(targetpath, stat.S_IFIFO | mode)
                elif stat.S_ISSOCK(st.st_mode):
                    os.mknod(targetpath, stat.S_IFSOCK | mode)
                elif stat.S_ISREG(st.st_mode):
                    install_misc.copy_file(self.db, sourcepath, targetpath,
                                           md5_check)

                # Copy metadata.
                copied_size += st.st_size
                os.lchown(targetpath, st.st_uid, st.st_gid)
                if not stat.S_ISLNK(st.st_mode):
                    os.chmod(targetpath, mode)
                if stat.S_ISDIR(st.st_mode):
                    directory_times.append(
                        (targetpath, st.st_atime, st.st_mtime))
                # os.utime() sets timestamp of target, not link
                elif not stat.S_ISLNK(st.st_mode):
                    try:
                        os.utime(targetpath, (st.st_atime, st.st_mtime))
                    except Exception:
                        # We can live with timestamps being wrong.
                        pass
                if (hasattr(os, "listxattr")
                        and hasattr(os, "supports_follow_symlinks")
                        and os.supports_follow_symlinks):
                    try:
                        attrnames = os.listxattr(sourcepath,
                                                 follow_symlinks=False)
                        for attrname in attrnames:
                            attrvalue = os.getxattr(sourcepath,
                                                    attrname,
                                                    follow_symlinks=False)
                            os.setxattr(targetpath,
                                        attrname,
                                        attrvalue,
                                        follow_symlinks=False)
                    except OSError as e:
                        if e.errno not in (errno.EPERM, errno.ENOTSUP,
                                           errno.ENODATA):
                            raise

                if int((copied_size * 90) / total_size) != copy_progress:
                    copy_progress = int((copied_size * 90) / total_size)
                    self.db.progress('SET', 10 + copy_progress)

                time_now = time.time()
                if (time_now - times[-1][0]) >= 0.5:
                    times.append((time_now, copied_size))
                    if not long_enough and time_now - times[0][0] >= 10:
                        long_enough = True
                    if long_enough and time_now - time_last_update >= 2:
                        time_last_update = time_now
                        while (time_now - times[0][0] > 60
                               and time_now - times[1][0] >= 60):
                            times.pop(0)
                        speed = ((times[-1][1] - times[0][1]) /
                                 (times[-1][0] - times[0][0]))
                        if speed != 0:
                            time_remaining = (int(
                                (total_size - copied_size) / speed))
                            if time_remaining < 60:
                                self.db.progress(
                                    'INFO', 'ubiquity/install/copying_minute')

        # Apply timestamps to all directories now that the items within them
        # have been copied.
        for dirtime in directory_times:
            (directory, atime, mtime) = dirtime
            try:
                os.utime(directory, (atime, mtime))
            except Exception:
                # I have no idea why I've been getting lots of bug reports
                # about this failing, but I really don't care. Ignore it.
                pass

        # Revert to previous kernel flush times.
        if dirty_writeback_centisecs is not None:
            with open('/proc/sys/vm/dirty_writeback_centisecs', 'w') as dwc:
                print(dirty_writeback_centisecs, file=dwc)
        if dirty_expire_centisecs is not None:
            with open('/proc/sys/vm/dirty_expire_centisecs', 'w') as dec:
                print(dirty_expire_centisecs, file=dec)

        # Try some possible locations for the kernel we used to boot. This
        # lets us save a couple of megabytes of CD space.
        bootdir = self.target_file('boot')
        kernel = self.find_cd_kernel()
        if kernel:
            prefix = os.path.basename(kernel).split('-', 1)[0]
            release = os.uname()[2]
            target_kernel = os.path.join(bootdir, '%s-%s' % (prefix, release))
            copies = []

            # ISO9660 images may have to use .efi rather than .efi.signed in
            # order to support being booted using isolinux, which must abide
            # by archaic 8.3 restrictions.
            for suffix in (".efi", ".efi.signed"):
                if os.path.exists(kernel + suffix):
                    signed_kernel = kernel + suffix
                    break
            else:
                signed_kernel = None

            if os.path.exists(kernel):
                copies.append((kernel, target_kernel))
            elif signed_kernel is not None:
                # No unsigned kernel.  We'll construct it using sbsigntool.
                copies.append((signed_kernel, target_kernel))

            if signed_kernel is not None:
                copies.append((signed_kernel, "%s.efi.signed" % target_kernel))

            for source, target in copies:
                osextras.unlink_force(target)
                install_misc.copy_file(self.db, source, target, md5_check)
                os.lchown(target, 0, 0)
                os.chmod(target, 0o644)
                st = os.lstat(source)
                try:
                    os.utime(target, (st.st_atime, st.st_mtime))
                except Exception:
                    # We can live with timestamps being wrong.
                    pass

            if not os.path.exists(kernel) and signed_kernel is not None:
                # Construct the unsigned kernel.
                subprocess.check_call(["sbattach", "--remove", target_kernel])

        os.umask(old_umask)

        self.db.progress('SET', 100)
        self.db.progress('STOP')
Example #35
 def set_attribute(self, path, attr_name, attr_value):
     os.setxattr(self.full_path(path), attr_name, attr_value)
Example #36
    def copy_all(self):
        """Core copy process. This is the most important step of this
        stage. It clones live filesystem into a local partition in the
        selected hard disk."""

        self.db.progress('START', 0, 100, 'ubiquity/install/title')
        self.db.progress('INFO', 'ubiquity/install/copying')

        fs_size = os.path.join(self.casper_path, 'filesystem.size')
        if os.path.exists(fs_size):
            with open(fs_size) as total_size_fp:
                total_size = int(total_size_fp.readline())
        else:
            # Fallback in case an Linux Mint derivative forgets to put
            # /casper/filesystem.size on the CD, or to account for things
            # like CD->USB transformation tools that don't copy this file.
            # This is slower than just reading the size from a file, but
            # better than crashing.
            #
            # Obviously doing os.walk() twice is inefficient, but I'd rather
            # not suck the list into ubiquity's memory, and I'm guessing
            # that the kernel's dentry cache will avoid most of the slowness
            # anyway.
            total_size = 0
            for dirpath, dirnames, filenames in os.walk(self.source):
                for name in dirnames + filenames:
                    fqpath = os.path.join(dirpath, name)
                    total_size += os.lstat(fqpath).st_size

        # Progress bar handling:
        # We sample progress every half-second (assuming time.time() gives
        # us sufficiently good granularity) and use the average of progress
        # over the last minute or so to decide how much time remains. We
        # don't bother displaying any progress for the first ten seconds in
        # order to allow things to settle down, and we only update the "time
        # remaining" indicator at most every two seconds after that.

        copy_progress = 0
        copied_size = 0
        directory_times = []
        time_start = time.time()
        times = [(time_start, copied_size)]
        long_enough = False
        time_last_update = time_start
        debug = 'UBIQUITY_DEBUG' in os.environ
        if self.db.get('ubiquity/install/md5_check') == 'false':
            md5_check = False
        else:
            md5_check = True

        # Increase kernel flush times during bulk data copying to make it
        # more likely that small files are packed contiguously, which should
        # speed up initial boot times.
        dirty_writeback_centisecs = None
        dirty_expire_centisecs = None
        if os.path.exists('/proc/sys/vm/dirty_writeback_centisecs'):
            with open('/proc/sys/vm/dirty_writeback_centisecs') as dwc:
                dirty_writeback_centisecs = int(dwc.readline())
            with open('/proc/sys/vm/dirty_writeback_centisecs', 'w') as dwc:
                print('3000\n', file=dwc)
        if os.path.exists('/proc/sys/vm/dirty_expire_centisecs'):
            with open('/proc/sys/vm/dirty_expire_centisecs') as dec:
                dirty_expire_centisecs = int(dec.readline())
            with open('/proc/sys/vm/dirty_expire_centisecs', 'w') as dec:
                print('6000\n', file=dec)

        old_umask = os.umask(0)
        for dirpath, dirnames, filenames in os.walk(self.source):
            sp = dirpath[len(self.source) + 1:]
            for name in dirnames + filenames:
                relpath = os.path.join(sp, name)
                # /etc/fstab was legitimately created by partman, and
                # shouldn't be copied again.  Similarly, /etc/crypttab may
                # have been legitimately created by the user-setup plugin.
                if relpath in ("etc/fstab", "etc/crypttab"):
                    continue
                sourcepath = os.path.join(self.source, relpath)
                targetpath = os.path.join(self.target, relpath)
                st = os.lstat(sourcepath)

                # Is the path blacklisted?
                if (not stat.S_ISDIR(st.st_mode) and
                        '/%s' % relpath in self.blacklist):
                    if debug:
                        syslog.syslog('Not copying %s' % relpath)
                    continue

                # Remove the target if necessary and if we can.
                install_misc.remove_target(
                    self.source, self.target, relpath, st)

                # Now actually copy source to target.
                mode = stat.S_IMODE(st.st_mode)
                if stat.S_ISLNK(st.st_mode):
                    linkto = os.readlink(sourcepath)
                    os.symlink(linkto, targetpath)
                elif stat.S_ISDIR(st.st_mode):
                    if not os.path.isdir(targetpath):
                        try:
                            os.mkdir(targetpath, mode)
                        except OSError as e:
                            # there is a small window where update-apt-cache
                            # can race with us since it creates
                            # "/target/var/cache/apt/...". Hence, ignore
                            # failure if the directory does now exist where
                            # brief moments before it didn't.
                            if e.errno != errno.EEXIST:
                                raise
                elif stat.S_ISCHR(st.st_mode):
                    os.mknod(targetpath, stat.S_IFCHR | mode, st.st_rdev)
                elif stat.S_ISBLK(st.st_mode):
                    os.mknod(targetpath, stat.S_IFBLK | mode, st.st_rdev)
                elif stat.S_ISFIFO(st.st_mode):
                    os.mknod(targetpath, stat.S_IFIFO | mode)
                elif stat.S_ISSOCK(st.st_mode):
                    os.mknod(targetpath, stat.S_IFSOCK | mode)
                elif stat.S_ISREG(st.st_mode):
                    install_misc.copy_file(
                        self.db, sourcepath, targetpath, md5_check)

                # Copy metadata.
                copied_size += st.st_size
                os.lchown(targetpath, st.st_uid, st.st_gid)
                if not stat.S_ISLNK(st.st_mode):
                    os.chmod(targetpath, mode)
                if stat.S_ISDIR(st.st_mode):
                    directory_times.append(
                        (targetpath, st.st_atime, st.st_mtime))
                # os.utime() sets timestamp of target, not link
                elif not stat.S_ISLNK(st.st_mode):
                    try:
                        os.utime(targetpath, (st.st_atime, st.st_mtime))
                    except Exception:
                        # We can live with timestamps being wrong.
                        pass
                if (hasattr(os, "listxattr") and
                        hasattr(os, "supports_follow_symlinks") and
                        os.supports_follow_symlinks):
                    try:
                        attrnames = os.listxattr(
                            sourcepath, follow_symlinks=False)
                        for attrname in attrnames:
                            attrvalue = os.getxattr(
                                sourcepath, attrname, follow_symlinks=False)
                            os.setxattr(
                                targetpath, attrname, attrvalue,
                                follow_symlinks=False)
                    except OSError as e:
                        if e.errno not in (
                                errno.EPERM, errno.ENOTSUP, errno.ENODATA):
                            raise

                if int((copied_size * 90) / total_size) != copy_progress:
                    copy_progress = int((copied_size * 90) / total_size)
                    self.db.progress('SET', 10 + copy_progress)

                time_now = time.time()
                if (time_now - times[-1][0]) >= 0.5:
                    times.append((time_now, copied_size))
                    if not long_enough and time_now - times[0][0] >= 10:
                        long_enough = True
                    if long_enough and time_now - time_last_update >= 2:
                        time_last_update = time_now
                        while (time_now - times[0][0] > 60 and
                               time_now - times[1][0] >= 60):
                            times.pop(0)
                        speed = ((times[-1][1] - times[0][1]) /
                                 (times[-1][0] - times[0][0]))
                        if speed != 0:
                            time_remaining = (
                                int((total_size - copied_size) / speed))
                            if time_remaining < 60:
                                self.db.progress(
                                    'INFO', 'ubiquity/install/copying_minute')

        # Apply timestamps to all directories now that the items within them
        # have been copied.
        for dirtime in directory_times:
            (directory, atime, mtime) = dirtime
            try:
                os.utime(directory, (atime, mtime))
            except Exception:
                # I have no idea why I've been getting lots of bug reports
                # about this failing, but I really don't care. Ignore it.
                pass

        # Revert to previous kernel flush times.
        if dirty_writeback_centisecs is not None:
            with open('/proc/sys/vm/dirty_writeback_centisecs', 'w') as dwc:
                print(dirty_writeback_centisecs, file=dwc)
        if dirty_expire_centisecs is not None:
            with open('/proc/sys/vm/dirty_expire_centisecs', 'w') as dec:
                print(dirty_expire_centisecs, file=dec)

        # Try some possible locations for the kernel we used to boot. This
        # lets us save a couple of megabytes of CD space.
        bootdir = self.target_file('boot')
        kernel = self.find_cd_kernel()
        if kernel:
            prefix = os.path.basename(kernel).split('-', 1)[0]
            release = os.uname()[2]
            target_kernel = os.path.join(bootdir, '%s-%s' % (prefix, release))
            copies = []

            # ISO9660 images may have to use .efi rather than .efi.signed in
            # order to support being booted using isolinux, which must abide
            # by archaic 8.3 restrictions.
            for suffix in (".efi", ".efi.signed"):
                if os.path.exists(kernel + suffix):
                    signed_kernel = kernel + suffix
                    break
            else:
                signed_kernel = None

            if os.path.exists(kernel):
                copies.append((kernel, target_kernel))
            elif signed_kernel is not None:
                # No unsigned kernel.  We'll construct it using sbsigntool.
                copies.append((signed_kernel, target_kernel))

            if signed_kernel is not None:
                copies.append((signed_kernel, "%s.efi.signed" % target_kernel))

            for source, target in copies:
                osextras.unlink_force(target)
                install_misc.copy_file(self.db, source, target, md5_check)
                os.lchown(target, 0, 0)
                os.chmod(target, 0o644)
                st = os.lstat(source)
                try:
                    os.utime(target, (st.st_atime, st.st_mtime))
                except Exception:
                    # We can live with timestamps being wrong.
                    pass

            if not os.path.exists(kernel) and signed_kernel is not None:
                # Construct the unsigned kernel.
                subprocess.check_call(["sbattach", "--remove", target_kernel])

        os.umask(old_umask)

        self.db.progress('SET', 100)
        self.db.progress('STOP')
Example #37
os.fwalk(top="top")  # $ getAPathArgument="top"

# Linux only
os.getxattr("path", "attribute")  # $ getAPathArgument="path"
os.getxattr(path="path", attribute="attribute")  # $ getAPathArgument="path"

# Linux only
os.listxattr("path")  # $ getAPathArgument="path"
os.listxattr(path="path")  # $ getAPathArgument="path"

# Linux only
os.removexattr("path", "attribute")  # $ getAPathArgument="path"
os.removexattr(path="path", attribute="attribute")  # $ getAPathArgument="path"

# Linux only
os.setxattr("path", "attribute", "value")  # $ getAPathArgument="path"
os.setxattr(path="path", attribute="attribute",
            value="value")  # $ getAPathArgument="path"

# Windows only
os.add_dll_directory("path")  # $ getAPathArgument="path"
os.add_dll_directory(path="path")  # $ getAPathArgument="path"

# for `os.exec*`, `os.spawn*`, and `os.posix_spawn*` functions, see the
# `SystemCommandExecution.py` file.

# Windows only
os.startfile("path")  # $ getAPathArgument="path"
os.startfile(path="path")  # $ getAPathArgument="path"

# ------------------------------------------------------------------------------
Example #38
 def set_file_atts(file: str, file_atts: list):
     os.setxattr(file, File.att_file_sum, bytes(file_atts[0], 'utf-8'))
     os.setxattr(file, File.att_time, bytes(file_atts[1], 'utf-8'))
Example #39
		def set(item, name, value, flags=0, namespace=None):
			return os.setxattr(item, name, value, flags=flags)
Example #40
    def setxattr(self, path, name, value, options, position=0):
        logging.debug("setxattr: %s",
                      repr((path, name, value, options, position)))
        src_path = self.getSource(path)

        if name == xattr_revisions_name:
            raise fuse.FuseOSError(errno.EACCES)

        v = 0
        try:
            v = int(value)
        except ValueError:
            raise fuse.FuseOSError(errno.EINVAL)

        file_info = FileInfo()
        if src_path in self.files:
            file_info = self.files[src_path]
        else:
            file_info.loadFileInfo(src_path)

        src_is_dir = not os.path.islink(src_path) and os.path.isdir(src_path)
        if name == xattr_max_revisions_name:
            if file_info.revisions != v:
                logging.debug(
                    "  changing number of revisions for %s from %d to %d",
                    repr(path), file_info.revisions, v)

                if v < file_info.revisions:
                    file_info.setMaxRevisions(v)
                    f = File(src_path, src_is_dir)
                    f.limitRevisions(file_info)
                else:
                    file_info.setMaxRevisions(v)

                file_info.saveFileInfo(src_path)

            return

        if name == xattr_max_revision_age:
            if file_info.max_age != v:
                logging.debug(
                    "  changing maximal revision age for %s from %d to %d days",
                    repr(path), file_info.max_age, v)

                if v < file_info.max_age:
                    file_info.setMaxRevisionAge(v)
                    f = File(src_path, src_is_dir)
                    f.limitRevisions(file_info)
                else:
                    file_info.setMaxRevisionAge(v)

                file_info.saveFileInfo(src_path)

            return

        if name == xattr_min_revisions_age:
            if file_info.min_revisions != v:
                logging.debug(
                    "  changing minimal number of revisions for %s from %d to %d",
                    repr(path), file_info.min_revisions, v)

                if v < file_info.min_revisions:
                    file_info.setMinRevisionsAge(v)
                    f = File(src_path, src_is_dir)
                    f.limitRevisions(file_info)
                else:
                    file_info.setMinRevisionsAge(v)

                file_info.saveFileInfo(src_path)

            return

        os.setxattr(src_path, name, value, options, follow_symlinks=False)
Example #41
def SetMaxRevisionAge (fname, max_age):
    os.setxattr (fname, xattr_max_revision_age, str (max_age).encode ('ASCII'), follow_symlinks=False)
Example #42
 def addXAttrs(self, force=False):
     attrName = "provenance"
     buf = os.getxattr(self.path, attrName)
     if (buf and not force): return False
     os.setxattr(self.path, attrName, self.tostring())
     pass
Example #43
 def set_ctime_linux(filepath, timestamp):
     try:
         os.setxattr(filepath, b"user.loguru_crtime",
                     str(timestamp).encode("ascii"))
     except OSError:
         pass
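
The matching reader decodes the stored ASCII timestamp back into a float; a sketch assuming the attribute was written by the helper above (the function name is hypothetical):

import os

def get_ctime_linux(filepath):
    """Return the creation time recorded in user.loguru_crtime, or None."""
    try:
        raw = os.getxattr(filepath, b"user.loguru_crtime")
        return float(raw.decode("ascii"))
    except (OSError, ValueError):
        return None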
Example #44
def setxattr(self, val):
	os.setxattr(self.abspath(), SIG_VAR, val)
Example #45
def setxattr(self, val):
    os.setxattr(self.abspath(), SIG_VAR, val)
Example #46
def build(args):
    context = Path(args.path).resolve()
    dockerfile = Path(args.file)
    if not dockerfile.is_absolute():
        dockerfile = context / dockerfile
    dockerfile = dockerfile.resolve()
    if not dockerfile.is_file():
        raise FileNotFoundError("{} does not exist".format(dockerfile))

    runtime = Path(args.runtime).resolve()
    r = ImageStorage(runtime)

    df = DockerfileParser(dockerfile)
    if not df.build_commands:
        print("Nothing to do")
        return

    if args.env:
        df.add_env_variables(args.env)

    #  Locate base image for this dockerfile
    parent_hash = ""
    if df.from_image != "scratch":
        parent_hash = r.find_last_build_by_name(df.from_image)
        if not parent_hash:
            raise FileNotFoundError("Image with name {} not found".format(df.from_image))
        print("Using parent image {}".format(parent_hash[:16]))

    #  Update build hashes based on base image
    df.calc_build_hashes(parent_hash=parent_hash)
    total_build_steps = len(df.build_commands)

    #  Early exit if image is already built
    if not args.no_cache and args.rm:
        if (runtime / df.build_hashes[-1]).exists():
            print("==> Already built {}".format(df.build_hashes[-1][:16]))
            return

    #  Do the build
    for current_build_step, (cmd, cmdargs) in enumerate(df.build_commands):
        build_step_hash = df.build_hashes[current_build_step]

        print("==> Building step {}/{} {}".format(current_build_step + 1, total_build_steps, build_step_hash[:16]))

        target = runtime / (build_step_hash + "-init")
        final_target = runtime / build_step_hash
        host_env = {
            "TARGET": str(target),
            "CONTEXT": str(context)
        }
        host_env.update(df.env)

        ## parent image checks
        if final_target.exists():
            if args.no_cache:
                btrfs_subvol_delete(final_target)
            else:
                previous_parent_hash = ""
                try:
                    previous_parent_hash = os.getxattr(str(final_target), b"user.parent_hash").decode()
                except:
                    pass
                if parent_hash and parent_hash != previous_parent_hash:
                    print("  -> parent image hash changed")
                    btrfs_subvol_delete(final_target)
                else:
                    print("  -> Using cached image")
                    parent_hash = build_step_hash
                    continue

        if target.exists():
            print("  -> Deleting incomplete image")
            btrfs_subvol_delete(target)

        if parent_hash:
            btrfs_subvol_snapshot(runtime / parent_hash, target)
        else:
            btrfs_subvol_create(target)

        ## Run build step
        if cmd == "host":
            print('  -> HOST {}'.format(cmdargs))
            subprocess.run(cmdargs, cwd=str(context), check=True, shell=True, env=host_env)

        elif cmd == "run":
            print('  -> RUN {}'.format(cmdargs))
            nspawn_cmd = ['systemd-nspawn', '--quiet']
            for key, val in df.env.items():
                nspawn_cmd.extend(('--setenv', '{}={}'.format(key, val)))
            nspawn_cmd.extend(('--register=no', '-D', str(target), '/bin/sh', '-c', cmdargs))
            subprocess.run(nspawn_cmd, cwd=str(target), check=True, shell=False, env=df.env)

        elif cmd == "copy":
            print("  -> COPY {}".format(cmdargs))
            *srcs, dest = shlex.split(cmdargs)
            if Path(dest).is_absolute():
                dest = target / dest[1:]
            else:
                dest = target / dest
            if len(srcs) > 1 and not dest.is_dir():
                raise NotADirectoryError("Destination must be a directory")
            cmd = ['cp', '-rv']
            cmd.extend(srcs)
            cmd.append(str(dest))
            subprocess.run(cmd, cwd=str(context), check=True, shell=False, env=host_env)

        ## Seal build image
        os.setxattr(str(target), b"user.parent_hash", parent_hash.encode())
        for attr in ("user.cmd.host", "user.cmd.run"):
            try:
                os.removexattr(str(target), attr.encode())
            except:
                pass
        os.setxattr(str(target), "user.cmd.{}".format(cmd).encode(), cmdargs.encode())

        btrfs_subvol_snapshot(target, final_target, readonly=True)
        btrfs_subvol_delete(target)

        parent_hash = build_step_hash

    #  After build cleanup
    if args.rm:
        print("==> Cleanup")
        for build_hash in df.build_hashes[:-1]:
            target = runtime / build_hash
            if target.exists():
                print("  -> Remove intermediate image {}".format(build_hash[:16]))
                btrfs_subvol_delete(target)

    print("==> Successfully built {}".format(parent_hash[:16]))

    if args.tag:
        tmp_tag = runtime / ("tag-" + args.tag + "-tmp")
        if tmp_tag.exists():
            os.unlink(str(tmp_tag))
        os.symlink(str(runtime / parent_hash), str(tmp_tag))
        os.replace(str(tmp_tag), str(runtime / ("tag-" + args.tag)))
        print("==> Tagged image {} as {}".format(parent_hash[:16], args.tag))