Example #1
 def empty(self):
     if "autoresume" in self.settings["options"] \
      and self.resume.is_enabled("empty"):
         log.notice('Resume point detected, skipping empty operation...')
     else:
         if "netboot2/empty" in self.settings:
             if isinstance(self.settings['netboot2/empty'], str):
                 self.settings["netboot2/empty"] = self.settings[
                     "netboot2/empty"].split()
             for x in self.settings["netboot2/empty"]:
                 myemp = self.settings["chroot_path"] + self.settings[
                     "merge_path"] + x
                 if not os.path.isdir(myemp):
                     log.warning(
                         'not a directory or does not exist, skipping "empty" operation: %s',
                         x)
                     continue
                 log.info('Emptying directory %s', x)
                 # stat the dir, delete the dir, recreate the dir and set
                 # the proper perms and ownership
                 mystat = os.stat(myemp)
                 shutil.rmtree(myemp)
                 ensure_dirs(myemp, mode=0o755)
                 os.chown(myemp, mystat[ST_UID], mystat[ST_GID])
                 os.chmod(myemp, mystat[ST_MODE])
     self.resume.enable("empty")
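
The core of the snippet above is the stat / rmtree / recreate / chown / chmod sequence. A minimal standalone sketch of that pattern (the helper name is hypothetical, and plain os.makedirs stands in for Catalyst's ensure_dirs):

import os
import shutil
from stat import ST_UID, ST_GID, ST_MODE

def empty_dir_keep_metadata(path):
    """Illustrative only: remove everything under *path*, then recreate it
    with the original owner, group and permission bits."""
    st = os.stat(path)                       # remember uid/gid/mode
    shutil.rmtree(path)                      # drop the whole tree
    os.makedirs(path, mode=0o755)            # recreate the directory
    os.chown(path, st[ST_UID], st[ST_GID])   # restore ownership
    os.chmod(path, st[ST_MODE])              # restore the exact mode bits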
Example #2
def parse_config(config_files):
    for config_file in config_files:
        log.notice('Loading configuration file: %s', config_file)
        try:
            with open(config_file, 'rb') as f:
                config = tomli.load(f)
            for key in config:
                if key not in valid_config_file_values:
                    log.critical("Unknown option '%s' in config file %s", key,
                                 config_file)
            conf_values.update(config)
        except Exception as e:
            log.critical('Could not parse configuration file: %s: %s',
                         config_file, e)

    # print out any options messages
    for opt in conf_values['options']:
        if opt in option_messages:
            log.info(option_messages[opt])

    if "envscript" in conf_values:
        log.info('Envscript support enabled.')

    # take care of any variable substitutions that may be left
    for x in list(conf_values):
        if isinstance(conf_values[x], str):
            conf_values[x] = conf_values[x] % conf_values
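
The final loop performs old-style %-interpolation so one config value can reference another. A tiny self-contained illustration (the dictionary below is made up, not Catalyst's defaults):

values = {
    "storedir": "/var/tmp/catalyst",
    "snapshot_cache": "%(storedir)s/snapshot_cache",
}
for key in list(values):
    if isinstance(values[key], str):
        values[key] = values[key] % values
# values["snapshot_cache"] is now "/var/tmp/catalyst/snapshot_cache"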
Example #3
    def generate_digests(self):
        for pkgset in self.settings["grp"]:
            if self.settings["grp/" + pkgset + "/type"] == "pkgset":
                destdir = normpath(self.settings["target_path"] + "/" +
                                   pkgset + "/All")
                log.notice('Digesting files in the pkgset...')
                digests = glob.glob(destdir + '/*.DIGESTS')
                for i in digests:
                    if os.path.exists(i):
                        os.remove(i)

                files = os.listdir(destdir)
                #ignore files starting with '.' using list comprehension
                files = [filename for filename in files if filename[0] != '.']
                for i in files:
                    if os.path.isfile(normpath(destdir + "/" + i)):
                        self.gen_contents_file(normpath(destdir + "/" + i))
                        self.gen_digest_file(normpath(destdir + "/" + i))
            else:
                destdir = normpath(self.settings["target_path"] + "/" + pkgset)
                log.notice('Digesting files in the srcset...')

                digests = glob.glob(destdir + '/*.DIGESTS')
                for i in digests:
                    if os.path.exists(i):
                        os.remove(i)

                files = os.listdir(destdir)
                #ignore files starting with '.' using list comprehension
                files = [filename for filename in files if filename[0] != '.']
                for i in files:
                    if os.path.isfile(normpath(destdir + "/" + i)):
                        #self.gen_contents_file(normpath(destdir+"/"+i))
                        self.gen_digest_file(normpath(destdir + "/" + i))
Example #4
	def generate_digests(self):
		for pkgset in self.settings["grp"]:
			if self.settings["grp/"+pkgset+"/type"] == "pkgset":
				destdir=normpath(self.settings["target_path"]+"/"+pkgset+"/All")
				log.notice('Digesting files in the pkgset...')
				digests=glob.glob(destdir+'/*.DIGESTS')
				for i in digests:
					if os.path.exists(i):
						os.remove(i)

				files=os.listdir(destdir)
				#ignore files starting with '.' using list comprehension
				files=[filename for filename in files if filename[0] != '.']
				for i in files:
					if os.path.isfile(normpath(destdir+"/"+i)):
						self.gen_contents_file(normpath(destdir+"/"+i))
						self.gen_digest_file(normpath(destdir+"/"+i))
			else:
				destdir=normpath(self.settings["target_path"]+"/"+pkgset)
				log.notice('Digesting files in the srcset...')

				digests=glob.glob(destdir+'/*.DIGESTS')
				for i in digests:
					if os.path.exists(i):
						os.remove(i)

				files=os.listdir(destdir)
				#ignore files starting with '.' using list comprehension
				files=[filename for filename in files if filename[0] != '.']
				for i in files:
					if os.path.isfile(normpath(destdir+"/"+i)):
						#self.gen_contents_file(normpath(destdir+"/"+i))
						self.gen_digest_file(normpath(destdir+"/"+i))
Example #5
    def update_ebuild_repo(self) -> str:
        repouri = 'https://anongit.gentoo.org/git/repo/sync/gentoo.git'

        if self.ebuild_repo.is_dir():
            git_cmds = [
                [self.git, '-C', self.gitdir, 'fetch', '--quiet', '--depth=1'],
                [
                    self.git, '-C', self.gitdir, 'update-ref', 'HEAD',
                    'FETCH_HEAD'
                ],
                [self.git, '-C', self.gitdir, 'gc', '--quiet'],
            ]
        else:
            git_cmds = [
                [
                    self.git,
                    'clone',
                    '--quiet',
                    '--depth=1',
                    '--bare',
                    # Set some config options to enable git gc to clean everything
                    # except what we just fetched. See git-gc(1).
                    '-c',
                    'gc.reflogExpire=0',
                    '-c',
                    'gc.reflogExpireUnreachable=0',
                    '-c',
                    'gc.rerereresolved=0',
                    '-c',
                    'gc.rerereunresolved=0',
                    '-c',
                    'gc.pruneExpire=now',
                    '--branch=stable',
                    repouri,
                    self.gitdir
                ],
            ]

        try:
            for cmd in git_cmds:
                log.notice('>>> ' + ' '.join(cmd))
                subprocess.run(cmd,
                               capture_output=True,
                               check=True,
                               encoding='utf-8',
                               close_fds=False)

            sp = subprocess.run(
                [self.git, '-C', self.gitdir, 'rev-parse', 'stable'],
                capture_output=True,
                check=True,
                encoding='utf-8',
                close_fds=False)
            return sp.stdout.rstrip()

        except subprocess.CalledProcessError as e:
            raise CatalystError(f'{e.cmd} failed with return code '
                                f'{e.returncode}\n'
                                f'{e.output}\n')
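
The subprocess.run() calls above all follow one pattern: capture output, raise on a non-zero exit status, and turn the failure into a readable error. A reduced sketch of that pattern outside the class (the wrapper name and the example command are illustrative, not Catalyst API):

import subprocess

def run_git(args):
    try:
        sp = subprocess.run(['git', *args],
                            capture_output=True,
                            check=True,
                            encoding='utf-8')
        return sp.stdout.rstrip()
    except subprocess.CalledProcessError as e:
        raise RuntimeError(
            f'{e.cmd} failed with return code {e.returncode}\n{e.stderr}')

# e.g. run_git(['-C', '/path/to/gentoo.git', 'rev-parse', 'stable'])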
Example #6
def clear_dir(target,
              mode=0o755,
              chg_flags=False,
              remove=False,
              clear_nondir=True):
    '''Universal directory clearing function

	@target: string, path to be cleared or removed
	@mode: integer, desired mode to set the directory to
	@chg_flags: boolean used for FreeBSD hosts
	@remove: boolean, if True the directory is removed instead of recreated
	@return boolean
	'''
    log.debug('start: %s', target)
    if not target:
        log.debug('no target... returning')
        return False

    mystat = None
    if os.path.isdir(target) and not os.path.islink(target):
        log.notice('Emptying directory: %s', target)
        # stat the dir, delete the dir, recreate the dir and set
        # the proper perms and ownership
        try:
            log.debug('os.stat()')
            mystat = os.stat(target)
            # There's no easy way to change flags recursively in python
            if chg_flags and os.uname()[0] == "FreeBSD":
                cmd(['chflags', '-R', 'noschg', target])
            log.debug('shutil.rmtree()')
            shutil.rmtree(target)
        except Exception:
            log.error('clear_dir failed', exc_info=True)
            return False
    elif os.path.exists(target):
        if clear_nondir:
            log.debug("Clearing (unlinking) non-directory: %s", target)
            os.unlink(target)
        else:
            log.info('clear_dir failed: %s: is not a directory', target)
            return False
    else:
        log.debug("Conditions not met to clear: %s", target)
        log.debug("                      isdir: %s", os.path.isdir(target))
        log.debug("                     islink: %s", os.path.islink(target))
        log.debug("                     exists: %s", os.path.exists(target))

    if not remove:
        log.debug('ensure_dirs()')
        ensure_dirs(target, mode=mode)
        if mystat:
            os.chown(target, mystat[ST_UID], mystat[ST_GID])
            os.chmod(target, mystat[ST_MODE])

    log.debug('DONE, returning True')
    return True
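
A short usage sketch for clear_dir() as defined above, assuming it is importable from catalyst.fileops as in the Catalyst source tree (the target path is only an example):

from catalyst.fileops import clear_dir

# Empty a work directory in place, restoring its original owner and mode:
clear_dir('/var/tmp/catalyst/tmp/default/stage1-work', mode=0o755)

# Remove it entirely instead of recreating it:
clear_dir('/var/tmp/catalyst/tmp/default/stage1-work', remove=True)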
Example #7
	def create_netboot_files(self):
		# finish it all up
		try:
			cmd([self.settings['controller_file'], 'finish'], env=self.env)
		except CatalystError:
			self.unbind()
			raise CatalystError("netboot build aborting due to error.",
				print_traceback=True)
		# end
		log.notice('netboot: build finished !')
Example #8
 def create_netboot_files(self):
     # finish it all up
     try:
         cmd([self.settings['controller_file'], 'finish'], env=self.env)
     except CatalystError:
         self.unbind()
         raise CatalystError("netboot build aborting due to error.",
                             print_traceback=True)
     # end
     log.notice('netboot: build finished !')
Example #9
	def move_kernels(self):
		# we're done, move the kernels to builds/*
		# no auto resume here as we always want the
		# freshest images moved
		try:
			cmd([self.settings['controller_file'], 'final'], env=self.env)
			log.notice('Netboot Build Finished!')
		except CatalystError:
			self.unbind()
			raise CatalystError("Failed to move kernel images!",
				print_traceback=True)
Example #10
	def set_target_path(self):
		self.settings["target_path"]=normpath(self.settings["storedir"]+"/builds/"+\
			self.settings["target_subpath"])
		if "autoresume" in self.settings["options"] \
			and self.resume.is_enabled("setup_target_path"):
			log.notice('Resume point detected, skipping target path setup operation...')
		else:
			# first clean up any existing target stuff
			clear_path(self.settings['target_path'])
			self.resume.enable("setup_target_path")
		ensure_dirs(self.settings["storedir"]+"/builds/")
Example #11
	def setup_overlay(self):
		if "autoresume" in self.settings["options"] \
		and self.resume.is_enabled("setup_overlay"):
			log.notice('Resume point detected, skipping setup_overlay operation...')
		else:
			if "netboot2/overlay" in self.settings:
				for x in self.settings["netboot2/overlay"]:
					if os.path.exists(x):
						cmd("rsync -a "+x+"/ "+\
							self.settings["chroot_path"] + self.settings["merge_path"], "netboot2/overlay: "+x+" copy failed.",env=self.env)
				self.resume.enable("setup_overlay")
Example #12
	def remove(self):
		if "autoresume" in self.settings["options"] \
			and self.resume.is_enabled("remove"):
			log.notice('Resume point detected, skipping remove operation...')
		else:
			if self.settings["spec_prefix"]+"/rm" in self.settings:
				for x in self.settings[self.settings["spec_prefix"]+"/rm"]:
					# we're going to shell out for all these cleaning operations,
					# so we get easy glob handling
					log.notice('netboot2: removing %s', x)
					clear_path(self.settings['chroot_path'] +
						self.settings['merge_path'] + x)
Example #13
 def setup_overlay(self):
     if "autoresume" in self.settings["options"] \
     and self.resume.is_enabled("setup_overlay"):
         log.notice(
             'Resume point detected, skipping setup_overlay operation...')
     else:
         if "netboot2/overlay" in self.settings:
             for x in self.settings["netboot2/overlay"]:
                 if os.path.exists(x):
                     cmd("rsync -a "+x+"/ "+\
                      self.settings["chroot_path"] + self.settings["merge_path"], "netboot2/overlay: "+x+" copy failed.",env=self.env)
             self.resume.enable("setup_overlay")
Example #14
def clear_dir(target, mode=0o755, chg_flags=False, remove=False,
		clear_nondir=True):
	'''Universal directory clearing function

	@target: string, path to be cleared or removed
	@mode: integer, desired mode to set the directory to
	@chg_flags: boolean used for FreeBSD hosts
	@remove: boolean, if True the directory is removed instead of recreated
	@return boolean
	'''
	log.debug('start: %s', target)
	if not target:
		log.debug('no target... returning')
		return False

	mystat = None
	if os.path.isdir(target) and not os.path.islink(target):
		log.notice('Emptying directory: %s', target)
		# stat the dir, delete the dir, recreate the dir and set
		# the proper perms and ownership
		try:
			log.debug('os.stat()')
			mystat = os.stat(target)
			# There's no easy way to change flags recursively in python
			if chg_flags and os.uname()[0] == "FreeBSD":
				cmd(['chflags', '-R', 'noschg', target])
			log.debug('shutil.rmtree()')
			shutil.rmtree(target)
		except Exception:
			log.error('clear_dir failed', exc_info=True)
			return False
	elif os.path.exists(target):
		if clear_nondir:
			log.debug("Clearing (unlinking) non-directory: %s", target)
			os.unlink(target)
		else:
			log.info('clear_dir failed: %s: is not a directory', target)
			return False
	else:
		log.debug("Conditions not met to clear: %s", target)
		log.debug("                      isdir: %s", os.path.isdir(target))
		log.debug("                     islink: %s", os.path.islink(target))
		log.debug("                     exists: %s", os.path.exists(target))

	if not remove:
		log.debug('ensure_dirs()')
		ensure_dirs(target, mode=mode)
		if mystat:
			os.chown(target, mystat[ST_UID], mystat[ST_GID])
			os.chmod(target, mystat[ST_MODE])

	log.debug('DONE, returning True')
	return True
Example #15
	def setup_overlay(self):
		if "autoresume" in self.settings["options"] \
		and self.resume.is_enabled("setup_overlay"):
			log.notice('Resume point detected, skipping setup_overlay operation...')
		else:
			if "netboot2/overlay" in self.settings:
				for x in self.settings["netboot2/overlay"]:
					if os.path.exists(x):
						cmd(['rsync', '-a', x + '/',
							self.settings['chroot_path'] + self.settings['merge_path']],
							env=self.env)
				self.resume.enable("setup_overlay")
Example #16
def parse_config(config_files):
    # search a couple of different areas for the main config file
    myconf = {}

    # try and parse the config file "config_file"
    for config_file in config_files:
        log.notice('Loading configuration file: %s', config_file)
        try:
            config = catalyst.config.ConfigParser(config_file)
            myconf.update(config.get_values())
        except Exception as e:
        log.critical('Could not parse configuration file: %s: %s',
                     config_file, e)

    # now, load up the values into conf_values so that we can use them
    for x in list(confdefaults):
        if x in myconf:
            if x == 'options':
                conf_values[x] = set(myconf[x].split())
            elif x in ["decompressor_search_order"]:
                conf_values[x] = myconf[x].split()
            else:
                conf_values[x] = myconf[x]
        else:
            conf_values[x] = confdefaults[x]

    # add our python base directory to use for loading target arch's
    conf_values["PythonDir"] = os.path.dirname(os.path.realpath(__file__))

    # print out any options messages
    for opt in conf_values['options']:
        if opt in option_messages:
            log.info(option_messages[opt])

    for key in [
            "digests", "envscript", "var_tmpfs_portage", "port_logdir",
            "local_overlay"
    ]:
        if key in myconf:
            conf_values[key] = myconf[key]

    if "contents" in myconf:
        # replace '-' with '_' (for compatibility with existing configs)
        conf_values["contents"] = myconf["contents"].replace("-", '_')

    if "envscript" in myconf:
        log.info('Envscript support enabled.')

    # take care of any variable substitutions that may be left
    for x in list(conf_values):
        if isinstance(conf_values[x], str):
            conf_values[x] = conf_values[x] % conf_values
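
A small illustration of the 'options' handling above: the space-separated string read from the config file becomes a set of flags (the example value is invented):

myconf = {'options': 'autoresume bindist kerncache pkgcache seedcache'}
options = set(myconf['options'].split())
assert 'pkgcache' in options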
Example #17
	def set_target_path(self):
		self.settings["target_path"]=normpath(self.settings["storedir"]+"/builds/"+\
			self.settings["target_subpath"])
		if "autoresume" in self.settings["options"] \
			and self.resume.is_enabled("setup_target_path"):
			log.notice('Resume point detected, skipping target path setup operation...')
		else:
			# first clean up any existing target stuff
			if os.path.isfile(self.settings["target_path"]):
				cmd("rm -f "+self.settings["target_path"], \
					"Could not remove existing file: "+self.settings["target_path"],env=self.env)
				self.resume.enable("setup_target_path")
		ensure_dirs(self.settings["storedir"]+"/builds/")
Example #18
def parse_config(config_files):
	# search a couple of different areas for the main config file
	myconf={}

	# try and parse the config file "config_file"
	for config_file in config_files:
		log.notice('Loading configuration file: %s', config_file)
		try:
			config = catalyst.config.ConfigParser(config_file)
			myconf.update(config.get_values())
		except Exception as e:
			log.critical('Could not parse configuration file: %s: %s',
				config_file, e)

	# now, load up the values into conf_values so that we can use them
	for x in list(confdefaults):
		if x in myconf:
			if x == 'options':
				conf_values[x] = set(myconf[x].split())
			elif x in ["decompressor_search_order"]:
				conf_values[x] = myconf[x].split()
			else:
				conf_values[x]=myconf[x]
		else:
			conf_values[x]=confdefaults[x]

	# add our python base directory to use for loading target arch's
	conf_values["PythonDir"] = __selfpath__

	# print out any options messages
	for opt in conf_values['options']:
		if opt in option_messages:
			log.info(option_messages[opt])

	for key in ["digests", "envscript", "var_tmpfs_portage", "port_logdir",
				"local_overlay"]:
		if key in myconf:
			conf_values[key] = myconf[key]

	if "contents" in myconf:
		# replace '-' with '_' (for compatibility with existing configs)
		conf_values["contents"] = myconf["contents"].replace("-", '_')

	if "envscript" in myconf:
		log.info('Envscript support enabled.')

	# take care of any variable substitutions that may be left
	for x in list(conf_values):
		if isinstance(conf_values[x], str):
			conf_values[x] = conf_values[x] % conf_values
Example #19
	def purge(self):
		myemp=self.settings["tmp_path"]
		if os.path.isdir(myemp):
			log.notice('Emptying directory %s', myemp)
			# stat the dir, delete the dir, recreate the dir and set
			# the proper perms and ownership
			mystat=os.stat(myemp)
			# There's no easy way to change flags recursively in python
			if os.uname()[0] == "FreeBSD":
				os.system("chflags -R noschg "+myemp)
			shutil.rmtree(myemp)
			ensure_dirs(myemp, mode=0o755)
			os.chown(myemp,mystat[ST_UID],mystat[ST_GID])
			os.chmod(myemp,mystat[ST_MODE])
Example #20
 def set_target_path(self):
     self.settings["target_path"]=normpath(self.settings["storedir"]+"/builds/"+\
      self.settings["target_subpath"])
     if "autoresume" in self.settings["options"] \
      and self.resume.is_enabled("setup_target_path"):
         log.notice(
             'Resume point detected, skipping target path setup operation...'
         )
     else:
         # first clean up any existing target stuff
         if os.path.isfile(self.settings["target_path"]):
             cmd("rm -f "+self.settings["target_path"], \
              "Could not remove existing file: "+self.settings["target_path"],env=self.env)
             self.resume.enable("setup_target_path")
     ensure_dirs(self.settings["storedir"] + "/builds/")
Example #21
	def empty(self):
		if "autoresume" in self.settings["options"] \
			and self.resume.is_enabled("empty"):
			log.notice('Resume point detected, skipping empty operation...')
		else:
			if "netboot2/empty" in self.settings:
				if isinstance(self.settings['netboot2/empty'], str):
					self.settings["netboot2/empty"]=self.settings["netboot2/empty"].split()
				for x in self.settings["netboot2/empty"]:
					myemp=self.settings["chroot_path"] + self.settings["merge_path"] + x
					if not os.path.isdir(myemp):
						log.warning('not a directory or does not exist, skipping "empty" operation: %s', x)
						continue
					log.info('Emptying directory %s', x)
					# stat the dir, delete the dir, recreate the dir and set
					# the proper perms and ownership
					clear_dir(myemp)
		self.resume.enable("empty")
Example #22
def clear_dir(target, mode=0o755, remove=False):
    '''Universal directory clearing function

    @target: string, path to be cleared or removed
    @mode: integer, desired mode to set the directory to
    @remove: boolean, if True the directory is removed instead of recreated
    @return boolean
    '''
    log.debug('start: %s', target)
    if not target:
        log.debug('no target... returning')
        return False

    mystat = None
    if os.path.isdir(target) and not os.path.islink(target):
        log.notice('Emptying directory: %s', target)
        # stat the dir, delete the dir, recreate the dir and set
        # the proper perms and ownership
        try:
            log.debug('os.stat()')
            mystat = os.stat(target)
            log.debug('shutil.rmtree()')
            shutil.rmtree(target)
        except Exception:
            log.error('clear_dir failed', exc_info=True)
            return False
    elif os.path.exists(target):
        log.debug("Clearing (unlinking) non-directory: %s", target)
        os.unlink(target)
    else:
        log.debug("Conditions not met to clear: %s", target)
        log.debug("                      isdir: %s", os.path.isdir(target))
        log.debug("                     islink: %s", os.path.islink(target))
        log.debug("                     exists: %s", os.path.exists(target))

    if not remove:
        log.debug('ensure_dirs()')
        ensure_dirs(target, mode=mode)
        if mystat:
            os.chown(target, mystat[ST_UID], mystat[ST_GID])
            os.chmod(target, mystat[ST_MODE])

    log.debug('DONE, returning True')
    return True
Example #23
    def copy_files_to_image(self):
        # copies specific files from the buildroot to merge_path
        myfiles = []

        # check for autoresume point
        if "autoresume" in self.settings["options"] \
                and self.resume.is_enabled("copy_files_to_image"):
            log.notice(
                'Resume point detected, skipping copy_files_to_image operation...'
            )
        else:
            if "netboot/packages" in self.settings:
                if isinstance(self.settings['netboot/packages'], str):
                    loopy = [self.settings["netboot/packages"]]
                else:
                    loopy = self.settings["netboot/packages"]

            for x in loopy:
                if "netboot/packages/" + x + "/files" in self.settings:
                    if isinstance(
                            self.settings['netboot/packages/' + x + '/files'],
                            list):
                        myfiles.extend(self.settings["netboot/packages/" + x +
                                                     "/files"])
                    else:
                        myfiles.append(self.settings["netboot/packages/" + x +
                                                     "/files"])

            if "netboot/extra_files" in self.settings:
                if isinstance(self.settings['netboot/extra_files'], list):
                    myfiles.extend(self.settings["netboot/extra_files"])
                else:
                    myfiles.append(self.settings["netboot/extra_files"])

            try:
                cmd([self.settings['controller_file'], 'image'] + myfiles,
                    env=self.env)
            except CatalystError:
                self.unbind()
                raise CatalystError("Failed to copy files to image!",
                                    print_traceback=True)

            self.resume.enable("copy_files_to_image")
Example #24
 def clean_stage1(self):
     '''seedcache is enabled, so salvage the /tmp/stage1root,
     remove the seed chroot'''
     log.notice('Salvaging the stage1root from the chroot path ...')
     # move the self.settings["stage_path"] outside of the self.settings["chroot_path"]
     tmp_path = normpath(self.settings["storedir"] + "/tmp/" + "stage1root")
     if move_path(self.settings["stage_path"], tmp_path):
         self.remove_chroot()
         # move it to self.settings["chroot_path"]
         if not move_path(tmp_path, self.settings["chroot_path"]):
             log.error(
                 'clean_stage1 failed, see previous log messages for details'
             )
             return False
         log.notice(
             'Successfully moved and cleaned the stage1root for the seedcache'
         )
         return True
     log.error(
         'clean_stage1 failed to move the stage1root to a temporary location'
     )
     return False
Example #25
	def empty(self):
		if "autoresume" in self.settings["options"] \
			and self.resume.is_enabled("empty"):
			log.notice('Resume point detected, skipping empty operation...')
		else:
			if "netboot2/empty" in self.settings:
				if isinstance(self.settings['netboot2/empty'], str):
					self.settings["netboot2/empty"]=self.settings["netboot2/empty"].split()
				for x in self.settings["netboot2/empty"]:
					myemp=self.settings["chroot_path"] + self.settings["merge_path"] + x
					if not os.path.isdir(myemp):
						log.warning('not a directory or does not exist, skipping "empty" operation: %s', x)
						continue
					log.info('Emptying directory %s', x)
					# stat the dir, delete the dir, recreate the dir and set
					# the proper perms and ownership
					mystat=os.stat(myemp)
					shutil.rmtree(myemp)
					ensure_dirs(myemp, mode=0o755)
					os.chown(myemp,mystat[ST_UID],mystat[ST_GID])
					os.chmod(myemp,mystat[ST_MODE])
		self.resume.enable("empty")
Example #26
    def purge(self, remove=False):
        countdown(10, "Purging Caches ...")
        if any(k in self.settings["options"] for k in ("purge",
                                                       "purgeonly", "purgetmponly")):
            log.notice('purge(); clearing autoresume ...')
            self.clear_autoresume()

            log.notice('purge(); clearing chroot ...')
            self.clear_chroot()

            if "purgetmponly" not in self.settings["options"]:
                log.notice('purge(); clearing package cache ...')
                self.clear_packages(remove)

            log.notice('purge(); clearing kerncache ...')
            self.clear_kerncache(remove)
Example #27
	def copy_files_to_image(self):
		# copies specific files from the buildroot to merge_path
		myfiles=[]

		# check for autoresume point
		if "autoresume" in self.settings["options"] \
			and self.resume.is_enabled("copy_files_to_image"):
			log.notice('Resume point detected, skipping copy_files_to_image operation...')
		else:
			if "netboot2/packages" in self.settings:
				if isinstance(self.settings['netboot2/packages'], str):
					loopy=[self.settings["netboot2/packages"]]
				else:
					loopy=self.settings["netboot2/packages"]

			for x in loopy:
				if "netboot2/packages/"+x+"/files" in self.settings:
					if isinstance(self.settings['netboot2/packages/'+x+'/files'], list):
						myfiles.extend(self.settings["netboot2/packages/"+x+"/files"])
					else:
						myfiles.append(self.settings["netboot2/packages/"+x+"/files"])

			if "netboot2/extra_files" in self.settings:
				if isinstance(self.settings['netboot2/extra_files'], list):
					myfiles.extend(self.settings["netboot2/extra_files"])
				else:
					myfiles.append(self.settings["netboot2/extra_files"])

			try:
				cmd([self.settings['controller_file'], 'image'] +
					myfiles, env=self.env)
			except CatalystError:
				self.unbind()
				raise CatalystError("Failed to copy files to image!",
					print_traceback=True)

			self.resume.enable("copy_files_to_image")
Example #28
	def run(self):
		if "purgeonly" in self.settings["options"]:
			self.purge()
			return True

		if "purge" in self.settings["options"]:
			self.purge()

		success = True
		self.setup()
		log.notice('Creating %s tree snapshot %s from %s ...',
			self.settings["repo_name"], self.settings['version_stamp'],
			self.settings['portdir'])

		mytmp=self.settings["tmp_path"]
		ensure_dirs(mytmp)

		cmd(['rsync', '-a', '--no-o', '--no-g', '--delete',
			'--exclude=/packages/',
			'--exclude=/distfiles/',
			'--exclude=/local/',
			'--exclude=CVS/',
			'--exclude=.svn',
			'--exclude=.git/',
			'--filter=H_**/files/digest-*',
			self.settings['portdir'] + '/',
			mytmp + '/' + self.settings['repo_name'] + '/'],
			env=self.env)

		log.notice('Compressing %s snapshot tarball ...', self.settings["repo_name"])
		compressor = CompressMap(self.settings["compress_definitions"],
			env=self.env, default_mode=self.settings['compression_mode'],
			comp_prog=self.settings["comp_prog"])
		infodict = compressor.create_infodict(
			source=self.settings["repo_name"],
			destination=self.settings["snapshot_path"],
			basedir=mytmp,
			filename=self.settings["snapshot_path"],
			mode=self.settings["compression_mode"],
			auto_extension=True
			)
		if not compressor.compress(infodict):
			success = False
			log.error('Snapshot compression failure')
		else:
			filename = '.'.join([self.settings["snapshot_path"],
				compressor.extension(self.settings["compression_mode"])])
			log.notice('Snapshot successfully written to %s', filename)
			self.gen_contents_file(filename)
			self.gen_digest_file(filename)
		if "keepwork" not in self.settings["options"]:
			self.cleanup()
		if success:
			log.info('snapshot: complete!')
		return success
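
The rsync invocation above can be exercised on its own; a standalone sketch with placeholder source and destination paths:

import subprocess

src = '/var/db/repos/gentoo/'
dst = '/var/tmp/catalyst/tmp/snapshot/gentoo/'
subprocess.run(['rsync', '-a', '--no-o', '--no-g', '--delete',
                '--exclude=/packages/', '--exclude=/distfiles/',
                '--exclude=/local/', '--exclude=CVS/', '--exclude=.svn',
                '--exclude=.git/', '--filter=H_**/files/digest-*',
                src, dst],
               check=True)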
Example #29
	def run(self):
		if "purgeonly" in self.settings["options"]:
			self.purge()
			return True

		if "purge" in self.settings["options"]:
			self.purge()

		success = True
		self.setup()
		log.notice('Creating %s tree snapshot %s from %s ...',
			self.settings["repo_name"], self.settings['version_stamp'],
			self.settings['portdir'])

		mytmp=self.settings["tmp_path"]
		ensure_dirs(mytmp)

		cmd(['rsync', '-a', '--no-o', '--no-g', '--delete',
			'--exclude=/packages/',
			'--exclude=/distfiles/',
			'--exclude=/local/',
			'--exclude=CVS/',
			'--exclude=.svn',
			'--filter=H_**/files/digest-*',
			self.settings['portdir'] + '/',
			mytmp + '/' + self.settings['repo_name'] + '/'],
			env=self.env)

		log.notice('Compressing %s snapshot tarball ...', self.settings["repo_name"])
		compressor = CompressMap(self.settings["compress_definitions"],
			env=self.env, default_mode=self.settings['compression_mode'],
			comp_prog=self.settings["comp_prog"])
		infodict = compressor.create_infodict(
			source=self.settings["repo_name"],
			destination=self.settings["snapshot_path"],
			basedir=mytmp,
			filename=self.settings["snapshot_path"],
			mode=self.settings["compression_mode"],
			auto_extension=True
			)
		if not compressor.compress(infodict):
			success = False
			log.error('Snapshot compression failure')
		else:
			filename = '.'.join([self.settings["snapshot_path"],
				compressor.extension(self.settings["compression_mode"])])
			log.notice('Snapshot successfully written to %s', filename)
			self.gen_contents_file(filename)
			self.gen_digest_file(filename)
		if "keepwork" not in self.settings["options"]:
			self.cleanup()
		if success:
			log.info('snapshot: complete!')
		return success
Example #30
    def run(self):
        if self.settings['snapshot_treeish'] == 'stable':
            treeish = self.update_ebuild_repo()
        else:
            treeish = self.settings['snapshot_treeish']

        self.set_snapshot(treeish)

        git_cmd = [
            self.git, '-C', self.gitdir, 'archive', '--format=tar', treeish
        ]
        tar2sqfs_cmd = [
            command('tar2sqfs'),
            str(self.snapshot), '-q', '-f', '-j1', '-c', 'gzip'
        ]

        log.notice('Creating %s tree snapshot %s from %s',
                   self.settings['repo_name'], treeish, self.gitdir)
        log.notice('>>> ' + ' '.join([*git_cmd, '|']))
        log.notice('    ' + ' '.join(tar2sqfs_cmd))

        lockfile = self.snapshot.with_suffix('.lock')
        with write_lock(lockfile):
            git = subprocess.Popen(git_cmd,
                                   stdout=subprocess.PIPE,
                                   stderr=sys.stderr,
                                   close_fds=False)
            tar2sqfs = subprocess.Popen(tar2sqfs_cmd,
                                        stdin=git.stdout,
                                        stdout=sys.stdout,
                                        stderr=sys.stderr,
                                        close_fds=False)
            git.stdout.close()
            git.wait()
            tar2sqfs.wait()

        if tar2sqfs.returncode == 0:
            log.notice('Wrote snapshot to %s', self.snapshot)
        else:
            log.error('Failed to create snapshot')
        return tar2sqfs.returncode == 0
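
The two Popen objects above implement a plain shell-style pipe. A generic sketch of the same producer-to-consumer pattern (the commands and the output file are placeholders, not the Catalyst ones):

import subprocess
import sys

with open('snapshot.tar.gz', 'wb') as out:
    producer = subprocess.Popen(['git', 'archive', '--format=tar', 'HEAD'],
                                stdout=subprocess.PIPE, stderr=sys.stderr)
    consumer = subprocess.Popen(['gzip', '-c'],
                                stdin=producer.stdout,
                                stdout=out,
                                stderr=sys.stderr)
    producer.stdout.close()  # let the consumer see EOF once the producer exits
    producer.wait()
    consumer.wait()

success = consumer.returncode == 0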
Example #31
	def run(self):
		if "purgeonly" in self.settings["options"]:
			self.purge()
			return True

		if "purge" in self.settings["options"]:
			self.purge()

		success = True
		self.setup()
		log.notice('Creating Portage tree snapshot %s from %s ...',
			self.settings['version_stamp'], self.settings['portdir'])

		mytmp=self.settings["tmp_path"]
		ensure_dirs(mytmp)

		target_snapshot = self.settings["portdir"] + "/ " + mytmp + "/%s/" % self.settings["repo_name"]
		cmd("rsync -a --no-o --no-g --delete --exclude /packages/ --exclude /distfiles/ " +
			"--exclude /local/ --exclude CVS/ --exclude .svn --filter=H_**/files/digest-* " +
			target_snapshot,
			"Snapshot failure", env=self.env)

		log.notice('Compressing Portage snapshot tarball ...')
		compressor = CompressMap(self.settings["compress_definitions"],
			env=self.env, default_mode=self.settings['compression_mode'])
		infodict = compressor.create_infodict(
			source=self.settings["repo_name"],
			destination=self.settings["snapshot_path"],
			basedir=mytmp,
			filename=self.settings["snapshot_path"],
			mode=self.settings["compression_mode"],
			auto_extension=True
			)
		if not compressor.compress(infodict):
			success = False
			log.error('Snapshot compression failure')
		else:
			filename = '.'.join([self.settings["snapshot_path"],
				compressor.extension(self.settings["compression_mode"])])
			log.notice('Snapshot successfully written to %s', filename)
			self.gen_contents_file(filename)
			self.gen_digest_file(filename)

		self.cleanup()
		if success:
			log.info('snapshot: complete!')
		return success
Example #32
	def set_stage_path(self):
		self.settings["stage_path"]=normpath(self.settings["chroot_path"]+self.settings["root_path"])
		log.notice('stage1 stage path is %s', self.settings['stage_path'])
Example #33
def _main(parser, opts):
    """The "main" main function so we can trace/profile."""
    # Initialize the logger before anything else.
    log_level = opts.log_level
    if log_level is None:
        if opts.debug:
            log_level = 'debug'
        elif opts.verbose:
            log_level = 'info'
        else:
            log_level = 'notice'
    log.setup_logging(log_level,
                      output=opts.log_file,
                      debug=opts.debug,
                      color=opts.color)

    # Parse the command line options.
    myconfigs = opts.configs
    if not myconfigs:
        myconfigs = [DEFAULT_CONFIG_FILE]
    myspecfile = opts.file
    mycmdline = opts.cli[:]

    if opts.snapshot:
        mycmdline.append('target=snapshot')
        mycmdline.append('version_stamp=' + opts.snapshot)

    conf_values['DEBUG'] = opts.debug
    conf_values['VERBOSE'] = opts.debug or opts.verbose

    options = set()
    if opts.fetchonly:
        options.add('fetch')
    if opts.purge:
        options.add('purge')
    if opts.purgeonly:
        options.add('purgeonly')
    if opts.purgetmponly:
        options.add('purgetmponly')
    if opts.clear_autoresume:
        options.add('clear-autoresume')

    # Make sure we have some work before moving further.
    if not myspecfile and not mycmdline:
        parser.error('please specify one of either -f or -C or -s')

    # made it this far so start by outputting our version info
    version()
    # import configuration file and import our main module using those settings
    parse_config(myconfigs)

    conf_values["options"].update(options)
    log.notice('conf_values[options] = %s', conf_values['options'])

    # initialize our contents generator
    contents_map = ContentsMap(CONTENTS_DEFINITIONS,
                               comp_prog=conf_values['comp_prog'],
                               decomp_opt=conf_values['decomp_opt'],
                               list_xattrs_opt=conf_values['list_xattrs_opt'])
    conf_values["contents_map"] = contents_map

    # initialize our hash generator
    hash_map = HashMap(HASH_DEFINITIONS)
    conf_values["hash_map"] = hash_map

    # initialize our (de)compression definitions
    conf_values['decompress_definitions'] = DECOMPRESS_DEFINITIONS
    conf_values['compress_definitions'] = COMPRESS_DEFINITIONS
    # TODO add capability to config/spec new definitions

    # Start checking that digests are valid now that hash_map is initialized
    if "digests" in conf_values:
        digests = set(conf_values['digests'].split())
        valid_digests = set(HASH_DEFINITIONS.keys())

        # Use the magic keyword "auto" to use all algos that are available.
        skip_missing = False
        if 'auto' in digests:
            skip_missing = True
            digests.remove('auto')
            if not digests:
                digests = set(valid_digests)

        # First validate all the requested digests are valid keys.
        if digests - valid_digests:
            log.critical(
                'These are not valid digest entries:\n'
                '%s\n'
                'Valid digest entries:\n'
                '%s', ', '.join(digests - valid_digests),
                ', '.join(sorted(valid_digests)))

        # Then check for any programs that the hash func requires.
        for digest in list(digests):  # iterate over a copy; the loop may remove entries
            try:
                process.find_binary(hash_map.hash_map[digest].cmd)
            except process.CommandNotFound:
                # In auto mode, just ignore missing support.
                if skip_missing:
                    digests.remove(digest)
                    continue
                log.critical(
                    'The "%s" binary needed by digest "%s" was not found. '
                    'It needs to be in your system path.',
                    hash_map.hash_map[digest].cmd, digest)

        # Now reload the config with our updated value.
        conf_values['digests'] = ' '.join(digests)

    if "hash_function" in conf_values:
        if conf_values["hash_function"] not in HASH_DEFINITIONS:
            log.critical(
                '%s is not a valid hash_function entry\n'
                'Valid hash_function entries:\n'
                '%s', conf_values["hash_function"], HASH_DEFINITIONS.keys())
        try:
            process.find_binary(
                hash_map.hash_map[conf_values["hash_function"]].cmd)
        except process.CommandNotFound:
            log.critical(
                'The "%s" binary needed by hash_function "%s" was not found. '
                'It needs to be in your system path.',
                hash_map.hash_map[conf_values['hash_function']].cmd,
                conf_values['hash_function'])

    # detect GNU sed
    for sed in ('/usr/bin/gsed', '/bin/sed', '/usr/bin/sed'):
        if os.path.exists(sed):
            conf_values["sed"] = sed
            break

    addlargs = {}

    if myspecfile:
        log.notice("Processing spec file: %s", myspecfile)
        spec = catalyst.config.SpecParser(myspecfile)
        addlargs.update(spec.get_values())

    if mycmdline:
        try:
            cmdline = catalyst.config.ConfigParser()
            cmdline.parse_lines(mycmdline)
            addlargs.update(cmdline.get_values())
        except CatalystError:
            log.critical('Could not parse commandline')

    if "target" not in addlargs:
        raise CatalystError("Required value \"target\" not specified.")

    if os.getuid() != 0:
        # catalyst cannot be run as a normal user due to chroots, mounts, etc
        log.critical('This script requires root privileges to operate')

    # Namespaces aren't supported on *BSDs at the moment. So let's check
    # whether we're on Linux.
    if os.uname().sysname in ["Linux", "linux"]:
        # Start off by creating unique namespaces to run in.  Would be nice to
        # use pid & user namespaces, but snakeoil's namespace module has signal
        # transfer issues (CTRL+C doesn't propagate), and user namespaces need
        # more work due to Gentoo build process (uses sudo/root/portage).
        namespaces.simple_unshare(mount=True,
                                  uts=True,
                                  ipc=True,
                                  pid=False,
                                  net=False,
                                  user=False,
                                  hostname='catalyst')

    # everything is setup, so the build is a go
    try:
        success = build_target(addlargs)
    except KeyboardInterrupt:
        log.critical('Catalyst build aborted due to user interrupt (Ctrl-C)')
    if not success:
        sys.exit(2)
    sys.exit(0)
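
The digest handling in _main() above boils down to: expand the magic 'auto' keyword, validate the requested names, then drop algorithms whose helper binary is missing. An isolated sketch under simplified assumptions (HASH_DEFINITIONS reduced to a plain name-to-command dict; snakeoil's find_binary used for the lookup, as in the original):

from snakeoil import process

HASH_DEFINITIONS = {'md5': 'md5sum', 'sha512': 'sha512sum', 'blake2b': 'b2sum'}

requested = {'auto'}                   # e.g. taken from conf_values['digests']
skip_missing = 'auto' in requested
requested.discard('auto')
if skip_missing and not requested:
    requested = set(HASH_DEFINITIONS)  # 'auto' alone means "every available algo"

digests = set()
for name in requested:
    try:
        process.find_binary(HASH_DEFINITIONS[name])
        digests.add(name)
    except process.CommandNotFound:
        if not skip_missing:
            raise                      # hard error unless running in auto mode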
Example #34
 def move_kernels(self):
     # we're done, move the kernels to builds/*
     # no auto resume here as we always want the
     # freshest images moved
     cmd([self.settings['controller_file'], 'final'], env=self.env)
     log.notice('Netboot Build Finished!')
Example #35
 def remove_chroot(self):
     log.notice('Removing the chroot path ...')
     clear_dir(self.settings["chroot_path"], mode=0o755, remove=True)
Example #36
def _main(parser, opts):
    """The "main" main function so we can trace/profile."""
    # Initialize the logger before anything else.
    log_level = opts.log_level
    if log_level is None:
        if opts.debug:
            log_level = 'debug'
        elif opts.verbose:
            log_level = 'info'
        else:
            log_level = 'notice'
    log.setup_logging(log_level,
                      output=opts.log_file,
                      debug=opts.debug,
                      color=opts.color)

    # Parse the command line options.
    myconfigs = opts.configs
    if not myconfigs:
        myconfigs = [DEFAULT_CONFIG_FILE]
    myspecfile = opts.file

    mycmdline = list()
    if opts.snapshot:
        mycmdline.append('target: snapshot')
        mycmdline.append('snapshot_treeish: ' + opts.snapshot)

    conf_values['DEBUG'] = opts.debug
    conf_values['VERBOSE'] = opts.debug or opts.verbose

    options = []
    if opts.fetchonly:
        options.append('fetch')
    if opts.purge:
        options.append('purge')
    if opts.purgeonly:
        options.append('purgeonly')
    if opts.purgetmponly:
        options.append('purgetmponly')
    if opts.clear_autoresume:
        options.append('clear-autoresume')
    if opts.enter_chroot:
        options.append('enter-chroot')

    # Make sure we have some work before moving further.
    if not myspecfile and not mycmdline:
        parser.error('please specify one of either -f or -C or -s')

    # made it this far so start by outputting our version info
    version()
    # import configuration file and import our main module using those settings
    parse_config(myconfigs)

    conf_values["options"].extend(options)
    log.notice('conf_values[options] = %s', conf_values['options'])

    # initialize our contents generator
    contents_map = ContentsMap(CONTENTS_DEFINITIONS,
                               comp_prog=conf_values['comp_prog'],
                               decomp_opt=conf_values['decomp_opt'],
                               list_xattrs_opt=conf_values['list_xattrs_opt'])
    conf_values["contents_map"] = contents_map

    # initialize our (de)compression definitions
    conf_values['decompress_definitions'] = DECOMPRESS_DEFINITIONS
    conf_values['compress_definitions'] = COMPRESS_DEFINITIONS
    # TODO add capability to config/spec new definitions

    if "digests" in conf_values:
        valid_digests = hashlib.algorithms_available
        digests = set(conf_values['digests'])
        conf_values['digests'] = digests

        # First validate all the requested digests are valid keys.
        if digests - valid_digests:
            raise CatalystError('These are not valid digest entries:\n%s\n'
                                'Valid digest entries:\n%s' %
                                (', '.join(sorted(digests - valid_digests)),
                                 ', '.join(sorted(valid_digests))))

    addlargs = {}

    if myspecfile:
        log.notice("Processing spec file: %s", myspecfile)
        spec = catalyst.config.SpecParser(myspecfile)
        addlargs.update(spec.get_values())

    if mycmdline:
        try:
            cmdline = catalyst.config.SpecParser()
            cmdline.parse_lines(mycmdline)
            addlargs.update(cmdline.get_values())
        except CatalystError:
            log.critical('Could not parse commandline')

    if "target" not in addlargs:
        raise CatalystError("Required value \"target\" not specified.")

    if os.getuid() != 0:
        # catalyst cannot be run as a normal user due to chroots, mounts, etc
        log.critical('This script requires root privileges to operate')

    # Start off by creating unique namespaces to run in.  Would be nice to
    # use pid & user namespaces, but snakeoil's namespace module has signal
    # transfer issues (CTRL+C doesn't propagate), and user namespaces need
    # more work due to Gentoo build process (uses sudo/root/portage).
    with namespace(uts=True, ipc=True, hostname='catalyst'):
        # everything is setup, so the build is a go
        try:
            success = build_target(addlargs)
        except KeyboardInterrupt:
            log.critical(
                'Catalyst build aborted due to user interrupt (Ctrl-C)')

    if not success:
        sys.exit(2)
    sys.exit(0)
Example #37
 def clear_autoresume(self):
     """ Clean resume points since they are no longer needed """
     if "autoresume" in self.settings["options"]:
         log.notice('Removing AutoResume Points ...')
         self.resume.clear_all()
Example #38
 def clear_kerncache(self, remove=False):
     if "kerncache" in self.settings["options"]:
         log.notice('purging the kerncache ...')
         clear_dir(self.settings["kerncache_path"], remove=remove)
Example #39
 def clear_packages(self, remove=False):
     if "pkgcache" in self.settings["options"]:
         log.notice('purging the pkgcache ...')
         clear_dir(self.settings["pkgcache_path"], remove=remove)
Example #40
 def set_stage_path(self):
     self.settings["stage_path"] = normpath(self.settings["chroot_path"] +
                                            self.settings["root_path"])
     log.notice('stage1 stage path is %s', self.settings['stage_path'])
Example #41
 def remove_autoresume(self):
     """ Remove all resume points since they are no longer needed """
     if "autoresume" in self.settings["options"]:
         log.notice('Removing AutoResume ...')
         self.resume.clear_all(remove=True)
Example #42
 def clear_chroot(self):
     log.notice('Clearing the chroot path ...')
     clear_dir(self.settings["chroot_path"], mode=0o755)