Example #1
0
	def initialize(self):
		# Open the database.
		self.open()

		# Check if we actually can open the database.
		if self.format not in DATABASE_FORMATS_SUPPORTED:
			raise DatabaseFormatError, _("The format of the database is not supported by this version of pakfire.")
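
A minimal standalone sketch of the guard above; the DATABASE_FORMATS_SUPPORTED values and the error class are assumptions made up for illustration:

DATABASE_FORMATS_SUPPORTED = [0, 1]	# assumed values

class DatabaseFormatError(Exception):
	pass

def check_format(format):
	# Reject any format this version does not know how to read.
	if format not in DATABASE_FORMATS_SUPPORTED:
		raise DatabaseFormatError("Unsupported database format: %s" % format)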
Example #2
0
	def __init__(self, pakfire, repo, filename):
		base.Package.__init__(self, pakfire, repo)
		self.filename = os.path.abspath(filename)

		# Place to cache the metadata
		self._metadata = {}

		# Place to cache the filelist and payload compression algorithm.
		self._filelist = None
		self.__payload_compression = None

		# Store the format of this package file.
		self.format = self.get_format()

		# XXX need to make this much better.
		self.check()

		# Read the info file.
		if self.format >= 1:
			a = self.open_archive()
			f = a.extractfile("info")

			self.lexer = lexer.FileLexer(f.readlines())

			f.close()
			a.close()

		elif self.format == 0:
			pass

		else:
			raise PackageFormatUnsupportedError, _("Filename: %s") % self.filename
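
The format >= 1 branch above pulls a member named "info" out of an archive. A rough sketch of that read with the standard tarfile module, assuming the package is a plain tar archive (the real open_archive() is not shown in this section):

import tarfile

def read_info(filename):
	# Open the package as a tar archive and read the "info" member.
	a = tarfile.open(filename)
	try:
		f = a.extractfile("info")
		try:
			return f.readlines()
		finally:
			f.close()
	finally:
		a.close()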
Example #3
0
    def update_metadata(self, force=False, offline=False):
        filename = os.path.join(METADATA_DOWNLOAD_PATH, METADATA_DOWNLOAD_FILE)
        cache_filename = self.cache_path(os.path.basename(filename))

        # Check if the metadata is already recent enough...
        exists = self.cache.exists(cache_filename)

        if not exists and offline:
            raise OfflineModeError, _("No metadata available for repository %s. Cannot download any.") \
             % self.name

        elif exists and offline:
            # Repository metadata exists. We cannot update anything because of the offline mode.
            return

        if not force and exists:
            age = self.cache.age(cache_filename)
            if age and age < TIME_10M:
                log.debug(
                    "Metadata is recent enough. Skipping re-download.")
                return

        # Going to download metadata.
        log.debug("Going to download repository metadata for %s..." %
                  self.name)
        assert not offline

        grabber = downloader.MetadataDownloader(self.pakfire)
        grabber = self.mirrors.group(grabber)

        while True:
            try:
                data = grabber.urlread(filename, limit=METADATA_DOWNLOAD_LIMIT)
            except urlgrabber.grabber.URLGrabError, e:
                if e.errno == 256:
                    raise DownloadError, _(
                        "Could not update metadata for %s from any mirror server"
                    ) % self.name

                grabber.increment_mirror(grabber)
                continue

            # Parse new metadata for comparison.
            md = metadata.Metadata(self.pakfire, metadata=data)

            if self.metadata and md < self.metadata:
                log.warning(
                    _("The downloaded metadata was less recent than the current one."
                      ))
                grabber.increment_mirror(grabber)
                continue

            # If the download went well, we write the downloaded data to disk
            # and break the loop.
            f = self.cache.open(cache_filename, "w")
            f.write(data)
            f.close()

            break
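
The freshness check compares the cache age against TIME_10M. A small sketch, assuming the age is derived from the cached file's mtime (the Cache class itself is not shown in this section):

import os
import time

TIME_10M = 600  # ten minutes in seconds (assumed value)

def is_recent_enough(path):
    # A missing file can never be recent enough.
    try:
        age = time.time() - os.path.getmtime(path)
    except OSError:
        return False

    return age < TIME_10M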
Example #4
0
    def update_database(self, force=False, offline=False):
        assert self.metadata, "Metadata needs to be opened first."

        # Construct cache and download filename.
        filename = os.path.join(METADATA_DOWNLOAD_PATH, self.metadata.database)
        cache_filename = self.cache_path("database", self.metadata.database)

        if not force:
            force = not self.cache.exists(cache_filename)

        # Raise an exception when we are running in offline mode but an update is required.
        if force and offline:
            raise OfflineModeError, _(
                "Cannot download package database for %s in offline mode."
            ) % self.name

        elif not force:
            return

        # Just make sure we don't try to download anything in offline mode.
        assert not offline

        # Initialize a grabber for download.
        grabber = downloader.DatabaseDownloader(
            self.pakfire,
            text=_("%s: package database") % self.name,
        )
        grabber = self.mirrors.group(grabber)

        while True:
            # Open file on server.
            urlobj = fileobj = grabber.urlopen(filename)

            if self.metadata.database_compression:
                fileobj = compress.decompressobj(
                    fileobj=fileobj, algo=self.metadata.database_compression)

            # Make a new file in the cache.
            cacheobj = self.cache.open(cache_filename, "wb")

            try:
                while True:
                    buf = fileobj.read(BUFFER_SIZE)
                    if not buf:
                        break
                    cacheobj.write(buf)

            finally:
                # XXX we should catch decompression errors

                # Close all file descriptors.
                cacheobj.close()
                fileobj.close()
                if urlobj != fileobj:
                    urlobj.close()

            break
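
The inner download loop is a plain chunked copy that stops when read() returns an empty buffer; the same pattern in isolation (BUFFER_SIZE is an assumed constant):

BUFFER_SIZE = 64 * 1024  # assumed chunk size

def copy_stream(fileobj, cacheobj):
    # Copy until the source is exhausted.
    while True:
        buf = fileobj.read(BUFFER_SIZE)
        if not buf:
            break
        cacheobj.write(buf)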
Example #5
0
    def initialize(self):
        # Open the database.
        self.open()

        # Check if we actually can open the database.
        if self.format not in DATABASE_FORMATS_SUPPORTED:
            raise DatabaseFormatError, _(
                "The format of the database is not supported by this version of pakfire."
            )
Example #6
0
	def execute(self):
		# Save start time.
		self.time_start = time.time()

		if self.logger:
			self.logger.debug(_("Executing command: %s in %s") % (self.command, self.chroot_path or "/"))

		child = None
		try:
			# Create new child process
			child = self.create_subprocess()

			# Record the output.
			self.tee_log(child)
		except:
			# In case there has been an error, kill children if they aren't done
			if child and child.returncode is None:
				os.killpg(child.pid, 9)

			try:
				if child:
					os.waitpid(child.pid, 0)
			except:
				pass

			# Raise original exception.
			raise

		finally:
			# Save end time.
			self.time_end = time.time()

		# wait until child is done, kill it if it passes timeout
		nice_exit = True
		while child.poll() is None:
			if self.timeout_has_been_exceeded():
				nice_exit = False
				os.killpg(child.pid, 15)

			if self.timeout_has_been_exceeded(3):
				nice_exit = False
				os.killpg(child.pid, 9)

		if not nice_exit:
			raise commandTimeoutExpired, (_("Command exceeded timeout (%(timeout)d): %(command)s") % { "timeout" : self.timeout, "command" : self.command })

		# Save exitcode.
		self.exitcode = child.returncode

		if self.logger:
			self.logger.debug(_("Child returncode was: %s") % self.exitcode)

		if self.exitcode and self.log_errors:
			raise ShellEnvironmentError, (_("Command failed: %s") % self.command, self.exitcode)

		return self.exitcode
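
The timeout handling above escalates from SIGTERM (15) to SIGKILL (9) after a few seconds of grace. A condensed, self-contained sketch of that escalation; the subprocess setup here is illustrative, not pakfire's actual create_subprocess():

import os
import signal
import subprocess
import time

def run_with_timeout(args, timeout):
	# Start the child in its own process group so killpg reaches all children.
	child = subprocess.Popen(args, preexec_fn=os.setsid)

	deadline = time.time() + timeout
	while child.poll() is None:
		if time.time() > deadline:
			os.killpg(child.pid, signal.SIGTERM)
		if time.time() > deadline + 3:
			os.killpg(child.pid, signal.SIGKILL)
		time.sleep(0.1)

	return child.returncode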
Example #7
0
	def update_database(self, force=False, offline=False):
		assert self.metadata, "Metadata needs to be opened first."

		# Construct cache and download filename.
		filename = os.path.join(METADATA_DOWNLOAD_PATH, self.metadata.database)
		cache_filename = self.cache_path("database", self.metadata.database)

		if not force:
			force = not self.cache.exists(cache_filename)

		# Raise an exception when we are running in offline mode but an update is required.
		if force and offline:
			raise OfflineModeError, _("Cannot download package database for %s in offline mode.") % self.name

		elif not force:
			return

		# Just make sure we don't try to download anything in offline mode.
		assert not offline

		# Initialize a grabber for download.
		grabber = downloader.DatabaseDownloader(
			self.pakfire,
			text = _("%s: package database") % self.name,
		)
		grabber = self.mirrors.group(grabber)

		while True:
			# Open file on server.
			urlobj = fileobj = grabber.urlopen(filename)

			if self.metadata.database_compression:
				fileobj = compress.decompressobj(fileobj=fileobj,
					algo=self.metadata.database_compression)

			# Make a new file in the cache.
			cacheobj = self.cache.open(cache_filename, "wb")

			try:
				while True:
					buf = fileobj.read(BUFFER_SIZE)
					if not buf:
						break
					cacheobj.write(buf)

			finally:
				# XXX we should catch decompression errors

				# Close all file descriptors.
				cacheobj.close()
				fileobj.close()
				if urlobj != fileobj:
					urlobj.close()

			break
Example #8
0
	def lint(self):
		errors = []

		if not self.name:
			errors.append(_("Package name is undefined."))

		if not self.version:
			errors.append(_("Package version is undefined."))

		# XXX to do...

		return errors
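
A hedged sketch of how a caller might consume the error list lint() returns; report_lint() is a made-up helper:

def report_lint(pkg):
	# Print every problem and report whether the package is clean.
	errors = pkg.lint()
	for error in errors:
		print("Lint: %s" % error)

	return not errors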
Example #9
0
    def lint(self):
        errors = []

        if not self.name:
            errors.append(_("Package name is undefined."))

        if not self.version:
            errors.append(_("Package version is undefined."))

        # XXX to do...

        return errors
Example #10
0
	def update_metadata(self, force=False, offline=False):
		filename = os.path.join(METADATA_DOWNLOAD_PATH, METADATA_DOWNLOAD_FILE)
		cache_filename = self.cache_path(os.path.basename(filename))

		# Check if the metadata is already recent enough...
		exists = self.cache.exists(cache_filename)

		if not exists and offline:
			raise OfflineModeError, _("No metadata available for repository %s. Cannot download any.") \
				% self.name

		elif exists and offline:
			# Repository metadata exists. We cannot update anything because of the offline mode.
			return

		if not force and exists:
			age = self.cache.age(cache_filename)
			if age and age < TIME_10M:
				log.debug("Metadata is recent enough. I don't download it again.")
				return

		# Going to download metadata.
		log.debug("Going to download repository metadata for %s..." % self.name)
		assert not offline

		grabber = downloader.MetadataDownloader(self.pakfire)
		grabber = self.mirrors.group(grabber)

		while True:
			try:
				data = grabber.urlread(filename, limit=METADATA_DOWNLOAD_LIMIT)
			except urlgrabber.grabber.URLGrabError, e:
				if e.errno == 256:
					raise DownloadError, _("Could not update metadata for %s from any mirror server") % self.name

				grabber.increment_mirror(grabber)
				continue

			# Parse new metadata for comparison.
			md = metadata.Metadata(self.pakfire, metadata=data)

			if self.metadata and md < self.metadata:
				log.warning(_("The downloaded metadata was less recent than the current one."))
				grabber.increment_mirror(grabber)
				continue

			# If the download went well, we write the downloaded data to disk
			# and break the loop.
			f = self.cache.open(cache_filename, "w")
			f.write(data)
			f.close()

			break
Example #11
0
	def terminate_worker(self, worker):
		"""
			Terminates the given worker.
		"""
		log.warning(_("Terminating worker process: %s") % worker)

		worker.terminate()
Example #12
0
	def add_packages(self, files):
		# Search for possible package files in the paths.
		files = self.search_files(*files)

		# Give up if there are no files to process.
		if not files:
			return

		# Create progress bar.
		pb = util.make_progress(_("%s: Adding packages...") % self.name, len(files))
		i = 0

		for file in files:
			if pb:
				i += 1
				pb.update(i)

			# Add the package to the repository.
			self.add_package(file, optimize_index=False)

		# Optimize the index.
		self.optimize_index()

		if pb:
			pb.finish()

		# Optimize the index.
		self.index.optimize()
Example #13
0
	def run(self, resultdir):
		# Create resultdir if it does not exist yet.
		if not os.path.exists(resultdir):
			os.makedirs(resultdir)

		log.info(_("Building source package %s:") % self.pkg.package_filename)

		# The filename where this source package is saved at.
		target_filename = os.path.join(resultdir, self.pkg.package_filename)

		# Add datafile to package.
		datafile = self.create_datafile()

		# Create filelist out of data.
		filelist = self.create_filelist(datafile)

		# Create metadata.
		metafile = self.create_metafile(datafile)

		# Add files to the tar archive in correct order.
		self.add(metafile, "info")
		self.add(filelist, "filelist")
		self.add(datafile, "data.img")

		# Build the final tarball.
		try:
			self.save(target_filename)
		except:
			# Remove the target file when anything went wrong;
			# it may not exist yet, so ignore a failing unlink.
			try:
				os.unlink(target_filename)
			except OSError:
				pass
			raise

		return target_filename
Example #14
0
    def open(self):
        # Initialize database.
        self.db.initialize()

        # Create a progressbar.
        pb = util.make_progress(_("Loading installed packages"), len(self.db))

        # Remove all data from the current index.
        self.index.clear()

        i = 0
        for pkg in self.db.packages:
            if pb:
                i += 1
                pb.update(i)

            self.index.add_package(pkg)

        self.index.optimize()

        if pb:
            pb.finish()

        # Mark repo as open.
        self.opened = True
Example #15
0
	def cpu_model(self):
		# Determine CPU model
		cpuinfo = {}
		with open("/proc/cpuinfo") as f:
			for line in f.readlines():
				# Break at the first empty line, because all information
				# after that is redundant. (readlines() keeps the trailing
				# newline, so test the stripped line.)
				if not line.strip():
					break

				try:
					key, value = line.split(":", 1)
				except ValueError:
					continue # Skip invalid lines.

				key, value = key.strip(), value.strip()

				cpuinfo[key] = value

		ret = None
		if self.uname.startswith("arm"):
			try:
				ret = "%(Hardware)s - %(Processor)s" % cpuinfo
			except KeyError:
				pass
		else:
			ret = cpuinfo.get("model name", None)

		return ret or _("Could not be determined")
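
The ARM branch relies on named %-placeholders pulling keys straight out of the cpuinfo dict, with KeyError signalling a missing field; a tiny standalone illustration (sample values are made up):

cpuinfo = {
	"Hardware"  : "BCM2835",
	"Processor" : "ARMv6-compatible processor rev 7 (v6l)",
}

try:
	model = "%(Hardware)s - %(Processor)s" % cpuinfo
except KeyError:
	model = None	# One of the keys was missing on this system.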
Example #16
0
	def open(self):
		# Initialize database.
		self.db.initialize()

		# Create a progressbar.
		pb = util.make_progress(_("Loading installed packages"), len(self.db))

		# Remove all data from the current index.
		self.index.clear()

		i = 0
		for pkg in self.db.packages:
			if pb:
				i += 1
				pb.update(i)

			self.index.add_package(pkg)

		self.index.optimize()

		if pb:
			pb.finish()

		# Mark repo as open.
		self.opened = True
Example #17
0
    def add_packages(self, files):
        # Search for possible package files in the paths.
        files = self.search_files(*files)

        # Give up if there are no files to process.
        if not files:
            return

        # Create progress bar.
        pb = util.make_progress(
            _("%s: Adding packages...") % self.name, len(files))
        i = 0

        for file in files:
            if pb:
                i += 1
                pb.update(i)

            # Add the package to the repository.
            self.add_package(file, optimize_index=False)

        # Optimize the index.
        self.optimize_index()

        if pb:
            pb.finish()

        # Optimize the index.
        self.index.optimize()
Example #18
0
	def download(self, files):
		existent_files = []
		download_files = []

		for file in files:
			filename = os.path.join(SOURCE_CACHE_DIR, file)
			log.debug("Checking existance of %s..." % filename)

			if os.path.exists(filename) and os.path.getsize(filename):
				log.debug("...exists!")
				existent_files.append(filename)
			else:
				log.debug("...does not exist!")
				download_files.append(filename)

		if download_files:
			log.info(_("Downloading source files:"))

			if self.pakfire.offline:
				raise OfflineModeError, _("Cannot download source code in offline mode.")

			# Create source download directory.
			if not os.path.exists(SOURCE_CACHE_DIR):
				os.makedirs(SOURCE_CACHE_DIR)

			for filename in download_files:
				try:
					self.grabber.urlgrab(os.path.basename(filename), filename=filename)
				except URLGrabError, e:
					# Remove partly downloaded file.
					try:
						os.unlink(filename)
					except OSError:
						pass

					raise DownloadError, "%s %s" % (os.path.basename(filename), e)

				# Check if the downloaded file was empty.
				if os.path.getsize(filename) == 0:
					# Remove the file and raise an error.
					os.unlink(filename)

					raise DownloadError, _("Downloaded empty file: %s") \
						% os.path.basename(filename)

			log.info("")
Example #19
0
	def build_date(self):
		"""
			Automatically convert the UNIX timestamp from self.build_time
			into a human-readable format.
		"""
		if self.build_time is None:
			return _("Not set")

		return "%s UTC" % datetime.datetime.utcfromtimestamp(self.build_time)
Example #20
0
	def shutdown(self):
		"""
			Terminates all workers and exits the daemon.
		"""
		if not self.__running:
			return

		log.info(_("Shutting down..."))
		self.__running = False
Example #21
0
    def filter_deps(deps, filters):
        if not filters:
            return deps

        _filters = []
        filtered_deps = []

        # Compile all filters.
        for filter in filters.splitlines():
            # Convert to a raw string to make escaping characters
            # easy for the user.
            try:
                filter = "%r" % filter
                _filter = re.compile(filter[1:-1])
            except re.error:
                log.warning(
                    _("Regular expression is invalid and has been skipped: %s")
                    % filter)
                continue

            _filters.append(_filter)

        filters = _filters

        for dep in deps:
            filtered = False
            for filter in filters:
                # Search for a match anywhere in the line.
                m = re.search(filter, dep)
                if not m:
                    continue

                # Let the user know what has been done.
                log.info(_("Filter '%(pattern)s' filtered %(dep)s.") % \
                 { "pattern" : filter.pattern, "dep" : dep })

                # Yes, we found a match.
                filtered = True
                break

            if not filtered:
                filtered_deps.append(dep)

        return filtered_deps
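
filter_deps() expects the filters as one newline-separated string of regular expressions and returns the dependencies that match none of them; a hedged usage sketch with made-up values:

deps = ["libfoo.so.1", "libbar.so.2", "/usr/bin/python"]

filters = "\n".join([
    r"^libbar\.so",  # drop anything provided by libbar
    r"^/usr/bin/",   # drop interpreter paths
])

# Expected to keep only "libfoo.so.1".
remaining = filter_deps(deps, filters)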
Example #22
0
	def filter_deps(deps, filters):
		if not filters:
			return deps

		_filters = []
		filtered_deps = []

		# Compile all filters.
		for filter in filters.splitlines():
			# Convert to a raw string to make escaping characters
			# easy for the user.
			try:
				filter = "%r" % filter
				_filter = re.compile(filter[1:-1])
			except re.error:
				log.warning(_("Regular experession is invalid and has been skipped: %s") % filter)
				continue

			_filters.append(_filter)

		filters = _filters

		for dep in deps:
			filtered = False
			for filter in filters:
				# Search for a match anywhere in the line.
				m = re.search(filter, dep)
				if not m:
					continue

				# Let the user know what has been done.
				log.info(_("Filter '%(pattern)s' filtered %(dep)s.") % \
					{ "pattern" : filter.pattern, "dep" : dep })

				# Yes, we found a match.
				filtered = True
				break

			if not filtered:
				filtered_deps.append(dep)

		return filtered_deps
Example #23
0
	def initialize(self):
		# Nothing to do, if everything is already up to date.
		if self.initialized:
			return

		log.info(_("Initializing repositories..."))
		for repo in self:
			repo.open()

		# Empty line.
		log.info("")
Example #24
0
	def parse_inherit(self):
		line = self.get_line(self._lineno)

		m = re.match(LEXER_PACKAGE_INHERIT, line)
		if not m:
			raise LexerError, "Not a template inheritance: %s" % line

		self._lineno += 1

		self._template = m.group(1)

		# Check if template exists.
		if not self.template:
			log.warning(_("Template does not exist: %s") % self._template)
Example #25
0
    def extract(self, member, path=""):
        target = os.path.join(path, member.name)

        # Remove symlink targets, because tarfile cannot replace them.
        if member.issym():
            try:
                os.unlink(target)
            except OSError:
                pass

        # Extract file the normal way...
        try:
            tarfile.TarFile.extract(self, member, path)
        except OSError, e:
            log.warning(_("Could not extract file: /%(src)s - %(dst)s") \
             % { "src" : member.name, "dst" : e, })
Example #26
0
	def extract(self, member, path=""):
		target = os.path.join(path, member.name)

		# Remove symlink targets, because tarfile cannot replace them.
		if member.issym():
			try:
				os.unlink(target)
			except OSError:
				pass

		# Extract file the normal way...
		try:
			tarfile.TarFile.extract(self, member, path)
		except OSError, e:
			log.warning(_("Could not extract file: /%(src)s - %(dst)s") \
				% { "src" : member.name, "dst" : e, })
Example #27
0
    def track_dependencies(self, builder, path):
        # Build filelist with all files that have been installed.
        f = tempfile.NamedTemporaryFile(mode="w", delete=False)
        filelist = f.name

        try:
            for dir, subdirs, files in os.walk(path):
                f.write("%s\n" % dir)

                for file in files:
                    file = os.path.join(dir, file)
                    f.write("%s\n" % file)

                    file = "/%s" % os.path.relpath(file, path)
                    self._filelist.add(file)
            f.close()

            log.info(
                _("Searching for automatic dependencies for %s...") %
                self.friendly_name)

            # Search for provides.
            res = builder.run_script("find-provides", path, filelist)
            provides = set(res.splitlines())

            # Search for requires.
            res = builder.run_script("find-requires", path, filelist)
            requires = set(res.splitlines()) - provides

        finally:
            os.unlink(filelist)

        self._dependencies["provides"] = provides
        self._dependencies["requires"] = requires

        # Filter dependencies.
        for key in ("prerequires", "requires", "provides", "conflicts",
                    "obsoletes"):
            # Make sure this is a list.
            try:
                self._dependencies[key] = list(self._dependencies[key])
            except KeyError:
                self._dependencies[key] = []

            # Filter out unwanted elements.
            self._dependencies[key] = self.filter_deps(
                self._dependencies[key], self.lexer.get_var("filter_%s" % key))
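
The requires set above is everything find-requires reports minus what the package itself provides; the set arithmetic in isolation, with made-up symbols:

provides = set(["libfoo.so.1", "config(foo)"])
reported = set(["libfoo.so.1", "libc.so.6", "libm.so.6"])

# Symbols we provide ourselves are not external requirements.
requires = reported - provides
# requires == set(["libc.so.6", "libm.so.6"])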
Example #28
0
	def track_dependencies(self, builder, path):
		# Build filelist with all files that have been installed.
		f = tempfile.NamedTemporaryFile(mode="w", delete=False)
		filelist = f.name

		try:
			for dir, subdirs, files in os.walk(path):
				f.write("%s\n" % dir)

				for file in files:
					file = os.path.join(dir, file)
					f.write("%s\n" % file)

					file = "/%s" % os.path.relpath(file, path)
					self._filelist.add(file)
			f.close()

			log.info(_("Searching for automatic dependencies for %s...") % self.friendly_name)

			# Search for provides.
			res = builder.run_script("find-provides", path, filelist)
			provides = set(res.splitlines())

			# Search for requires.
			res = builder.run_script("find-requires", path, filelist)
			requires = set(res.splitlines()) - provides

		finally:
			os.unlink(filelist)

		self._dependencies["provides"] = provides
		self._dependencies["requires"] = requires

		# Filter dependencies.
		for key in ("prerequires", "requires", "provides", "conflicts", "obsoletes"):
			# Make sure this is a list.
			try:
				self._dependencies[key] = list(self._dependencies[key])
			except KeyError:
				self._dependencies[key] = []

			# Filter out unwanted elements.
			self._dependencies[key] = self.filter_deps(
				self._dependencies[key], self.lexer.get_var("filter_%s" % key)
			)
Example #29
0
	def create_datafile(self):
		# Create a list of all files that have to be put into the
		# package.
		files = []

		# Download all files that go into the package.
		for file in self.pkg.download():
			assert os.path.getsize(file), "Don't package empty files"
			files.append(("files/%s" % os.path.basename(file), file))

		# Add all files in the package directory.
		for file in self.pkg.files:
			files.append((os.path.relpath(file, self.pkg.path), file))

		# Add files in alphabetical order.
		files.sort()

		# Load progressbar.
		message = "%-10s : %s" % (_("Packaging"), self.pkg.friendly_name)
		pb = util.make_progress(message, len(files), eta=False)

		filename = self.mktemp()
		if self.payload_compression == "xz":
			datafile = tar.InnerTarFileXz.open(filename, mode="w")
		else:
			datafile = tar.InnerTarFile.open(filename, mode="w")

		i = 0
		for arcname, file in files:
			if pb:
				i += 1
				pb.update(i)

			datafile.add(file, arcname)
		datafile.close()

		if pb:
			pb.finish()

		return filename
Example #30
0
	def open(self):
		# Find all files in the repository dir.
		files = self.search_files(self.path)

		# Create progress bar.
		pb = util.make_progress(_("%s: Reading packages...") % self.name, len(files))
		i = 0

		# Add all files to the index.
		for file in files:
			if pb:
				i += 1
				pb.update(i)

			pkg = packages.open(self.pakfire, self, file)
			self.index.add_package(pkg)

		if pb:
			pb.finish()

		# Mark repo as open.
		self.opened = True
Example #31
0
	def restart_keepalive(self, wait=None):
		log.critical(_("Restarting keepalive process"))

		# Send SIGTERM to really end the process.
		self.keepalive.terminate()

		# Wait for the process to terminate.
		if wait:
			self.keepalive.join(wait)

		# Remove the keepalive process from the process list.
		try:
			self.__workers.remove(self.keepalive)
		except ValueError:
			pass

		# Create a new process and start it.
		self.keepalive = PakfireDaemonKeepalive(self.config)
		self.keepalive.start()

		# Add the process to the process list.
		self.__workers.append(self.keepalive)
Example #32
0
	def send_builder_info(self):
		log.info(_("Sending builder information to hub..."))

		data = {
			# CPU info
			"cpu_model"       : system.cpu_model,
			"cpu_count"       : system.cpu_count,
			"cpu_arch"        : system.native_arch,
			"cpu_bogomips"    : system.cpu_bogomips,

			# Memory + swap
			"mem_total"       : system.memory,
			"swap_total"      : system.swap_total,

			# Pakfire + OS
			"pakfire_version" : PAKFIRE_VERSION,
			"host_key"        : self.config.get("signatures", "host_key", None),
			"os_name"         : system.distro.pretty_name,

			# Supported arches
			"supported_arches" : ",".join(system.supported_arches),
		}
		self.transport.post("/builders/info", data=data)
Example #33
0
    def open(self):
        # Find all files in the repository dir.
        files = self.search_files(self.path)

        # Create progress bar.
        pb = util.make_progress(
            _("%s: Reading packages...") % self.name, len(files))
        i = 0

        # Add all files to the index.
        for file in files:
            if pb:
                i += 1
                pb.update(i)

            pkg = packages.open(self.pakfire, self, file)
            self.index.add_package(pkg)

        if pb:
            pb.finish()

        # Mark repo as open.
        self.opened = True
Example #34
0
	def info(self):
		ret = []

		ret.append("")
		ret.append("  PAKFIRE %s" % PAKFIRE_VERSION)
		ret.append("")
		ret.append("  %-20s: %s" % (_("Hostname"), self.hostname))
		ret.append("")

		# Hardware information
		ret.append("  %s:" % _("Hardware information"))
		ret.append("      %-16s: %s" % (_("CPU model"), self.cpu_model))
		ret.append("      %-16s: %s" % (_("Memory"),    pakfire.util.format_size(self.memory)))
		ret.append("")
		ret.append("      %-16s: %s" % (_("Native arch"), system.native_arch))

		header = _("Supported arches")
		for arch in self.config.supported_arches:
			ret.append("      %-16s: %s" % (header, arch))
			header = ""
		ret.append("")

		return ret
Example #35
0
	def create_datafile(self):
		includes = []
		excludes = []

		# List of all patterns, which grows.
		patterns = self.pkg.files

		# Collect the orphan directories that exist inside the buildroot.
		orphan_directories = []
		for d in ORPHAN_DIRECTORIES:
			if d.startswith("usr/"):
				b = os.path.basename(d)
				b = os.path.join(self.buildroot, b)

				if os.path.islink(b):
					continue

			d = os.path.join(self.buildroot, d)
			if not os.path.islink(d):
				orphan_directories.append(d)

		for pattern in patterns:
			# Check if we are running in include or exclude mode.
			if pattern.startswith("!"):
				files = excludes

				# Strip the ! character.
				pattern = pattern[1:]
			else:
				files = includes

			# Expand file to point to chroot.
			if pattern.startswith("/"):
				pattern = pattern[1:]
			pattern = os.path.join(self.buildroot, pattern)

			# Recognize the type of the pattern. Patterns could be a glob
			# pattern that is expanded here or just a directory which will
			# be included recursively.
			if "*" in pattern or "?" in pattern or ("[" in pattern and "]" in pattern):
				_patterns = glob.glob(pattern)
			else:
				_patterns = [pattern,]

			for pattern in _patterns:
				# Try to stat the pattern. If that is not successful, we cannot go on.
				try:
					os.lstat(pattern)
				except OSError:
					continue

				# Add directories recursively but skip those symlinks
				# that point to a directory.
				if os.path.isdir(pattern) and not os.path.islink(pattern):
					# Add directory itself.
					files.append(pattern)

					for dir, subdirs, _files in os.walk(pattern):
						for subdir in subdirs:
							subdir = os.path.join(dir, subdir)

							# orphan_directories holds absolute paths, so
							# compare the joined path, not the bare name.
							if subdir in orphan_directories:
								continue
							files.append(subdir)

						for file in _files:
							file = os.path.join(dir, file)
							files.append(file)

				# All other files are just added.
				else:
					files.append(pattern)

		files = []
		for file in includes:
			# Skip if file is already in the file set or
			# marked to be excluded from this archive.
			if file in excludes or file in files:
				continue

			# Skip orphan directories.
			if file in orphan_directories and not os.listdir(file):
				log.debug("Found an orphaned directory: %s" % file)
				continue

			files.append(file)

			while True:
				file = os.path.dirname(file)

				if file == self.buildroot:
					break

				if not file in files:
					files.append(file)

		files.sort()

		# Load progressbar.
		message = "%-10s : %s" % (_("Packaging"), self.pkg.friendly_name)
		pb = util.make_progress(message, len(files), eta=False)

		datafile = self.mktemp()
		if self.payload_compression == "xz":
			t = tar.InnerTarFileXz.open(datafile, mode="w")
		else:
			t = tar.InnerTarFile.open(datafile, mode="w")

		# All files in the tarball are relative to this directory.
		basedir = self.buildroot

		i = 0
		for file in files:
			if pb:
				i += 1
				pb.update(i)

			# Never package /.
			if os.path.normpath(file) == os.path.normpath(basedir):
				continue

			# Name of the file in the archive.
			arcname = "/%s" % os.path.relpath(file, basedir)

			# Add file to tarball.
			t.add(file, arcname=arcname, recursive=False)

		# Remove all packaged files.
		for file in reversed(files):
			# It's okay if we cannot remove directories,
			# when they are not empty.
			if os.path.isdir(file):
				try:
					os.rmdir(file)
				except OSError:
					continue
			else:
				try:
					os.unlink(file)
				except OSError:
					pass

			while True:
				file = os.path.dirname(file)

				if not file.startswith(basedir):
					break

				try:
					os.rmdir(file)
				except OSError:
					break

		# Close the tarfile.
		t.close()

		# Finish progressbar.
		if pb:
			pb.finish()

		return datafile
Example #36
0
	def download(self, pkg, text="", logger=None):
		"""
			Downloads 'filename' from repository and returns the local filename.
		"""
		if logger is None:
			logger = log

		filename, hash1 = pkg.filename, pkg.hash1

		# Marker, if we need to download the package.
		download = True

		cache_filename = pkg.cache_filename

		# Check if file already exists in cache.
		if self.cache.exists(cache_filename):
			logger.debug("File exists in cache: %s" % filename)

			# If the file does already exist, we check if the hash1 matches.
			if hash1 and self.cache.verify(cache_filename, hash1):
				# We already got the right file. Skip download.
				download = False
			else:
				# The file in cache has a wrong hash. Remove it and repeat download.
				self.cache.remove(cache_filename)

		# Get a package grabber and add mirror download capabilities to it.
		grabber = downloader.PackageDownloader(
			self.pakfire,
			text=text + os.path.basename(filename),
		)
		grabber = self.mirrors.group(grabber)

		# Make sure filename is of type string (and not unicode)
		filename = str(filename)

		while download:
			logger.debug("Going to download %s" % filename)

			# If we are in offline mode, we cannot download any files.
			if self.pakfire.offline and not self.baseurl.startswith("file://"):
				raise OfflineModeError, _("Cannot download this file in offline mode: %s") \
					% filename

			try:
				i = grabber.urlopen(filename)
			except urlgrabber.grabber.URLGrabError, e:
				raise DownloadError, _("Could not download %s: %s") % (filename, e)

			# Open input and output files and download the file.
			o = self.cache.open(cache_filename, "w")

			buf = i.read(BUFFER_SIZE)
			while buf:
				o.write(buf)
				buf = i.read(BUFFER_SIZE)

			i.close()
			o.close()

			# Calc the hash1 of the downloaded file.
			calc_hash1 = self.cache.hash1(cache_filename)

			if calc_hash1 == hash1:
				logger.debug("Successfully downloaded %s (%s)." % (filename, hash1))
				break

			sums = {
				"good" : hash1,
				"bad"  : calc_hash1,
			}

			logger.warning(_("The checksum of the downloaded file did not match."))
			logger.warning(_("Expected %(good)s but got %(bad)s.") % sums)
			logger.warning(_("Trying an other mirror."))

			# Remove the bad file.
			self.cache.remove(cache_filename)

			# Go to the next mirror.
			grabber.increment_mirror(grabber)
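
The retry loop boils down to: download, hash, compare, and advance to the next mirror on a mismatch. A sketch of the verification step with hashlib; that "hash1" means SHA-1 is an assumption here:

import hashlib

def verify_hash1(path, expected):
	# Hash the downloaded file and compare against the expected digest.
	h = hashlib.sha1()
	with open(path, "rb") as f:
		while True:
			buf = f.read(65536)
			if not buf:
				break
			h.update(buf)

	return h.hexdigest() == expected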
Example #37
0
    def download(self, pkg, text="", logger=None):
        """
			Downloads 'filename' from repository and returns the local filename.
		"""
        if logger is None:
            logger = log

        filename, hash1 = pkg.filename, pkg.hash1

        # Marker, if we need to download the package.
        download = True

        cache_filename = pkg.cache_filename

        # Check if file already exists in cache.
        if self.cache.exists(cache_filename):
            logger.debug("File exists in cache: %s" % filename)

            # If the file does already exist, we check if the hash1 matches.
            if hash1 and self.cache.verify(cache_filename, hash1):
                # We already got the right file. Skip download.
                download = False
            else:
                # The file in cache has a wrong hash. Remove it and repeat download.
                self.cache.remove(cache_filename)

        # Get a package grabber and add mirror download capabilities to it.
        grabber = downloader.PackageDownloader(
            self.pakfire,
            text=text + os.path.basename(filename),
        )
        grabber = self.mirrors.group(grabber)

        # Make sure filename is of type string (and not unicode)
        filename = str(filename)

        while download:
            logger.debug("Going to download %s" % filename)

            # If we are in offline mode, we cannot download any files.
            if self.pakfire.offline and not self.baseurl.startswith("file://"):
                raise OfflineModeError, _("Cannot download this file in offline mode: %s") \
                 % filename

            try:
                i = grabber.urlopen(filename)
            except urlgrabber.grabber.URLGrabError, e:
                raise DownloadError, _("Could not download %s: %s") % (
                    filename, e)

            # Open input and output files and download the file.
            o = self.cache.open(cache_filename, "w")

            buf = i.read(BUFFER_SIZE)
            while buf:
                o.write(buf)
                buf = i.read(BUFFER_SIZE)

            i.close()
            o.close()

            # Calc the hash1 of the downloaded file.
            calc_hash1 = self.cache.hash1(cache_filename)

            if calc_hash1 == hash1:
                logger.debug("Successfully downloaded %s (%s)." %
                             (filename, hash1))
                break

            sums = {
                "good": hash1,
                "bad": calc_hash1,
            }

            logger.warning(
                _("The checksum of the downloaded file did not match."))
            logger.warning(_("Expected %(good)s but got %(bad)s.") % sums)
            logger.warning(_("Trying an other mirror."))

            # Remove the bad file.
            self.cache.remove(cache_filename)

            # Go to the next mirror.
            grabber.increment_mirror(grabber)
Example #38
0
	def save(self, path=None, algo="xz"):
		"""
			This function saves the database and metadata to path so it can
			be exported to a remote repository.
		"""
		if not path:
			path = self.path

		# Create filenames
		metapath = os.path.join(path, METADATA_DOWNLOAD_PATH)
		db_path = os.path.join(metapath, METADATA_DATABASE_FILE)
		md_path = os.path.join(metapath, METADATA_DOWNLOAD_FILE)

		# Remove all pre-existing metadata.
		if os.path.exists(metapath):
			util.rm(metapath)

		# Create directory for metadata.
		os.makedirs(metapath)

		# Save the database to path and get the filename.
		self.index.write(db_path)

		# Give the database file a unique, hash-based name so we won't
		# get into any trouble with caching proxies.
		db_hash = util.calc_hash1(db_path)

		db_path2 = os.path.join(os.path.dirname(db_path),
			"%s-%s" % (db_hash, os.path.basename(db_path)))

		# Compress the database.
		if algo:
			# Open input file and get filesize of input file.
			f = open(db_path)
			filesize = os.path.getsize(db_path)

			# Make a nice progress bar.
			p = util.make_progress(_("Compressing database..."), filesize)

			# Create compressing file handler.
			c = compress.compressobj(db_path2)

			try:
				size = 0
				while True:
					buf = f.read(BUFFER_SIZE)
					if not buf:
						break

					if p:
						size += len(buf)
						p.update(size)

					c.write(buf)
			except:
				# XXX catch compression errors
				raise

			finally:
				f.close()
				c.close()

				# make_progress() may have returned None.
				if p:
					p.finish()

				# Remove old database.
				os.unlink(db_path)

		else:
			shutil.move(db_path, db_path2)

		# Create a new metadata object and add our information to it.
		md = metadata.Metadata(self.pakfire)

		# Save name of the hashed database to the metadata.
		md.database = os.path.basename(db_path2)
		md.database_hash1 = db_hash
		md.database_compression = algo

		# Save metadata to the repository.
		md.save(md_path)
Example #39
0
	def extract(self, message=None, prefix=None):
		log.debug("Extracting package %s" % self.friendly_name)

		if prefix is None:
			prefix = ""

		# Open package data for read.
		payload_archive = self.open_payload_archive()

		# Load progressbar.
		pb = None
		if message:
			message = "%-10s : %s" % (message, self.friendly_name)
			pb = util.make_progress(message, len(self.filelist), eta=False)

		# Collect messages with errors and warnings, that are passed to
		# the user.
		messages = []

		name2file = {}
		for file in self.filelist:
			if file.is_dir() and file.name.endswith("/"):
				name = file.name[:-1]
			else:
				name = file.name

			name2file[name] = file

		i = 0
		while True:
			member = payload_archive.next()
			if not member:
				break

			# Check if file is also known in metadata.
			name = member.name
			if not name.startswith("/"):
				name = "/%s" % name

			try:
				file = name2file[name]
			except KeyError:
				log.warning(_("File in archive is missing in file metadata: %s. Skipping.") % name)
				continue

			# Update progress.
			if pb:
				i += 1
				pb.update(i)

			target = os.path.join(prefix, member.name)

			# Check if a configuration file is already present. We don't want to
			# overwrite that.
			if file.is_config():
				config_save = "%s%s" % (target, CONFIG_FILE_SUFFIX_SAVE)
				config_new  = "%s%s" % (target, CONFIG_FILE_SUFFIX_NEW)

				if os.path.exists(config_save) and not os.path.exists(target):
					# Extract new configuration file, save it as CONFIG_FILE_SUFFIX_NEW,
					# and reuse _SAVE.
					payload_archive.extract(member, path=prefix)

					shutil.move(target, config_new)
					shutil.move(config_save, target)
					continue

				elif os.path.exists(target):
					# If the files are identical, we skip the extraction of a
					# new configuration file. We also do that when the new configuration file
					# is a dummy file.
					if file.size == 0:
						continue

					# Calc hash of the current configuration file.
					config_hash1 = hashlib.new("sha512")
					f = open(target)
					while True:
						buf = f.read(BUFFER_SIZE)
						if not buf:
							break
						config_hash1.update(buf)
					f.close()

					if file.hash1 == config_hash1.hexdigest():
						continue

					# Backup old configuration file and extract new one.
					shutil.move(target, config_save)
					payload_archive.extract(member, path=prefix)

					# Save new configuration file as CONFIG_FILE_SUFFIX_NEW and
					# restore old configuration file.
					shutil.move(target, config_new)
					shutil.move(config_save, target)

					if prefix:
						config_new = os.path.relpath(config_new, prefix)
					messages.append(_("Config file created as %s") % config_new)
					continue

			# Don't overwrite target files if they already exist.
			if file.is_datafile() and os.path.exists(target):
				log.debug(_("Don't overwrite already existing datafile '/%s'") % member.name)
				continue

			# If the member is a directory and if it already exists, we
			# don't need to create it again.
			if os.path.exists(target):
				if member.isdir():
					continue

				else:
					# Remove the file if it already exists.
					try:
						os.unlink(target)
					except OSError:
						messages.append(_("Could not remove file: /%s") % member.name)

			#if self.pakfire.config.get("debug"):
			#	msg = "Creating file (%s:%03d:%03d) " % \
			#		(tarfile.filemode(member.mode), member.uid, member.gid)
			#	if member.issym():
			#		msg += "/%s -> %s" % (member.name, member.linkname)
			#	elif member.islnk():
			#		msg += "/%s link to /%s" % (member.name, member.linkname)
			#	else:
			#		msg += "/%s" % member.name
			#	log.debug(msg)

			payload_archive.extract(member, path=prefix)

		# Close all open files.
		payload_archive.close()

		if pb:
			pb.finish()

		# Print messages.
		for msg in messages:
			log.warning(msg)
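
The "is the existing configuration file unchanged?" test above is a plain SHA-512 over the on-disk file, compared against the hash carried in the package metadata; the hashing step in isolation:

import hashlib

def file_sha512(path, bufsize=65536):
	# Hash an on-disk file the same way the extraction code does.
	h = hashlib.new("sha512")
	with open(path, "rb") as f:
		while True:
			buf = f.read(bufsize)
			if not buf:
				break
			h.update(buf)

	return h.hexdigest()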
Example #40
0
	def _remove_files(self, files, message, prefix):
		if prefix in ("/", None):
			prefix = ""

		# Load progressbar.
		pb = None
		if message:
			message = "%-10s : %s" % (message, self.friendly_name)
			pb = util.make_progress(message, len(files), eta=False)

		# Sort files by the length of their name, longest first, so that
		# the files inside a directory are removed before the directory itself.
		files.sort(key=lambda f: len(f.name), reverse=True)

		# Messages to the user.
		messages = []

		i = 0
		for _file in files:
			# Update progress.
			if pb:
				i += 1
				pb.update(i)

			log.debug("Removing file: %s" % _file)

			if prefix:
				file = os.path.join(prefix, _file.name[1:])
				assert file.startswith("%s/" % prefix)
			else:
				file = _file.name

			# Rename configuration files.
			if _file.is_config():
				# Skip already removed config files.
				try:
					os.lstat(file)
				except OSError:
					continue

				file_save = "%s%s" % (file, CONFIG_FILE_SUFFIX_SAVE)

				try:
					shutil.move(file, file_save)
				except shutil.Error, e:
					log.error(e)

				if prefix:
					file_save = os.path.relpath(file_save, prefix)
				messages.append(_("Config file saved as %s.") % file_save)
				continue

			# Preserve datafiles.
			if _file.is_datafile():
				log.debug(_("Preserving datafile '/%s'") % _file)
				continue

			# Handle regular files and symlinks.
			if os.path.isfile(file) or os.path.islink(file):
				log.debug("Removing %s..." % _file)
				try:
					os.remove(file)
				except OSError:
					log.error("Cannot remove file: %s. Remove manually." % _file)

			# Handle directories.
			# Skip removal if the directory is a mountpoint.
			elif os.path.isdir(file) and not os.path.ismount(file):
				# Try to remove the directory. If it is not empty, OSError is raised,
				# but we are okay with that.
				try:
					os.rmdir(file)
				except OSError:
					pass

			# Handle files that have already been removed
			# by somebody else.
			elif not os.path.exists(file):
				pass

			# Log all unhandled types.
			else:
				log.warning("Cannot remove file: %s. Filetype is unhandled." % file)
Example #41
0
    def migrate(self):
        # If we have already the latest version, there is nothing to do.
        if self.format == DATABASE_FORMAT:
            return

        # Check if database version is supported.
        if self.format > DATABASE_FORMAT:
            raise DatabaseError, _(
                "Cannot use database with version greater than %s."
            ) % DATABASE_FORMAT

        log.info(_("Migrating database from format %(old)s to %(new)s.") % \
         { "old" : self.format, "new" : DATABASE_FORMAT })

        # Get a database cursor.
        c = self.cursor()

        # 1) The vendor column was added.
        if self.format < 1:
            c.execute("ALTER TABLE packages ADD COLUMN vendor TEXT AFTER uuid")

        if self.format < 2:
            c.execute("ALTER TABLE files ADD COLUMN `config` INTEGER")
            c.execute("ALTER TABLE files ADD COLUMN `mode` INTEGER")
            c.execute("ALTER TABLE files ADD COLUMN `user` TEXT")
            c.execute("ALTER TABLE files ADD COLUMN `group` TEXT")
            c.execute("ALTER TABLE files ADD COLUMN `mtime` INTEGER")

        if self.format < 3:
            c.execute("ALTER TABLE files ADD COLUMN `capabilities` TEXT")

        if self.format < 4:
            c.execute(
                "ALTER TABLE packages ADD COLUMN recommends TEXT AFTER obsoletes"
            )
            c.execute(
                "ALTER TABLE packages ADD COLUMN suggests TEXT AFTER recommends"
            )

        if self.format < 5:
            c.execute(
                "ALTER TABLE files ADD COLUMN datafile INTEGER AFTER config")

        if self.format < 6:
            c.execute(
                "ALTER TABLE packages ADD COLUMN inst_size INTEGER AFTER size")

        if self.format < 7:
            c.executescript("""
				CREATE TABLE dependencies(pkg INTEGER, type TEXT, dependency TEXT);
				CREATE INDEX dependencies_pkg_index ON dependencies(pkg);
			""")

            c.execute(
                "SELECT id, provides, requires, conflicts, obsoletes, recommends, suggests FROM packages"
            )
            pkgs = c.fetchall()

            for pkg in pkgs:
                (pkg_id, provides, requires, conflicts, obsoletes, recommends,
                 suggests) = pkg

                dependencies = (
                    ("provides", provides),
                    ("requires", requires),
                    ("conflicts", conflicts),
                    ("obsoletes", obsoletes),
                    ("recommends", recommends),
                    ("suggests", suggests),
                )

                for type, deps in dependencies:
                    c.executemany(
                        "INSERT INTO dependencies(pkg, type, dependency) VALUES(?, ?, ?)",
                        ((pkg_id, type, d) for d in deps.splitlines()))

            c.executescript("""
				CREATE TABLE packages_(
					id INTEGER PRIMARY KEY,
					name TEXT,
					epoch INTEGER,
					version TEXT,
					release TEXT,
					arch TEXT,
					groups TEXT,
					filename TEXT,
					size INTEGER,
					inst_size INTEGER,
					hash1 TEXT,
					license TEXT,
					summary TEXT,
					description TEXT,
					uuid TEXT,
					vendor TEXT,
					build_id TEXT,
					build_host TEXT,
					build_date TEXT,
					build_time INTEGER,
					installed INT,
					reason TEXT,
					repository TEXT
				);

				INSERT INTO packages_ SELECT id, name, epoch, version, release, arch, groups, filename,
					size, inst_size, hash1, license, summary, description, uuid, vendor, build_id,
					build_host, build_date, build_time, installed, reason, repository FROM packages;

				DROP TABLE packages;
				ALTER TABLE packages_ RENAME TO packages;

				DROP TABLE triggers;

				CREATE INDEX files_pkg_index ON files(pkg);
				CREATE INDEX scriptlets_pkg_index ON scriptlets(pkg);
				CREATE INDEX packages_name_index ON packages(name);
			""")

        # In the end, we can easily update the version of the database.
        c.execute("UPDATE settings SET val = ? WHERE key = 'version'",
                  (DATABASE_FORMAT, ))
        self.__format = DATABASE_FORMAT

        self.commit()
        c.close()
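
The migration applies every step between the on-disk format and the current one, then bumps the stored version. A minimal sqlite3 sketch of the same idea; the table and column names are illustrative only:

import sqlite3

DATABASE_FORMAT = 2  # current format (assumed)

MIGRATIONS = {
    1: "ALTER TABLE packages ADD COLUMN vendor TEXT",
    2: "ALTER TABLE files ADD COLUMN mode INTEGER",
}

def migrate(conn, format):
    # Apply every migration step the database has not seen yet.
    for version in range(format + 1, DATABASE_FORMAT + 1):
        conn.execute(MIGRATIONS[version])

    conn.execute("UPDATE settings SET val = ? WHERE key = 'version'",
                 (DATABASE_FORMAT,))
    conn.commit()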
Example #42
0
    def execute(self):
        # Save start time.
        self.time_start = time.time()

        if self.logger:
            self.logger.debug(
                _("Executing command: %s in %s") %
                (self.command, self.chroot_path or "/"))

        child = None
        try:
            # Create new child process
            child = self.create_subprocess()

            # Record the output.
            self.tee_log(child)
        except:
            # In case there has been an error, kill children if they aren't done
            if child and child.returncode is None:
                os.killpg(child.pid, 9)

            try:
                if child:
                    os.waitpid(child.pid, 0)
            except:
                pass

            # Raise original exception.
            raise

        finally:
            # Save end time.
            self.time_end = time.time()

        # wait until child is done, kill it if it passes timeout
        nice_exit = True
        while child.poll() is None:
            if self.timeout_has_been_exceeded():
                nice_exit = False
                os.killpg(child.pid, 15)

            if self.timeout_has_been_exceeded(3):
                nice_exit = False
                os.killpg(child.pid, 9)

        if not nice_exit:
            raise commandTimeoutExpired, (
                _("Command exceeded timeout (%(timeout)d): %(command)s") %
                { "timeout" : self.timeout, "command" : self.command })

        # Save exitcode.
        self.exitcode = child.returncode

        if self.logger:
            self.logger.debug(_("Child returncode was: %s") % self.exitcode)

        if self.exitcode and self.log_errors:
            raise ShellEnvironmentError, (_("Command failed: %s") %
                                          self.command, self.exitcode)

        return self.exitcode
Example #43
0
	def dump(self, short=False, long=False, filelist=False):
		if short:
			return "%s.%s : %s" % (self.name, self.arch, self.summary)

		items = [
			(_("Name"), self.name),
		]

		# Show supported arches if available.
		if hasattr(self, "supported_arches") and self.supported_arches != "all":
			arch = "%s (%s)" % (self.arch, self.supported_arches)
		else:
			arch = self.arch
		items.append((_("Arch"), arch))

		items += [
			(_("Version"), self.version),
			(_("Release"), self.release),
		]

		if self.size:
			items.append((_("Size"), util.format_size(self.size)))

		if self.inst_size:
			items.append(
				(_("Installed size"),
				util.format_size(self.inst_size))
			)

		# Filter out the dummy repository.
		if self.repo != self.pakfire.repos.dummy:
			items.append((_("Repo"), self.repo.name))

		items += [
			(_("Summary"), self.summary),
			(_("Groups"), " ".join(self.groups)),
			(_("URL"), self.url),
			(_("License"), self.license),
		]

		caption = _("Description")
		for line in util.text_wrap(self.description):
			items.append((caption, line))
			caption = ""

		if long:
			if self.maintainer:
				items.append((_("Maintainer"), self.maintainer))

			items.append((_("Vendor"), self.vendor))

			items.append((_("UUID"), self.uuid))
			items.append((_("Build ID"), self.build_id))
			items.append((_("Build date"), self.build_date))
			items.append((_("Build host"), self.build_host))

			caption = _("Signatures")
			for sig in self.signatures:
				items.append((caption, sig))
				caption = ""

			caption = _("Provides")
			for prov in sorted(self.provides):
				items.append((caption, prov))
				caption = ""

			caption = _("Pre-requires")
			for req in sorted(self.prerequires):
				items.append((caption, req))
				caption = ""

			caption = _("Requires")
			for req in sorted(self.requires):
				items.append((caption, req))
				caption = ""

			caption = _("Conflicts")
			for req in sorted(self.conflicts):
				items.append((caption, req))
				caption = ""

			caption = _("Obsoletes")
			for req in sorted(self.obsoletes):
				items.append((caption, req))
				caption = ""

			caption = _("Recommends")
			for req in sorted(self.recommends):
				items.append((caption, req))
				caption = ""

			caption = _("Suggests")
			for req in sorted(self.suggests):
				items.append((caption, req))
				caption = ""

		# Append filelist if requested.
		if filelist:
			for file in self.filelist:
				items.append((_("File"), file))

		format = "%%-%ds : %%s" % (max([len(k) for k, v in items]))

		s = []
		for caption, value in items:
			s.append(format % (caption, value))

		s.append("") # New line at the end

		# XXX why do we need to decode this?
		return "\n".join([str.decode("utf-8") for str in s])
Example #44
0
    def save(self, path=None, algo="xz"):
        """
			This function saves the database and metadata to path so it can
			be exported to a remote repository.
		"""
        if not path:
            path = self.path

        # Create filenames
        metapath = os.path.join(path, METADATA_DOWNLOAD_PATH)
        db_path = os.path.join(metapath, METADATA_DATABASE_FILE)
        md_path = os.path.join(metapath, METADATA_DOWNLOAD_FILE)

        # Remove all pre-existing metadata.
        if os.path.exists(metapath):
            util.rm(metapath)

        # Create directory for metadata.
        os.makedirs(metapath)

        # Save the database to path and get the filename.
        self.index.write(db_path)

        # Give the database file a unique, hash-based name so we won't
        # get into any trouble with caching proxies.
        db_hash = util.calc_hash1(db_path)

        db_path2 = os.path.join(os.path.dirname(db_path),
                                "%s-%s" % (db_hash, os.path.basename(db_path)))

        # Compress the database.
        if algo:
            # Open input file and get filesize of input file.
            f = open(db_path)
            filesize = os.path.getsize(db_path)

            # Make a nice progress bar.
            p = util.make_progress(_("Compressing database..."), filesize)

            # Create compressing file handler.
            c = compress.compressobj(db_path2)

            try:
                size = 0
                while True:
                    buf = f.read(BUFFER_SIZE)
                    if not buf:
                        break

                    if p:
                        size += len(buf)
                        p.update(size)

                    c.write(buf)
            except:
                # XXX catch compression errors
                raise

            finally:
                f.close()
                c.close()

                # make_progress() may have returned None.
                if p:
                    p.finish()

                # Remove old database.
                os.unlink(db_path)

        else:
            shutil.move(db_path, db_path2)

        # Create a new metadata object and add our information to it.
        md = metadata.Metadata(self.pakfire)

        # Save name of the hashed database to the metadata.
        md.database = os.path.basename(db_path2)
        md.database_hash1 = db_hash
        md.database_compression = algo

        # Save metadata to the repository.
        md.save(md_path)