コード例 #1
0
ファイル: packager.py プロジェクト: pombredanne/pakfire
	def __del__(self):
		"""Best-effort cleanup: delete any temporary files this packager created."""
		for tmpfile in self.tmpfiles:
			if not os.path.exists(tmpfile):
				# Already gone - nothing to clean up.
				continue

			log.debug("Removing tmpfile: %s" % tmpfile)

			# Directories need the recursive helper, plain files a simple unlink.
			remover = util.rm if os.path.isdir(tmpfile) else os.remove
			remover(tmpfile)
コード例 #2
0
ファイル: local.py プロジェクト: pombredanne/pakfire
 def remove(self):
     """
         Remove this repository: clear the package index first, then
         delete the repository's files from disk.
     """
     self.index.clear()
     util.rm(self.path)
コード例 #3
0
ファイル: local.py プロジェクト: pombredanne/pakfire
    def save(self, path=None, algo="xz"):
        """
            Save the database and metadata to path so it can
            be exported to a remote repository.

            path -- destination directory; defaults to this repository's
                own path.
            algo -- compression algorithm recorded in the metadata; a
                falsy value disables compression and moves the database
                as-is.
        """
        if not path:
            path = self.path

        # Create filenames.
        metapath = os.path.join(path, METADATA_DOWNLOAD_PATH)
        db_path = os.path.join(metapath, METADATA_DATABASE_FILE)
        md_path = os.path.join(metapath, METADATA_DOWNLOAD_FILE)

        # Remove all pre-existing metadata.
        if os.path.exists(metapath):
            util.rm(metapath)

        # Create directory for metadata.
        os.makedirs(metapath)

        # Save the database to path and get the filename.
        self.index.write(db_path)

        # Make a reference to the database file that it will get a unique name
        # so we won't get into any trouble with caching proxies.
        db_hash = util.calc_hash1(db_path)

        db_path2 = os.path.join(os.path.dirname(db_path),
                                "%s-%s" % (db_hash, os.path.basename(db_path)))

        # Compress the database.
        if algo:
            # The database is binary data - open it in binary mode so the
            # compressor receives raw bytes (text mode could corrupt the
            # stream and misreport progress).
            f = open(db_path, "rb")
            filesize = os.path.getsize(db_path)

            # Make a nice progress bar (may be None, e.g. when there is
            # no terminal attached - the loop below guards for that).
            p = util.make_progress(_("Compressing database..."), filesize)

            # Create compressing file handler.
            c = compress.compressobj(db_path2)

            try:
                size = 0
                while True:
                    buf = f.read(BUFFER_SIZE)
                    if not buf:
                        break

                    if p:
                        size += len(buf)
                        p.update(size)

                    c.write(buf)
            finally:
                f.close()
                c.close()

                # BUGFIX: the progress bar can be None - only finish it
                # when it actually exists, matching the guard used in the
                # copy loop above.
                if p:
                    p.finish()

                # Remove old database.
                # NOTE(review): this also runs when compression raised,
                # deleting the uncompressed database - confirm that losing
                # the source file on failure is intended.
                os.unlink(db_path)

        else:
            shutil.move(db_path, db_path2)

        # Create a new metadata object and add our information to it.
        md = metadata.Metadata(self.pakfire)

        # Save name of the hashed database to the metadata.
        md.database = os.path.basename(db_path2)
        md.database_hash1 = db_hash
        md.database_compression = algo

        # Save metadata to repository.
        md.save(md_path)
コード例 #4
0
ファイル: local.py プロジェクト: ipfire/pakfire
	def remove(self):
		"""
			Remove this repository: clear the package index first, then
			delete the repository's files from disk.
		"""
		self.index.clear()
		util.rm(self.path)
コード例 #5
0
ファイル: local.py プロジェクト: ipfire/pakfire
	def save(self, path=None, algo="xz"):
		"""
			Save the database and metadata to path so it can
			be exported to a remote repository.

			path -- destination directory; defaults to this repository's
				own path.
			algo -- compression algorithm recorded in the metadata; a
				falsy value disables compression and moves the database
				as-is.
		"""
		if not path:
			path = self.path

		# Create filenames.
		metapath = os.path.join(path, METADATA_DOWNLOAD_PATH)
		db_path = os.path.join(metapath, METADATA_DATABASE_FILE)
		md_path = os.path.join(metapath, METADATA_DOWNLOAD_FILE)

		# Remove all pre-existing metadata.
		if os.path.exists(metapath):
			util.rm(metapath)

		# Create directory for metadata.
		os.makedirs(metapath)

		# Save the database to path and get the filename.
		self.index.write(db_path)

		# Make a reference to the database file that it will get a unique name
		# so we won't get into any trouble with caching proxies.
		db_hash = util.calc_hash1(db_path)

		db_path2 = os.path.join(os.path.dirname(db_path),
			"%s-%s" % (db_hash, os.path.basename(db_path)))

		# Compress the database.
		if algo:
			# The database is binary data - open it in binary mode so the
			# compressor receives raw bytes (text mode could corrupt the
			# stream and misreport progress).
			f = open(db_path, "rb")
			filesize = os.path.getsize(db_path)

			# Make a nice progress bar (may be None, e.g. when there is
			# no terminal attached - the loop below guards for that).
			p = util.make_progress(_("Compressing database..."), filesize)

			# Create compressing file handler.
			c = compress.compressobj(db_path2)

			try:
				size = 0
				while True:
					buf = f.read(BUFFER_SIZE)
					if not buf:
						break

					if p:
						size += len(buf)
						p.update(size)

					c.write(buf)
			finally:
				f.close()
				c.close()

				# BUGFIX: the progress bar can be None - only finish it
				# when it actually exists, matching the guard used in the
				# copy loop above.
				if p:
					p.finish()

				# Remove old database.
				# NOTE(review): this also runs when compression raised,
				# deleting the uncompressed database - confirm that losing
				# the source file on failure is intended.
				os.unlink(db_path)

		else:
			shutil.move(db_path, db_path2)

		# Create a new metadata object and add our information to it.
		md = metadata.Metadata(self.pakfire)

		# Save name of the hashed database to the metadata.
		md.database = os.path.basename(db_path2)
		md.database_hash1 = db_hash
		md.database_compression = algo

		# Save metadata to repository.
		md.save(md_path)
コード例 #6
0
ファイル: cache.py プロジェクト: pombredanne/pakfire
	def destroy(self):
		"""
			Remove all files from this cache.
		"""
		# Nothing to do if the cache directory was never created.
		if not self.created:
			return

		util.rm(self.path)