def run(self):
    """Plan one TAP test per loaded case, then execute every case in order."""
    tap.plan(len(self.testcases))
    for case in self.testcases:
        tap.diag("Running '%s'" % case.testname)
        case.load()
        case.generate(self.pacman)
        case.run(self.pacman)
        tap.diag("==> Checking rules")
        # A case flagged as an expected failure is reported as TODO.
        tap.todo = case.expectfailure
        tap.subtest(lambda: case.check(), case.description)
def run(self):
    """Instantiate each registered test case and run it as a TAP subtest.

    Cases that declare ``skipall`` after loading are skipped wholesale.
    """
    for path in self.testcases:
        test = pmtest.pmtest(path, self.root, self.config)
        test.load()
        if test.skipall:
            tap.skip_all("skipping %s (%s)" % (test.description, test.skipall))
            continue
        tap.plan(1)
        tap.diag("Running '%s'" % test.testname)
        test.generate(self.pacman)
        test.run(self.pacman)
        tap.diag("==> Checking rules")
        # An expected failure is reported as a TODO subtest.
        tap.todo = test.expectfailure
        tap.subtest(lambda: test.check(), test.description)
def run(self):
    """Instantiate each registered test case and run it as a TAP subtest.

    Cases that declare ``skipall`` after loading are skipped wholesale;
    otherwise a single subtest is planned and executed for the case.
    """
    for testcase in self.testcases:
        t = pmtest.pmtest(testcase, self.root, self.config)
        t.load()
        if t.skipall:
            tap.skip_all("skipping %s (%s)" % (t.description, t.skipall))
        else:
            tap.plan(1)
            tap.diag("Running '%s'" % t.testname)
            t.generate(self.pacman)
            t.run(self.pacman)
            tap.diag("==> Checking rules")
            # When running under meson, we don't emit 'todo' in the plan and instead
            # handle expected failures in the test() objects. This really should be
            # fixed in meson:
            # https://github.com/mesonbuild/meson/issues/2923#issuecomment-614647076
            tap.todo = (t.expectfailure
                        and 'RUNNING_UNDER_MESON' not in os.environ)
            tap.subtest(lambda: t.check(), t.description)
# Forward the remaining command-line options into the test environment.
env.pacman["valgrind"] = opts.valgrind
env.pacman["manual-confirm"] = opts.manualconfirm
env.pacman["scriptlet-shell"] = opts.scriptletshell
env.pacman["ldconfig"] = opts.ldconfig
# Register every test file given on the command line; a bad test file
# aborts the whole run before anything is executed.
try:
    for i in args:
        env.addtest(i)
except Exception as e:
    tap.bail(e)
    os.rmdir(root_path)
    sys.exit(2)
# run tests
if not opts.review:
    env.run()
else:
    # save output in tempfile for review
    with OutputSaver() as save_file:
        env.run()
    # Open the saved output, the test files, and any pacman logs in the
    # user's editor for inspection.
    files = [save_file.name] + args + glob.glob(root_path + "/var/log/*")
    subprocess.call([opts.editor] + files)
# Remove the sandbox root unless the user asked to keep it for debugging.
if not opts.keeproot:
    shutil.rmtree(root_path)
else:
    tap.diag("pacman testing root saved: %s" % root_path)
# Non-zero exit when any test failed, so CI can detect it.
if env.failed > 0:
    sys.exit(1)
def check(self, test):
    """Evaluate this rule against a completed test run.

    The rule string has the form ``[!]KIND_CASE=key[|value]``; a leading
    ``!`` negates the outcome.  Returns 1 (truthy) on success, 0 (falsy)
    on failure, and -1 when the rule itself is unknown.  The outcome is
    also stored in ``self.result``.
    """
    success = 1
    [testname, args] = self.rule.split("=")
    # A leading '!' inverts the expected outcome of the rule.
    if testname[0] == "!":
        self.false = 1
        testname = testname[1:]
    [kind, case] = testname.split("_")
    # Rule arguments are either "key" or "key|value".
    if "|" in args:
        [key, value] = args.split("|", 1)
    else:
        [key, value] = [args, None]
    if kind == "PACMAN":
        if case == "RETCODE":
            if test.retcode != int(key):
                success = 0
        elif case == "OUTPUT":
            logfile = os.path.join(test.root, util.LOGFILE)
            if not os.access(logfile, os.F_OK):
                tap.diag(
                    "LOGFILE not found, cannot validate 'OUTPUT' rule")
                success = 0
            elif not util.grep(logfile, key):
                success = 0
        else:
            tap.diag("PACMAN rule '%s' not found" % case)
            success = -1
    elif kind == "PKG":
        # All PKG rules read the package entry from the local database.
        localdb = test.db["local"]
        newpkg = localdb.db_read(key)
        if not newpkg:
            success = 0
        else:
            if case == "EXIST":
                success = 1
            elif case == "VERSION":
                if value != newpkg.version:
                    success = 0
            elif case == "DESC":
                if value != newpkg.desc:
                    success = 0
            elif case == "GROUPS":
                if value not in newpkg.groups:
                    success = 0
            elif case == "PROVIDES":
                if value not in newpkg.provides:
                    success = 0
            elif case == "DEPENDS":
                if value not in newpkg.depends:
                    success = 0
            elif case == "OPTDEPENDS":
                # Match on the dependency name, ignoring any description
                # after the ':' separator.
                success = 0
                for optdep in newpkg.optdepends:
                    if value == optdep.split(':', 1)[0]:
                        success = 1
                        break
            elif case == "REASON":
                if newpkg.reason != int(value):
                    success = 0
            elif case == "FILES":
                if value not in newpkg.files:
                    success = 0
            elif case == "BACKUP":
                # Backup entries are "name\tmd5sum"; match on the name.
                success = 0
                for f in newpkg.backup:
                    if f.startswith(value + "\t"):
                        success = 1
                        break
            else:
                tap.diag("PKG rule '%s' not found" % case)
                success = -1
    elif kind == "FILE":
        filename = os.path.join(test.root, key)
        if case == "EXIST":
            if not os.path.isfile(filename):
                success = 0
        elif case == "EMPTY":
            if not (os.path.isfile(filename)
                    and os.path.getsize(filename) == 0):
                success = 0
        elif case == "CONTENTS":
            # Narrowed from a bare 'except:' which also swallowed
            # KeyboardInterrupt/SystemExit; only a missing/unreadable or
            # undecodable file should count as a rule failure.
            try:
                with open(filename, 'r') as f:
                    success = f.read() == value
            except (OSError, UnicodeDecodeError):
                success = 0
        elif case == "MODIFIED":
            for f in test.files:
                if f.name == key:
                    if not f.ismodified():
                        success = 0
                    break
        elif case == "MODE":
            if not os.path.isfile(filename):
                success = 0
            else:
                # Compare only the permission bits, in octal.
                mode = os.lstat(filename)[stat.ST_MODE]
                if int(value, 8) != stat.S_IMODE(mode):
                    success = 0
        elif case == "TYPE":
            if value == "dir":
                if not os.path.isdir(filename):
                    success = 0
            elif value == "file":
                if not os.path.isfile(filename):
                    success = 0
            elif value == "link":
                if not os.path.islink(filename):
                    success = 0
        elif case == "PACNEW":
            if not os.path.isfile("%s.pacnew" % filename):
                success = 0
        elif case == "PACSAVE":
            if not os.path.isfile("%s.pacsave" % filename):
                success = 0
        else:
            tap.diag("FILE rule '%s' not found" % case)
            success = -1
    elif kind == "DIR":
        filename = os.path.join(test.root, key)
        if case == "EXIST":
            if not os.path.isdir(filename):
                success = 0
        else:
            tap.diag("DIR rule '%s' not found" % case)
            success = -1
    elif kind == "LINK":
        filename = os.path.join(test.root, key)
        if case == "EXIST":
            if not os.path.islink(filename):
                success = 0
        else:
            tap.diag("LINK rule '%s' not found" % case)
            success = -1
    elif kind == "CACHE":
        cachedir = os.path.join(test.root, util.PM_CACHEDIR)
        if case == "EXISTS":
            pkg = test.findpkg(key, value, allow_local=True)
            if not pkg or not os.path.isfile(
                    os.path.join(cachedir, pkg.filename())):
                success = 0
    else:
        tap.diag("Rule kind '%s' not found" % kind)
        success = -1
    # Apply the '!' negation, but never invert an "unknown rule" marker.
    if self.false and success != -1:
        success = not success
    self.result = success
    return success
def vprint(msg):
    """Emit *msg* as a TAP diagnostic, but only when verbose mode is on."""
    if not verbose:
        return
    tap.diag(msg)
def run(self, pacman):
    """Build and execute the pacman command line for this test.

    Assembles optional fakeroot/fakechroot, gdb and valgrind wrappers
    around the pacman binary, runs it inside the sandbox root, records
    the exit code in ``self.retcode``, and reports stale lock files and
    core dumps as TAP diagnostics.
    """
    # A leftover lock file means a previous pacman run is (or appears)
    # still active; bail out rather than corrupt its state.
    if os.path.isfile(util.PM_LOCK):
        tap.bail("\tERROR: another pacman session is on-going -- skipping")
        return
    tap.diag("==> Running test")
    vprint("\tpacman %s" % self.args)
    cmd = []
    # Without real root privileges, wrap the call in fakeroot/fakechroot
    # so the sandbox behaves like a root-owned filesystem.
    if os.geteuid() != 0:
        fakeroot = util.which("fakeroot")
        if not fakeroot:
            tap.diag("WARNING: fakeroot not found!")
        else:
            cmd.append("fakeroot")
        fakechroot = util.which("fakechroot")
        if not fakechroot:
            tap.diag("WARNING: fakechroot not found!")
        else:
            cmd.append("fakechroot")
    if pacman["gdb"]:
        cmd.extend(["libtool", "execute", "gdb", "--args"])
    if pacman["valgrind"]:
        suppfile = os.path.join(os.path.dirname(__file__),
                                '..', '..', 'valgrind.supp')
        cmd.extend([
            "libtool", "execute", "valgrind", "-q", "--tool=memcheck",
            "--leak-check=full", "--show-reachable=yes",
            "--gen-suppressions=all", "--child-silent-after-fork=yes",
            "--log-file=%s" % os.path.join(self.root, "var/log/valgrind"),
            "--suppressions=%s" % suppfile
        ])
        # Valgrind writes to its own log file; an empty log means no
        # errors, which is enforced by this extra rule.
        self.addrule("FILE_EMPTY=var/log/valgrind")
    # replace program name with absolute path
    prog = pacman["bin"]
    if not prog:
        prog = util.which(self.cmd[0], pacman["bindir"])
    if not prog or not os.access(prog, os.X_OK):
        if not prog:
            tap.bail("could not locate '%s' binary" % (self.cmd[0]))
        # NOTE(review): a binary that exists but is not executable
        # returns silently with no diagnostic — confirm this is intended.
        return
    cmd.append(os.path.abspath(prog))
    cmd.extend(self.cmd[1:])
    if pacman["manual-confirm"]:
        cmd.append("--confirm")
    if pacman["debug"]:
        cmd.append("--debug=%s" % pacman["debug"])
    cmd.extend(shlex.split(self.args))
    # Capture stdout/stderr to the log file unless gdb is interactive or
    # logging was explicitly disabled.
    if not (pacman["gdb"] or pacman["nolog"]):
        output = open(os.path.join(self.root, util.LOGFILE), 'w')
    else:
        output = None
    vprint("\trunning: %s" % " ".join(cmd))
    # Change to the tmp dir before running pacman, so that local package
    # archives are made available more easily.
    time_start = time.time()
    self.retcode = subprocess.call(cmd, stdout=output, stderr=output,
                                   cwd=os.path.join(
                                       self.root, util.TMPDIR),
                                   env={'LC_ALL': 'C'})
    time_end = time.time()
    vprint("\ttime elapsed: %.2fs" % (time_end - time_start))
    if output:
        output.close()
    vprint("\tretcode = %s" % self.retcode)
    # Check if the lock is still there
    if os.path.isfile(util.PM_LOCK):
        tap.diag("\tERROR: %s not removed" % util.PM_LOCK)
        os.unlink(util.PM_LOCK)
    # Look for a core file
    if os.path.isfile(os.path.join(self.root, util.TMPDIR, "core")):
        tap.diag("\tERROR: pacman dumped a core file")
def generate(self, pacman):
    """Build the sandbox filesystem and databases for this test.

    Creates the directory tree under ``self.root``, writes the pacman
    configuration, builds all package archives and sync databases,
    populates the filesystem and local database, and finally snapshots
    the files needed by the test's rules.
    """
    tap.diag("==> Generating test environment")
    # Cleanup leftover files from a previous test session
    if os.path.isdir(self.root):
        shutil.rmtree(self.root)
    vprint("\t%s" % self.root)
    # Create directory structure
    vprint(" Creating directory structure:")
    dbdir = os.path.join(self.root, util.PM_SYNCDBPATH)
    cachedir = os.path.join(self.root, util.PM_CACHEDIR)
    syncdir = os.path.join(self.root, util.SYNCREPO)
    tmpdir = os.path.join(self.root, util.TMPDIR)
    logdir = os.path.join(self.root, os.path.dirname(util.LOGFILE))
    etcdir = os.path.join(self.root, os.path.dirname(util.PACCONF))
    bindir = os.path.join(self.root, "bin")
    ldconfig = os.path.basename(pacman["ldconfig"])
    # [1:] strips the leading '/' so the path joins under the sandbox root.
    ldconfigdir = os.path.join(self.root,
                               os.path.dirname(pacman["ldconfig"][1:]))
    shell = pacman["scriptlet-shell"][1:]
    shelldir = os.path.join(self.root, os.path.dirname(shell))
    sys_dirs = [
        dbdir, cachedir, syncdir, tmpdir, logdir, etcdir, bindir,
        ldconfigdir, shelldir
    ]
    for sys_dir in sys_dirs:
        if not os.path.isdir(sys_dir):
            vprint("\t%s" % sys_dir[len(self.root) + 1:])
            os.makedirs(sys_dir, 0o755)
    # Only the dynamically linked binary is needed for fakechroot
    shutil.copy("/bin/sh", bindir)
    if shell != "bin/sh":
        shutil.copy("/bin/sh", os.path.join(self.root, shell))
    # Install the ldconfig stub so scriptlets calling ldconfig succeed.
    shutil.copy(os.path.join(util.SELFPATH, "ldconfig.stub"),
                os.path.join(ldconfigdir, ldconfig))
    # Touch an empty ld.so.conf inside the sandbox.
    ld_so_conf = open(os.path.join(etcdir, "ld.so.conf"), "w")
    ld_so_conf.close()
    # Configuration file
    vprint(" Creating configuration file")
    util.mkcfgfile(util.PACCONF, self.root, self.option, self.db)
    # Creating packages
    vprint(" Creating package archives")
    for pkg in self.localpkgs:
        vprint("\t%s" % os.path.join(util.TMPDIR, pkg.filename()))
        pkg.finalize()
        pkg.makepkg(tmpdir)
    for key, value in self.db.items():
        for pkg in value.pkgs:
            pkg.finalize()
        # Local-db packages only get archives when the test asks for them.
        if key == "local" and not self.createlocalpkgs:
            continue
        for pkg in value.pkgs:
            vprint("\t%s" % os.path.join(util.PM_CACHEDIR, pkg.filename()))
            if self.cachepkgs:
                pkg.makepkg(cachedir)
            else:
                pkg.makepkg(os.path.join(syncdir, value.treename))
            pkg.md5sum = util.getmd5sum(pkg.path)
            pkg.csize = os.stat(pkg.path)[stat.ST_SIZE]
    # Creating sync database archives
    vprint(" Creating databases")
    for key, value in self.db.items():
        vprint("\t" + value.treename)
        value.generate()
    # Filesystem
    vprint(" Populating file system")
    for f in self.filesystem:
        if type(f) is pmfile.pmfile:
            vprint("\t%s" % f.path)
            f.mkfile(self.root)
        else:
            vprint("\t%s" % f)
            path = util.mkfile(self.root, f, f)
            if os.path.isfile(path):
                # Backdate the timestamp — presumably so later package
                # installs are detectable as modifications; TODO confirm.
                os.utime(path, (355, 355))
    for pkg in self.db["local"].pkgs:
        vprint("\tinstalling %s" % pkg.fullname())
        pkg.install_package(self.root)
    # Stamp the local database version for modern database layouts.
    if self.db["local"].pkgs and self.dbver >= 9:
        path = os.path.join(self.root, util.PM_DBPATH, "local")
        util.mkfile(path, "ALPM_DB_VERSION", str(self.dbver))
    # Done.
    vprint(" Taking a snapshot of the file system")
    for filename in self.snapshots_needed():
        f = pmfile.snapshot(self.root, filename)
        self.files.append(f)
        vprint("\t%s" % f.name)
def check(self, test):
    """Evaluate this rule against a completed test run.

    The rule string has the form ``[!]KIND_CASE=key[|value]``; a leading
    ``!`` negates the outcome.  Returns 1 (truthy) on success, 0 (falsy)
    on failure, and -1 when the rule itself is unknown.  The outcome is
    also stored in ``self.result``.
    """
    success = 1
    [testname, args] = self.rule.split("=")
    # A leading '!' inverts the expected outcome of the rule.
    if testname[0] == "!":
        self.false = 1
        testname = testname[1:]
    [kind, case] = testname.split("_")
    # Rule arguments are either "key" or "key|value".
    if "|" in args:
        [key, value] = args.split("|", 1)
    else:
        [key, value] = [args, None]
    if kind == "PACMAN":
        if case == "RETCODE":
            if test.retcode != int(key):
                success = 0
        elif case == "OUTPUT":
            logfile = os.path.join(test.root, util.LOGFILE)
            if not os.access(logfile, os.F_OK):
                tap.diag("LOGFILE not found, cannot validate 'OUTPUT' rule")
                success = 0
            elif not util.grep(logfile, key):
                success = 0
        else:
            tap.diag("PACMAN rule '%s' not found" % case)
            success = -1
    elif kind == "PKG":
        # All PKG rules read the package entry from the local database.
        localdb = test.db["local"]
        newpkg = localdb.db_read(key)
        if not newpkg:
            success = 0
        else:
            if case == "EXIST":
                success = 1
            elif case == "VERSION":
                if value != newpkg.version:
                    success = 0
            elif case == "DESC":
                if value != newpkg.desc:
                    success = 0
            elif case == "GROUPS":
                if not value in newpkg.groups:
                    success = 0
            elif case == "PROVIDES":
                if not value in newpkg.provides:
                    success = 0
            elif case == "DEPENDS":
                if not value in newpkg.depends:
                    success = 0
            elif case == "OPTDEPENDS":
                # Match on the dependency name, ignoring any description
                # after the ':' separator.
                success = 0
                for optdep in newpkg.optdepends:
                    if value == optdep.split(':', 1)[0]:
                        success = 1
                        break
            elif case == "REASON":
                if newpkg.reason != int(value):
                    success = 0
            elif case == "FILES":
                if not value in newpkg.files:
                    success = 0
            elif case == "BACKUP":
                # Backup entries are "name\tmd5sum"; match on the name.
                found = 0
                for f in newpkg.backup:
                    name, md5sum = f.split("\t")
                    if value == name:
                        found = 1
                if not found:
                    success = 0
            else:
                tap.diag("PKG rule '%s' not found" % case)
                success = -1
    elif kind == "FILE":
        filename = os.path.join(test.root, key)
        if case == "EXIST":
            if not os.path.isfile(filename):
                success = 0
        elif case == "EMPTY":
            if not (os.path.isfile(filename)
                    and os.path.getsize(filename) == 0):
                success = 0
        elif case == "MODIFIED":
            for f in test.files:
                if f.name == key:
                    if not f.ismodified():
                        success = 0
                    break
        elif case == "MODE":
            if not os.path.isfile(filename):
                success = 0
            else:
                # Compare only the permission bits, in octal.
                mode = os.lstat(filename)[stat.ST_MODE]
                if int(value, 8) != stat.S_IMODE(mode):
                    success = 0
        elif case == "TYPE":
            if value == "dir":
                if not os.path.isdir(filename):
                    success = 0
            elif value == "file":
                if not os.path.isfile(filename):
                    success = 0
            elif value == "link":
                if not os.path.islink(filename):
                    success = 0
        elif case == "PACNEW":
            if not os.path.isfile("%s.pacnew" % filename):
                success = 0
        elif case == "PACORIG":
            if not os.path.isfile("%s.pacorig" % filename):
                success = 0
        elif case == "PACSAVE":
            if not os.path.isfile("%s.pacsave" % filename):
                success = 0
        else:
            tap.diag("FILE rule '%s' not found" % case)
            success = -1
    elif kind == "DIR":
        filename = os.path.join(test.root, key)
        if case == "EXIST":
            if not os.path.isdir(filename):
                success = 0
        else:
            tap.diag("DIR rule '%s' not found" % case)
            success = -1
    elif kind == "LINK":
        filename = os.path.join(test.root, key)
        if case == "EXIST":
            if not os.path.islink(filename):
                success = 0
        else:
            tap.diag("LINK rule '%s' not found" % case)
            success = -1
    elif kind == "CACHE":
        cachedir = os.path.join(test.root, util.PM_CACHEDIR)
        if case == "EXISTS":
            pkg = test.findpkg(key, value, allow_local=True)
            if not pkg or not os.path.isfile(
                    os.path.join(cachedir, pkg.filename())):
                success = 0
    else:
        tap.diag("Rule kind '%s' not found" % kind)
        success = -1
    # Apply the '!' negation, but never invert an "unknown rule" marker.
    if self.false and success != -1:
        success = not success
    self.result = success
    return success
# Forward the parsed command-line options into the test environment.
util.verbose = opts.verbose
env.pacman["debug"] = opts.debug
env.pacman["bin"] = opts.bin
env.pacman["bindir"] = opts.bindir
env.pacman["nolog"] = opts.nolog
env.pacman["gdb"] = opts.gdb
env.pacman["valgrind"] = opts.valgrind
env.pacman["manual-confirm"] = opts.manualconfirm
env.pacman["scriptlet-shell"] = opts.scriptletshell
# Register every test file given on the command line; a bad test file
# aborts the whole run before anything is executed.
try:
    for i in args:
        env.addtest(i)
except Exception as e:
    tap.bail(e)
    os.rmdir(root_path)
    sys.exit(2)
# run tests
env.run()
# Remove the sandbox root unless the user asked to keep it for debugging.
if not opts.keeproot:
    shutil.rmtree(root_path)
else:
    tap.diag("pacman testing root saved: %s" % root_path)
# Non-zero exit when any test failed, so CI can detect it.
if env.failed > 0:
    sys.exit(1)
# vim: set ts=4 sw=4 et:
def run(self, pacman):
    """Assemble and execute the pacman command for this test.

    Wraps the binary in fakeroot/fakechroot, gdb or valgrind as
    configured, runs it inside the sandbox, stores the exit status in
    ``self.retcode``, and diagnoses stale locks and core dumps.
    """
    if os.path.isfile(util.PM_LOCK):
        tap.bail("\tERROR: another pacman session is on-going -- skipping")
        return

    tap.diag("==> Running test")
    vprint("\tpacman %s" % self.args)

    argv = []
    # Without root privileges, emulate root via fakeroot/fakechroot.
    if os.geteuid() != 0:
        if not util.which("fakeroot"):
            tap.diag("WARNING: fakeroot not found!")
        else:
            argv.append("fakeroot")
        if not util.which("fakechroot"):
            tap.diag("WARNING: fakechroot not found!")
        else:
            argv.append("fakechroot")

    if pacman["gdb"]:
        argv += ["libtool", "execute", "gdb", "--args"]
    if pacman["valgrind"]:
        supp = os.path.join(os.path.dirname(__file__),
                            '..', '..', 'valgrind.supp')
        argv += ["libtool", "execute", "valgrind", "-q",
                 "--tool=memcheck", "--leak-check=full",
                 "--show-reachable=yes", "--gen-suppressions=all",
                 "--child-silent-after-fork=yes",
                 "--log-file=%s" % os.path.join(self.root,
                                                "var/log/valgrind"),
                 "--suppressions=%s" % supp]
        # An empty valgrind log means no errors were detected.
        self.addrule("FILE_EMPTY=var/log/valgrind")

    # Resolve the pacman binary to an absolute path.
    binary = pacman["bin"] or util.which(self.cmd[0], pacman["bindir"])
    if not binary or not os.access(binary, os.X_OK):
        if not binary:
            tap.bail("could not locate '%s' binary" % (self.cmd[0]))
        return
    argv.append(os.path.abspath(binary))
    argv.extend(self.cmd[1:])

    if pacman["manual-confirm"]:
        argv.append("--confirm")
    if pacman["debug"]:
        argv.append("--debug=%s" % pacman["debug"])
    argv.extend(shlex.split(self.args))

    # Capture output to the log file unless gdb is interactive or
    # logging is disabled.
    if pacman["gdb"] or pacman["nolog"]:
        logfd = None
    else:
        logfd = open(os.path.join(self.root, util.LOGFILE), 'w')

    vprint("\trunning: %s" % " ".join(argv))
    # Change to the tmp dir before running pacman, so that local package
    # archives are made available more easily.
    started = time.time()
    self.retcode = subprocess.call(argv, stdout=logfd, stderr=logfd,
                                   cwd=os.path.join(self.root, util.TMPDIR),
                                   env={'LC_ALL': 'C'})
    finished = time.time()
    vprint("\ttime elapsed: %.2fs" % (finished - started))

    if logfd:
        logfd.close()
    vprint("\tretcode = %s" % self.retcode)

    # The lock file must be gone once pacman has exited.
    if os.path.isfile(util.PM_LOCK):
        tap.diag("\tERROR: %s not removed" % util.PM_LOCK)
        os.unlink(util.PM_LOCK)
    # A core file in the tmp dir means pacman crashed.
    if os.path.isfile(os.path.join(self.root, util.TMPDIR, "core")):
        tap.diag("\tERROR: pacman dumped a core file")
def generate(self, pacman):
    """Build the sandbox filesystem and databases for this test.

    Creates the directory tree under ``self.root``, writes the pacman
    configuration, builds all package archives and sync databases,
    populates the filesystem and local database, and finally snapshots
    the files needed by the test's rules.
    """
    tap.diag("==> Generating test environment")
    # Cleanup leftover files from a previous test session
    if os.path.isdir(self.root):
        shutil.rmtree(self.root)
    vprint("\t%s" % self.root)
    # Create directory structure
    vprint(" Creating directory structure:")
    dbdir = os.path.join(self.root, util.PM_SYNCDBPATH)
    cachedir = os.path.join(self.root, util.PM_CACHEDIR)
    syncdir = os.path.join(self.root, util.SYNCREPO)
    tmpdir = os.path.join(self.root, util.TMPDIR)
    logdir = os.path.join(self.root, os.path.dirname(util.LOGFILE))
    etcdir = os.path.join(self.root, os.path.dirname(util.PACCONF))
    bindir = os.path.join(self.root, "bin")
    ldconfig = os.path.basename(pacman["ldconfig"])
    # [1:] strips the leading '/' so the path joins under the sandbox root.
    ldconfigdir = os.path.join(self.root,
                               os.path.dirname(pacman["ldconfig"][1:]))
    shell = pacman["scriptlet-shell"][1:]
    shelldir = os.path.join(self.root, os.path.dirname(shell))
    sys_dirs = [dbdir, cachedir, syncdir, tmpdir, logdir, etcdir, bindir,
                ldconfigdir, shelldir]
    for sys_dir in sys_dirs:
        if not os.path.isdir(sys_dir):
            vprint("\t%s" % sys_dir[len(self.root)+1:])
            os.makedirs(sys_dir, 0o755)
    # Only the dynamically linked binary is needed for fakechroot
    shutil.copy("/bin/sh", bindir)
    if shell != "bin/sh":
        shutil.copy("/bin/sh", os.path.join(self.root, shell))
    # Install the ldconfig stub so scriptlets calling ldconfig succeed.
    shutil.copy(os.path.join(util.SELFPATH, "ldconfig.stub"),
                os.path.join(ldconfigdir, ldconfig))
    # Touch an empty ld.so.conf inside the sandbox (context manager
    # replaces an unmanaged open()/close() pair).
    with open(os.path.join(etcdir, "ld.so.conf"), "w"):
        pass
    # Configuration file
    vprint(" Creating configuration file")
    util.mkcfgfile(util.PACCONF, self.root, self.option, self.db)
    # Creating packages
    vprint(" Creating package archives")
    for pkg in self.localpkgs:
        vprint("\t%s" % os.path.join(util.TMPDIR, pkg.filename()))
        pkg.finalize()
        pkg.makepkg(tmpdir)
    for key, value in self.db.items():
        for pkg in value.pkgs:
            pkg.finalize()
        # Local-db packages only get archives when the test asks for them.
        if key == "local" and not self.createlocalpkgs:
            continue
        for pkg in value.pkgs:
            vprint("\t%s" % os.path.join(util.PM_CACHEDIR, pkg.filename()))
            if self.cachepkgs:
                pkg.makepkg(cachedir)
            else:
                pkg.makepkg(os.path.join(syncdir, value.treename))
            pkg.md5sum = util.getmd5sum(pkg.path)
            pkg.csize = os.stat(pkg.path)[stat.ST_SIZE]
    # Creating sync database archives
    vprint(" Creating databases")
    for key, value in self.db.items():
        vprint("\t" + value.treename)
        value.generate()
    # Filesystem
    vprint(" Populating file system")
    for f in self.filesystem:
        if type(f) is pmfile.pmfile:
            vprint("\t%s" % f.path)
            # stray trailing semicolon removed
            f.mkfile(self.root)
        else:
            vprint("\t%s" % f)
            path = util.mkfile(self.root, f, f)
            if os.path.isfile(path):
                # Backdate the timestamp — presumably so later package
                # installs are detectable as modifications; TODO confirm.
                os.utime(path, (355, 355))
    for pkg in self.db["local"].pkgs:
        vprint("\tinstalling %s" % pkg.fullname())
        pkg.install_package(self.root)
    # Stamp the local database version for modern database layouts.
    if self.db["local"].pkgs and self.dbver >= 9:
        path = os.path.join(self.root, util.PM_DBPATH, "local")
        util.mkfile(path, "ALPM_DB_VERSION", str(self.dbver))
    # Done.
    vprint(" Taking a snapshot of the file system")
    for filename in self.snapshots_needed():
        f = pmfile.snapshot(self.root, filename)
        self.files.append(f)
        vprint("\t%s" % f.name)
def run(self, pacman):
    """Build and execute the pacman command line for this test.

    This variant passes --config/--root/--dbpath/--cachedir explicitly
    and defaults to --noconfirm; the exit code is stored in
    ``self.retcode`` and stale locks / core dumps are diagnosed.
    """
    # A leftover lock file means a previous pacman run is (or appears)
    # still active; bail out rather than corrupt its state.
    if os.path.isfile(util.PM_LOCK):
        tap.bail("\tERROR: another pacman session is on-going -- skipping")
        return
    tap.diag("==> Running test")
    vprint("\tpacman %s" % self.args)
    cmd = []
    # Without real root privileges, wrap the call in fakeroot/fakechroot.
    if os.geteuid() != 0:
        fakeroot = util.which("fakeroot")
        if not fakeroot:
            tap.diag("WARNING: fakeroot not found!")
        else:
            cmd.append("fakeroot")
        fakechroot = util.which("fakechroot")
        if not fakechroot:
            tap.diag("WARNING: fakechroot not found!")
        else:
            cmd.append("fakechroot")
    if pacman["gdb"]:
        cmd.extend(["libtool", "execute", "gdb", "--args"])
    if pacman["valgrind"]:
        suppfile = os.path.join(os.path.dirname(__file__),
                                '..', '..', 'valgrind.supp')
        cmd.extend(["libtool", "execute", "valgrind", "-q",
                    "--tool=memcheck", "--leak-check=full",
                    "--show-reachable=yes",
                    "--suppressions=%s" % suppfile])
    # Point pacman at the sandbox via explicit path options.
    cmd.extend([pacman["bin"],
                "--config", os.path.join(self.root, util.PACCONF),
                "--root", self.root,
                "--dbpath", os.path.join(self.root, util.PM_DBPATH),
                "--cachedir", os.path.join(self.root, util.PM_CACHEDIR)])
    if not pacman["manual-confirm"]:
        cmd.append("--noconfirm")
    if pacman["debug"]:
        cmd.append("--debug=%s" % pacman["debug"])
    cmd.extend(shlex.split(self.args))
    # Capture stdout/stderr to the log file unless an interactive or
    # separately-logged wrapper (gdb/valgrind) is active, or logging is
    # disabled.
    if not (pacman["gdb"] or pacman["valgrind"] or pacman["nolog"]):
        output = open(os.path.join(self.root, util.LOGFILE), 'w')
    else:
        output = None
    vprint("\trunning: %s" % " ".join(cmd))
    # Change to the tmp dir before running pacman, so that local package
    # archives are made available more easily.
    time_start = time.time()
    self.retcode = subprocess.call(cmd, stdout=output, stderr=output,
                                   cwd=os.path.join(self.root, util.TMPDIR),
                                   env={'LC_ALL': 'C'})
    time_end = time.time()
    vprint("\ttime elapsed: %.2fs" % (time_end - time_start))
    if output:
        output.close()
    vprint("\tretcode = %s" % self.retcode)
    # Check if the lock is still there
    if os.path.isfile(util.PM_LOCK):
        tap.diag("\tERROR: %s not removed" % util.PM_LOCK)
        os.unlink(util.PM_LOCK)
    # Look for a core file
    if os.path.isfile(os.path.join(self.root, util.TMPDIR, "core")):
        tap.diag("\tERROR: pacman dumped a core file")
def run(self, pacman):
    """Assemble and execute the pacman command for this test.

    Passes --config/--root/--dbpath/--cachedir explicitly, defaults to
    --noconfirm, stores the exit status in ``self.retcode``, and
    diagnoses stale locks and core dumps.
    """
    if os.path.isfile(util.PM_LOCK):
        tap.bail("\tERROR: another pacman session is on-going -- skipping")
        return

    tap.diag("==> Running test")
    vprint("\tpacman %s" % self.args)

    argv = []
    # Without root privileges, emulate root via fakeroot/fakechroot.
    if os.geteuid() != 0:
        if not util.which("fakeroot"):
            tap.diag("WARNING: fakeroot not found!")
        else:
            argv.append("fakeroot")
        if not util.which("fakechroot"):
            tap.diag("WARNING: fakechroot not found!")
        else:
            argv.append("fakechroot")

    if pacman["gdb"]:
        argv += ["libtool", "execute", "gdb", "--args"]
    if pacman["valgrind"]:
        supp = os.path.join(os.path.dirname(__file__),
                            '..', '..', 'valgrind.supp')
        argv += ["libtool", "execute", "valgrind", "-q",
                 "--tool=memcheck", "--leak-check=full",
                 "--show-reachable=yes",
                 "--suppressions=%s" % supp]

    # Point pacman at the sandbox via explicit path options.
    argv += [pacman["bin"],
             "--config", os.path.join(self.root, util.PACCONF),
             "--root", self.root,
             "--dbpath", os.path.join(self.root, util.PM_DBPATH),
             "--cachedir", os.path.join(self.root, util.PM_CACHEDIR)]
    if not pacman["manual-confirm"]:
        argv.append("--noconfirm")
    if pacman["debug"]:
        argv.append("--debug=%s" % pacman["debug"])
    argv.extend(shlex.split(self.args))

    # Capture output to the log file unless a wrapper (gdb/valgrind) is
    # active or logging is disabled.
    if pacman["gdb"] or pacman["valgrind"] or pacman["nolog"]:
        logfd = None
    else:
        logfd = open(os.path.join(self.root, util.LOGFILE), 'w')

    vprint("\trunning: %s" % " ".join(argv))
    # Change to the tmp dir before running pacman, so that local package
    # archives are made available more easily.
    started = time.time()
    self.retcode = subprocess.call(argv, stdout=logfd, stderr=logfd,
                                   cwd=os.path.join(self.root, util.TMPDIR),
                                   env={'LC_ALL': 'C'})
    finished = time.time()
    vprint("\ttime elapsed: %.2fs" % (finished - started))

    if logfd:
        logfd.close()
    vprint("\tretcode = %s" % self.retcode)

    # The lock file must be gone once pacman has exited.
    if os.path.isfile(util.PM_LOCK):
        tap.diag("\tERROR: %s not removed" % util.PM_LOCK)
        os.unlink(util.PM_LOCK)
    # A core file in the tmp dir means pacman crashed.
    if os.path.isfile(os.path.join(self.root, util.TMPDIR, "core")):
        tap.diag("\tERROR: pacman dumped a core file")