Example #1
 def _load_foods(self):
     util.vprint('Loading nutrient data')
     nutritional_data = self._load_nutritional_data()
     util.vprint('Loading food data')
     with open(util.relative_path(
             'kb_data/sr27asc/FOOD_DES.txt')) as food_des_txt:
         food_des_lines = food_des_txt.readlines()
         for food_des_line in food_des_lines:
             parsed_line = parse_usda_line(food_des_line)
             new_food = Food(parsed_line[0],
                             parsed_line[1],
                             parsed_line[2],
                             common_name=parsed_line[4])
             if new_food.food_group in food_group_blacklist:
                 continue
             if new_food.food_id in food_id_blacklist:
                 continue
              bad_food_name = any(
                  keyword in new_food.name
                  for keyword_group in food_keyword_blacklist
                  for keyword in keyword_group)
              if bad_food_name:
                  continue
             if new_food.food_id in nutritional_data:
                 new_food.nutritional_data = nutritional_data[
                     new_food.food_id]
             self.foods.append(new_food)
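Every example in this listing assumes a small vprint helper defined in the surrounding project; the exact signature varies (some variants take a verbosity flag or level as the first argument). A minimal sketch of the common pattern, with the module-level flag as an assumption:

    # Hypothetical minimal vprint: print only when verbosity is enabled.
    VERBOSE = False

    def vprint(*args):
        # Print args only when the module-level VERBOSE flag is set.
        if VERBOSE:
            print(*args)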
Example #2
    def profile(self, profile_settings):
        # cd into repo
        sh.cd(self.repo.name())

        # create the initial profiler
        profiler = get_profiler(profile_settings, self.verbose)

        # determine how to traverse hashes
        hash_iter = get_hash_iterator(
            traversal_type=profile_settings['traversal'])

        # do the traversal
        self.profiles_by_hash = {}
        for git_hash in hash_iter(self.repo.get_hashes()):
            # first checkout this version of the code
            self.repo.checkout_hash(git_hash)

            # then build; skip this hash if the build fails
            try:
                self.build()
            except Exception:
                vprint(self.verbose,
                       'Failed to build for hash \'{}\''.format(git_hash))
                continue

            # now do the profiling
            prof = profiler.profile(self.repo)
            self.profiles_by_hash[git_hash] = prof

        # step back out of repo
        sh.cd('..')
Example #3
 def connect(self):
     """establishes connection with mongoDB"""
     try:
         self.conn = pymongo.MongoClient()
         vprint("Connected to MongoDB")
     except pymongo.errors.ConnectionFailure as e:
         warning("Could not connect to MongoDB: %s" % e, exit=True)
Example #4
 def access(self, db_name):
     """connects to a mongoDB database"""
     if db_name in self.conn.database_names():
         vprint("Connecting to existing %s database" % db_name)
     else:
         vprint("Initializing new %s database" % db_name)
     self.db = self.conn[db_name]
     return
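A hedged usage sketch combining the two methods above (the Database class name is taken from Example #8, which calls Database.load_tweet_json; the database name is a placeholder):

    # Sketch: connect to MongoDB, then select a database by name.
    db = Database()
    db.connect()         # Example #3: establishes the MongoClient connection
    db.access('tweets')  # Example #4: selects or initializes the 'tweets' database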
Example #5
 def equivalent(self, t1, t2):
     vprint(P_JOIN_VERIF, 'Solver: %s ?= %s' % (t1, t2))
     self.s.push()
     self.s.add(
         eval(t1.get_str(True), {}, self.eval_dict) != eval(
             t2.get_str(True), {}, self.eval_dict))
     if self.s.check().r == Z3_L_FALSE:
         self.s.pop()
         return True
     self.s.pop()
     return False
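Example #5 uses the standard Z3 recipe for term equivalence: assert the disequality inside a push/pop frame and check for unsatisfiability. A self-contained sketch of the same technique with plain z3py (the terms here are illustrative):

    from z3 import Solver, Ints, unsat

    s = Solver()
    x, y = Ints('x y')
    s.push()
    s.add(x + y != y + x)            # assert the disequality
    equivalent = s.check() == unsat  # unsat means the two terms always agree
    s.pop()
    print(equivalent)                # True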
Example #6
 def load_collection(self, force_reload):
     """makes a collection selection"""
     if self.collection_name not in self.db.collection_names():
         self.write_tweets()
     else:
         self.collection = self.db[self.collection_name]
         if force_reload:
             vprint('Deleting collection')
             self.collection.drop()
             self.write_tweets()
     return
Example #7
 def get_successors(self, state):
     out = []
     for i in range(len(self.rules)):
         # TODO: no need to flatten if rules preserve flatness
         new_terms = [flatten(rew)
                      for rew in self.rules[i].apply(state.term)]
         for new_term in new_terms:
             new_cost, breakdown = self.strategy.get_cost(
                 state, new_term, i)
             new_state = State(new_term, new_cost, state, i)
             new_state.cost_breakdown = breakdown
             vprint(P_SUCCESSORS, 'Rule: ', state.term, '->', new_term,
                    '(%s)' % self.rules[i], new_cost)
             out.append(new_state)
     return out
Example #8
 def write_tweets(self):
     """loads tweets from a JSON file and writes them to the database"""
     self.collection = self.db[self.collection_name]
     vprint('Preparing to load tweets...')
     if not os.path.isfile(self.json_name):
         warning('The requested file does not exist: %s' % self.json_name,
                 exit=True)
     with open(self.json_name, 'r') as f:
         total_tweets = sum(1 for _ in f)
     bar = ProgressBar('Loading Tweets', total_tweets)
     with open(self.json_name, 'r') as f:
         i = 0
         for tweet in f:
             tweet_json = json.loads(tweet)
             data = Database.load_tweet_json(tweet_json)
             self.collection.insert(data)
             i += 1
             if not i % 1000:
                 bar.set_progress(i)
     bar.end_progress()
     vprint('Finished writing tweets!')
     return
Example #9
    def profile(self, repo):
        # for gprof we can run each test in the application;
        # however, we must move and/or rename gmon.out each time
        # and then read the resulting profile into hatchet
        profiles = {}
        for test in repo.itertests():
            vprint(self.verbose,
                   'Profiling test \'{}\'...'.format(test['name']))
            exec_path = os.path.join(test['prefix'], test['executable'])
            cmd = Command(exec_path)

            try:
                cmd(test['args'])
            except Exception:
                vprint(self.verbose,
                       'Running test \'{}\' failed...'.format(test['name']))
                continue

            if not os.path.isfile('gmon.out'):
                vprint(self.verbose, 'Unable to read profile...')
                continue

            # read in profile with gprof and gprof2dot
            dotfile_name = 'profile-dot-graph.dot'
            gprof = Command('gprof')
            gprof2dot = Command('gprof2dot')
            with open(dotfile_name, 'w+') as outFile:
                gprof2dot(gprof(exec_path), '-n0', '-e0', _out=outFile)

            # finally read this into hatchet
            gf = ht.GraphFrame.from_gprof_dot(dotfile_name)

            profiles[test['name']] = gf

        return profiles
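Example #9 relies on sh-style Command wrappers to pipe gprof output through gprof2dot. A rough subprocess equivalent of that pipeline, with the executable path as a placeholder:

    import subprocess

    # Equivalent to: gprof ./app | gprof2dot -n0 -e0 > profile-dot-graph.dot
    gprof = subprocess.Popen(['gprof', './app'], stdout=subprocess.PIPE)
    with open('profile-dot-graph.dot', 'w') as out:
        subprocess.run(['gprof2dot', '-n0', '-e0'],
                       stdin=gprof.stdout, stdout=out, check=True)
    gprof.stdout.close()
    gprof.wait()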
Example #10
def main():
    parser = ArgumentParser()
    parser.add_argument('-i',
                        '--input',
                        type=str,
                        required=True,
                        help='json file to read input settings')
    parser.add_argument('--working-directory',
                        type=str,
                        default='scratch',
                        help='where to store files')
    parser.add_argument('-p',
                        '--preserve',
                        action='store_true',
                        help='don\'t remove scratch files')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='verbose output')
    args = parser.parse_args()

    input_settings = read_input(args.input)

    # move into working directory
    vprint(args.verbose, 'Creating working directory...')
    sh.mkdir('-p', args.working_directory)
    sh.cd(args.working_directory)

    # do the profiling for every application
    for application in input_settings:
        vprint(
            args.verbose, 'Initializing tests for \'{}\'...'.format(
                application['repo']['url']))
        tester = Tester(application["repo"], verbose=args.verbose)
        setup_successful = tester.setup()

        if setup_successful:
            tester.profile(application["profile"])
            tester.print_summary()
        else:
            vprint(
                args.verbose,
                'Failed to setup \'{}\'. Skipping tests...'.format(
                    application['repo']['url']))

    # step back and remove working directory
    vprint(args.verbose, 'Cleaning up...')
    sh.cd('..')
    if not args.preserve:
        sh.rm('-rf', args.working_directory)
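main() only assumes that read_input returns a list of application entries with 'repo' and 'profile' keys, and Example #2 additionally reads profile_settings['traversal']. A hypothetical minimal settings object consistent with those accesses (URL and traversal value are placeholders):

    # Hypothetical minimal input settings (values are placeholders).
    input_settings = [
        {
            'repo': {'url': 'https://example.com/some/app.git'},
            'profile': {'traversal': 'linear'},
        },
    ]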
Example #11
    def profile(self, repo):
        profiles = {}
        for test in repo.itertests():
            vprint(self.verbose,
                   'Profiling test \'{}\'...'.format(test['name']))
            exec_path = os.path.join(test['prefix'], test['executable'])

            hpcstruct_name = '{}.hpcstruct'.format(test['name'])
            hpcmeasurements_name = 'hpctoolkit-{}-measurements'.format(
                test['name'])
            hpcdatabase_name = 'hpctoolkit-{}-database'.format(test['name'])

            # try to generate hpcstruct
            try:
                self.hpcstruct_cmd(exec_path, '--output', hpcstruct_name)
            except Exception:
                vprint(self.verbose, 'Failed to create hpcstruct file...')
                continue

            # run test
            try:
                self.hpcrun_cmd('--output', hpcmeasurements_name, exec_path,
                                test['args'])
            except Exception:
                vprint(self.verbose,
                       'Running test \'{}\' failed...'.format(test['name']))
                continue

            # generate profile
            try:
                self.hpcprof_cmd('-S', hpcstruct_name, '-I', './+', '--output',
                                 hpcdatabase_name, hpcmeasurements_name)
            except Exception:
                vprint(self.verbose,
                       'Generating profile for \'{}\' failed...'.format(
                           test['name']))
                continue

            # finally read hatchet profile
            profiles[test['name']] = ht.GraphFrame.from_hpctoolkit(
                hpcdatabase_name)

            # and now delete the leftover files/folders
            rm('-r', hpcstruct_name, hpcmeasurements_name, hpcdatabase_name)

        return profiles
Example #12
    def search(self):
        open_set = PriorityQueue()
        init_state = self.get_initial_state()
        open_set.put(
            (init_state.cost + self.strategy.get_heuristic(init_state),
             init_state))
        seen = {init_state: init_state.cost}
        while not open_set.empty():
            _, state = open_set.get()
            self.state_count += 1
            self.strategy.state_visit(state)
            self.stats.log_state(state)
            vprint(P_STATES, "State",
                   "[%d, %d]:" % (self.state_count, self.hits), state)
            vprint(P_COSTS, 'State costs: ',
                   ', '.join([str(cost) for cost in state.cost_breakdown]))
            for pred in [state] + state.get_predecessors():
                vprint(
                    P_STATE_PATH, '^%-50s %s' % (pred.term, ', '.join(
                        ['%3s' % str(cost) for cost in pred.cost_breakdown])))
            if R_CHECK:
                self.rewrite_check(state)
            if self.benchmark_sequence:  # benchmark mode
                if str(state.term) in self.benchmark_sequence:
                    vprint(True, "### Milestone:", state, "###")
                    if self.benchmark_sequence[-1] == str(state.term):
                        return None
                    self.benchmark_sequence.remove(str(state.term))
                    self.hits += 1  # variable has different meaning in this case
            else:
                outcome = self.outcome(state)
                if outcome:
                    self.stats.log_state(state)
                    return outcome

            for i, succ_state in loopthru(self.get_successors(state),
                                          I_REWRITE,
                                          'select a rewrite of %s:' % state):
                succ_metric = succ_state.cost + self.strategy.get_heuristic(
                    succ_state)
                if succ_state not in seen or succ_metric < seen[succ_state]:
                    seen[succ_state] = succ_metric
                    open_set.put((succ_metric, succ_state))
                self.rule_choice_record.append(i)
        return None
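Example #12 is a textbook A* loop: the priority is cost-so-far plus a heuristic, and the seen map records the best metric per state to avoid re-expansion. The same skeleton in isolation, assuming caller-supplied neighbors, h, and is_goal functions (all names here are illustrative):

    import heapq
    import itertools

    def astar(start, neighbors, h, is_goal):
        # Frontier ordered by f = g + h; the counter breaks ties so that
        # states themselves never need to be comparable.
        counter = itertools.count()
        frontier = [(h(start), next(counter), 0, start)]
        seen = {start: h(start)}
        while frontier:
            _, _, g, state = heapq.heappop(frontier)
            if is_goal(state):
                return state
            for succ, step_cost in neighbors(state):
                f = g + step_cost + h(succ)
                if succ not in seen or f < seen[succ]:
                    seen[succ] = f
                    heapq.heappush(frontier,
                                   (f, next(counter), g + step_cost, succ))
        return None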
Example #13
def run(db, target, event, event2):
    """Determines winners, awards, and creates ceremony time line"""
    event.wait()  # Wait for start_time to be set
    vprint('Received start time. Finding winners...')
    raw_winners = read_winners(db, target)
    vprint('Processing winners...')
    processed_winners = consolidate_winners(raw_winners)
    vprint('Sorting winners...')
    sorted_winners = sorted(processed_winners.items(),
                            key=sort_winners,
                            reverse=True)
    vprint('Getting top winners...')
    top_winners = get_top_winners(sorted_winners)
    consolidated_winners = super_consolidate(top_winners)
    sorted_super = sorted(consolidated_winners.items(),
                          key=sort_winners,
                          reverse=True)
    target.winners = match_to_awards(sorted_super)
    event2.set()
Example #14
    def ismodified(self):
        checksum = util.getmd5sum(self.filename)
        mtime = self.getmtime()

        util.vprint("\tismodified(%s)" % self.name)
        util.vprint("\t\told: %s / %s" % (self.checksum, self.mtime))
        util.vprint("\t\tnew: %s / %s" % (checksum, mtime))

        if self.checksum != checksum or self.mtime[0] != mtime[0] or self.mtime[1] != mtime[1]:
            return True

        return False
Example #15
    def ismodified(self):
        checksum = util.getmd5sum(self.filename)
        mtime = self.getmtime()

        util.vprint("\tismodified(%s)" % self.name)
        util.vprint("\t\told: %s / %s" % (self.checksum, self.mtime))
        util.vprint("\t\tnew: %s / %s" % (checksum, mtime))

        if self.checksum != checksum \
                or self.mtime[0] != mtime[0] \
                or self.mtime[1] != mtime[1]:
            return True

        return False
Example #16
 def build(self):
     vprint(self.verbose, 'Building...')
     command_runner = bash.bake('-c')
     for command in self.repo_settings['build']['commands']:
         command_runner(command)
Example #17
 def checkout_hash(self, hash):
     vprint(self.verbose, 'Checking out hash \'{}\'...'.format(hash))
     git.checkout(hash)
Example #18
 def clone(self):
     vprint(self.verbose,
            'Cloning \'{}\'...'.format(self.repo_settings['url']))
     git.clone(self.repo_settings['url'])
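A hedged sketch of how the three helpers above fit together with Example #16's build() (the Repo class name and constructor arguments are assumptions; the hash is a placeholder):

    # Hypothetical driver for the clone/checkout/build helpers above.
    repo = Repo({'url': 'https://example.com/some/app.git',
                 'build': {'commands': ['make -j4']}},
                verbose=True)
    repo.clone()                  # Example #18
    repo.checkout_hash('abc123')  # Example #17
    repo.build()                  # Example #16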
Example #19
    def generate(self, pacman):
        tap.diag("==> Generating test environment")

        # Cleanup leftover files from a previous test session
        if os.path.isdir(self.root):
            shutil.rmtree(self.root)
        vprint("\t%s" % self.root)

        # Create directory structure
        vprint("    Creating directory structure:")
        dbdir = os.path.join(self.root, util.PM_SYNCDBPATH)
        cachedir = os.path.join(self.root, util.PM_CACHEDIR)
        syncdir = os.path.join(self.root, util.SYNCREPO)
        tmpdir = os.path.join(self.root, util.TMPDIR)
        logdir = os.path.join(self.root, os.path.dirname(util.LOGFILE))
        etcdir = os.path.join(self.root, os.path.dirname(util.PACCONF))
        bindir = os.path.join(self.root, "bin")
        ldconfig = os.path.basename(pacman["ldconfig"])
        ldconfigdir = os.path.join(self.root,
                                   os.path.dirname(pacman["ldconfig"][1:]))
        shell = pacman["scriptlet-shell"][1:]
        shelldir = os.path.join(self.root, os.path.dirname(shell))
        sys_dirs = [
            dbdir, cachedir, syncdir, tmpdir, logdir, etcdir, bindir,
            ldconfigdir, shelldir
        ]
        for sys_dir in sys_dirs:
            if not os.path.isdir(sys_dir):
                vprint("\t%s" % sys_dir[len(self.root) + 1:])
                os.makedirs(sys_dir, 0o755)
        # Only the dynamically linked binary is needed for fakechroot
        shutil.copy("/bin/sh", bindir)
        if shell != "bin/sh":
            shutil.copy("/bin/sh", os.path.join(self.root, shell))
        shutil.copy(os.path.join(util.SELFPATH, "ldconfig.stub"),
                    os.path.join(ldconfigdir, ldconfig))
        ld_so_conf = open(os.path.join(etcdir, "ld.so.conf"), "w")
        ld_so_conf.close()

        # Configuration file
        vprint("    Creating configuration file")
        util.mkcfgfile(util.PACCONF, self.root, self.option, self.db)

        # Creating packages
        vprint("    Creating package archives")
        for pkg in self.localpkgs:
            vprint("\t%s" % os.path.join(util.TMPDIR, pkg.filename()))
            pkg.finalize()
            pkg.makepkg(tmpdir)
        for key, value in self.db.items():
            for pkg in value.pkgs:
                pkg.finalize()
            if key == "local" and not self.createlocalpkgs:
                continue
            for pkg in value.pkgs:
                vprint("\t%s" % os.path.join(util.PM_CACHEDIR, pkg.filename()))
                if self.cachepkgs:
                    pkg.makepkg(cachedir)
                else:
                    pkg.makepkg(os.path.join(syncdir, value.treename))
                pkg.md5sum = util.getmd5sum(pkg.path)
                pkg.csize = os.stat(pkg.path)[stat.ST_SIZE]

        # Creating sync database archives
        vprint("    Creating databases")
        for key, value in self.db.items():
            vprint("\t" + value.treename)
            value.generate()

        # Filesystem
        vprint("    Populating file system")
        for f in self.filesystem:
            if type(f) is pmfile.pmfile:
                vprint("\t%s" % f.path)
                f.mkfile(self.root)
            else:
                vprint("\t%s" % f)
                path = util.mkfile(self.root, f, f)
                if os.path.isfile(path):
                    os.utime(path, (355, 355))
        for pkg in self.db["local"].pkgs:
            vprint("\tinstalling %s" % pkg.fullname())
            pkg.install_package(self.root)
        if self.db["local"].pkgs and self.dbver >= 9:
            path = os.path.join(self.root, util.PM_DBPATH, "local")
            util.mkfile(path, "ALPM_DB_VERSION", str(self.dbver))

        # Done.
        vprint("    Taking a snapshot of the file system")
        for filename in self.snapshots_needed():
            f = pmfile.snapshot(self.root, filename)
            self.files.append(f)
            vprint("\t%s" % f.name)
Example #20
    def generate(self):
        print "==> Generating test environment"

        # Cleanup leftover files from a previous test session
        if os.path.isdir(self.root):
            shutil.rmtree(self.root)
        vprint("\t%s" % self.root)

        # Create directory structure
        vprint("    Creating directory structure:")
        dbdir = os.path.join(self.root, util.PM_SYNCDBPATH)
        cachedir = os.path.join(self.root, util.PM_CACHEDIR)
        syncdir = os.path.join(self.root, util.SYNCREPO)
        tmpdir = os.path.join(self.root, util.TMPDIR)
        logdir = os.path.join(self.root, os.path.dirname(util.LOGFILE))
        etcdir = os.path.join(self.root, os.path.dirname(util.PACCONF))
        bindir = os.path.join(self.root, "bin")
        sys_dirs = [dbdir, cachedir, syncdir, tmpdir, logdir, etcdir, bindir]
        for sys_dir in sys_dirs:
            if not os.path.isdir(sys_dir):
                vprint("\t%s" % sys_dir[len(self.root) + 1:])
                os.makedirs(sys_dir, 0755)
        # Only the dynamically linked binary is needed for fakechroot
        shutil.copy("/bin/sh", bindir)

        # Configuration file
        vprint("    Creating configuration file")
        util.mkcfgfile(util.PACCONF, self.root, self.option, self.db)

        # Creating packages
        vprint("    Creating package archives")
        for pkg in self.localpkgs:
            vprint("\t%s" % os.path.join(util.TMPDIR, pkg.filename()))
            pkg.makepkg(tmpdir)
        for key, value in self.db.iteritems():
            if key == "local" and not self.createlocalpkgs:
                continue
            for pkg in value.pkgs:
                vprint("\t%s" % os.path.join(util.PM_CACHEDIR, pkg.filename()))
                if self.cachepkgs:
                    pkg.makepkg(cachedir)
                else:
                    pkg.makepkg(os.path.join(syncdir, value.treename))
                pkg.md5sum = util.getmd5sum(pkg.path)
                pkg.csize = os.stat(pkg.path)[stat.ST_SIZE]

        # Creating sync database archives
        vprint("    Creating databases")
        for key, value in self.db.iteritems():
            vprint("\t" + value.treename)
            value.generate()

        # Filesystem
        vprint("    Populating file system")
        for pkg in self.db["local"].pkgs:
            vprint("\tinstalling %s" % pkg.fullname())
            for f in pkg.files:
                vprint("\t%s" % f)
                path = os.path.join(self.root, f)
                util.mkfile(path, f)
                if os.path.isfile(path):
                    os.utime(path, (355, 355))
        for f in self.filesystem:
            vprint("\t%s" % f)
            path = os.path.join(self.root, f)
            util.mkfile(path, f)
            if os.path.isfile(path):
                os.utime(path, (355, 355))

        # Done.
        vprint("    Taking a snapshot of the file system")
        for roots, dirs, files in os.walk(self.root):
            for i in files:
                filename = os.path.join(roots, i)
                f = pmfile.PacmanFile(self.root,
                                      filename.replace(self.root + "/", ""))
                self.files.append(f)
                vprint("\t%s" % f.name)
Example #21
def run(db, target, event):
    event.wait()
    util.vprint("Winners received. Finding presenters and nominees...")
    for winner, value, time in target.winners:
        presenter_names = {}
        nominee_names = {}
        for i in [-1, 1]:
            if i == -1:
                current_dict = presenter_names
                start = time - 180000
                end = time
            else:
                current_dict = nominee_names
                start = time
                end = time + 360000
            if target.timestamp_format == 'str':
                start = str(start)
                end = str(end)
            cursor = db.collection.find(
                {'timestamp_ms': {
                    '$gt': start,
                    '$lt': end
                }})
            for tweet in cursor:
                if i == 1 and not regex.eehhhh.match(tweet['text']):
                    continue
                n = regex.name.match(tweet['text'])
                if n:
                    n = n.group()
                    n = n.lower()
                    toks = nltk.word_tokenize(n)
                    if toks[0] in util.common_words or toks[
                            1] in util.common_words:
                        continue
                    else:
                        if n in current_dict:
                            current_dict[n] += 1
                        # elif not weed_out(n, target):
                        else:
                            current_dict[n] = 1
        pl = sorted(presenter_names.items(),
                    key=operator.itemgetter(1),
                    reverse=True)
        nl = sorted(nominee_names.items(),
                    key=operator.itemgetter(1),
                    reverse=True)
        if winner in pl:
            pl.remove(winner)
        if winner in nl:
            nl.remove(winner)

        if len(pl):
            pl_trunc = []
            for i in range(len(pl)):
                if i > 1:
                    break
                pl_trunc.append(pl[i][0])
            target.presenters.append(tuple(pl_trunc))
        else:
            target.presenters.append(())

        if len(nl):
            nl_trunc = []
            for j in range(len(nl)):
                if j > 3:
                    break
                nl_trunc.append(nl[j][0])
            target.nominees.append(tuple(nl_trunc))
        else:
            target.nominees.append(())
    util.vprint("Finished Presenters and Noms")
    return
Example #22
def _gcjut_rec(loop, t, conj_existing_states=False):

    # Base case
    if type(t) == Var or type(t) == Const:
        return [Join(loop, t)]

    out = []

    # Recursively call function on subterms
    joins = [_gcjut_rec(loop, st) for st in t.terms]

    for j_comb in product(*joins):

        # For this particular combination of joins, obtain a merged join
        merged_join = merge(loop, t.op, j_comb)
        vprint(P_JOIN_GEN, "Join: merged these joins:")
        for join in j_comb:
            vprint(P_JOIN_GEN, "Join:", join)
        out.append(merged_join)  # Case when the merged join is not a new auxiliary
        vprint(P_JOIN_GEN, "Join: candidate join (merged) =\n", merged_join)
        if not merged_join.term.state_free("SV"):
            continue

        # Find all constants and obtain a mapping to their locations
        const_indv = _get_const_indv(merged_join.term)
        if not const_indv:
            return out

        for const in const_indv.keys():
            vprint(
                P_JOIN_GEN,
                "Join: const %s appears in locations %s within %s" %
                (str(const), str(const_indv[const]), str(merged_join.term)))
            for ind_set in powerset(const_indv[const]):
                if not ind_set:
                    continue
                rem_set = const_indv[const][:]
                auxjn = Join(merged_join.loop, merged_join.term)
                k = auxjn.loop.get_num_states()

                # Conjecture that this particular choice of indices corresponds
                # to locations of an auxiliary state variable
                for ind in ind_set:
                    auxjn.term.set_term_at(ind, Var("RSV", "s", k + 1))
                    rem_set.remove(ind)
                # Unfold right variables in term to obtain definition for the auxiliary
                auxterm = auxjn.term.rename("RSV", "SV").apply_subst(
                    merged_join.loop.get_full_state_subst())

                # For all remaining indices, conjecture that some of them point to
                # existing state variables (if conj_existing_states is True)
                for state_assgn in product(
                        *[list(range(loop.get_num_states() + 1)) for _ in range(len(rem_set))]) \
                        if conj_existing_states else [[0] * len(rem_set)]:
                    auxjn_v = deepcopy(auxjn)
                    auxterm_v = deepcopy(auxterm)
                    for i in range(len(rem_set)):
                        if state_assgn[i] != 0:
                            auxterm_v.set_term_at(
                                rem_set[i], Var("SV", "s", state_assgn[i]))

                    # Add the auxiliary variable and set the join to be the auxiliary
                    # Note: the auxiliary variable could already exist among the states,
                    # in which case r is an index to the existing state
                    r = auxjn_v.loop.add_state(const, auxterm_v, k)
                    auxjn_v.term = Var("RSV", "s", r + 1)

                    out.append(auxjn_v)
                    vprint(P_JOIN_GEN, "Join: new auxiliary variable:")
                    vprint(
                        P_JOIN_GEN,
                        "Join: %s = %s" % (str(auxjn_v.term), str(auxterm_v)))
                    vprint(P_JOIN_GEN,
                           "Join: candidate join (with auxiliaries) =\n",
                           str(auxjn_v))
    return out
Example #23
def get_candidate_join_unfold_terms(loop, t):
    vprint(P_JOIN_GEN, "Join: Starting gcjut")
    vprint(P_JOIN_GEN, "Join: loop = \n", loop)
    vprint(P_JOIN_GEN, "Join: term = %s" % str(t))
    return [join for join in _gcjut_rec(loop, t) if join.term.state_free("IV")]
Example #24
    def generate(self, pacman):
        tap.diag("==> Generating test environment")

        # Cleanup leftover files from a previous test session
        if os.path.isdir(self.root):
            shutil.rmtree(self.root)
        vprint("\t%s" % self.root)

        # Create directory structure
        vprint("    Creating directory structure:")
        dbdir = os.path.join(self.root, util.PM_SYNCDBPATH)
        cachedir = os.path.join(self.root, util.PM_CACHEDIR)
        syncdir = os.path.join(self.root, util.SYNCREPO)
        tmpdir = os.path.join(self.root, util.TMPDIR)
        logdir = os.path.join(self.root, os.path.dirname(util.LOGFILE))
        etcdir = os.path.join(self.root, os.path.dirname(util.PACCONF))
        bindir = os.path.join(self.root, "bin")
        shell = pacman["scriptlet-shell"][1:]
        shelldir = os.path.join(self.root, os.path.dirname(shell))
        sys_dirs = [dbdir, cachedir, syncdir, tmpdir, logdir, etcdir, bindir,
                    shelldir]
        for sys_dir in sys_dirs:
            if not os.path.isdir(sys_dir):
                vprint("\t%s" % sys_dir[len(self.root)+1:])
                os.makedirs(sys_dir, 0o755)
        # Only the dynamically linked binary is needed for fakechroot
        shutil.copy("/bin/sh", bindir)
        if shell != "bin/sh":
            shutil.copy("/bin/sh", os.path.join(self.root, shell))

        # Configuration file
        vprint("    Creating configuration file")
        util.mkcfgfile(util.PACCONF, self.root, self.option, self.db)

        # Creating packages
        vprint("    Creating package archives")
        for pkg in self.localpkgs:
            vprint("\t%s" % os.path.join(util.TMPDIR, pkg.filename()))
            pkg.finalize()
            pkg.makepkg(tmpdir)
        for key, value in self.db.items():
            for pkg in value.pkgs:
                pkg.finalize()
            if key == "local" and not self.createlocalpkgs:
                continue
            for pkg in value.pkgs:
                vprint("\t%s" % os.path.join(util.PM_CACHEDIR, pkg.filename()))
                if self.cachepkgs:
                    pkg.makepkg(cachedir)
                else:
                    pkg.makepkg(os.path.join(syncdir, value.treename))
                pkg.md5sum = util.getmd5sum(pkg.path)
                pkg.csize = os.stat(pkg.path)[stat.ST_SIZE]

        # Creating sync database archives
        vprint("    Creating databases")
        for key, value in self.db.items():
            vprint("\t" + value.treename)
            value.generate()

        # Filesystem
        vprint("    Populating file system")
        for f in self.filesystem:
            if type(f) is pmfile.pmfile:
                vprint("\t%s" % f.path)
                f.mkfile(self.root)
            else:
                vprint("\t%s" % f)
                path = util.mkfile(self.root, f, f)
                if os.path.isfile(path):
                    os.utime(path, (355, 355))
        for pkg in self.db["local"].pkgs:
            vprint("\tinstalling %s" % pkg.fullname())
            pkg.install_package(self.root)
        if self.db["local"].pkgs and self.dbver >= 9:
            path = os.path.join(self.root, util.PM_DBPATH, "local")
            util.mkfile(path, "ALPM_DB_VERSION", str(self.dbver))

        # Done.
        vprint("    Taking a snapshot of the file system")
        for filename in self.snapshots_needed():
            f = pmfile.snapshot(self.root, filename)
            self.files.append(f)
            vprint("\t%s" % f.name)
Example #25
    def run(self, pacman):
        if os.path.isfile(util.PM_LOCK):
            print "\tERROR: another pacman session is on-going -- skipping"
            return

        print "==> Running test"
        vprint("\tpacman %s" % self.args)

        cmd = []
        if os.geteuid() != 0:
            fakeroot = util.which("fakeroot")
            if not fakeroot:
                print "WARNING: fakeroot not found!"
            else:
                cmd.append("fakeroot")

            fakechroot = util.which("fakechroot")
            if fakechroot:
                cmd.append("fakechroot")

        if pacman["gdb"]:
            cmd.extend(["libtool", "execute", "gdb", "--args"])
        if pacman["valgrind"]:
            suppfile = os.path.join(os.path.dirname(__file__), '..', '..',
                                    'valgrind.supp')
            cmd.extend([
                "libtool", "execute", "valgrind", "-q", "--tool=memcheck",
                "--leak-check=full", "--show-reachable=yes",
                "--suppressions=%s" % suppfile
            ])
        cmd.extend([
            pacman["bin"], "--config",
            os.path.join(self.root, util.PACCONF), "--root", self.root,
            "--dbpath",
            os.path.join(self.root, util.PM_DBPATH), "--cachedir",
            os.path.join(self.root, util.PM_CACHEDIR)
        ])
        if not pacman["manual-confirm"]:
            cmd.append("--noconfirm")
        if pacman["debug"]:
            cmd.append("--debug=%s" % pacman["debug"])
        cmd.extend(shlex.split(self.args))
        if not (pacman["gdb"] or pacman["valgrind"] or pacman["nolog"]):
            output = open(os.path.join(self.root, util.LOGFILE), 'w')
        else:
            output = None
        vprint("\trunning: %s" % " ".join(cmd))

        # Change to the tmp dir before running pacman, so that local package
        # archives are made available more easily.
        time_start = time.time()
        self.retcode = subprocess.call(cmd,
                                       stdout=output,
                                       stderr=output,
                                       cwd=os.path.join(
                                           self.root, util.TMPDIR))
        time_end = time.time()
        vprint("\ttime elapsed: %.2fs" % (time_end - time_start))

        if output:
            output.close()

        vprint("\tretcode = %s" % self.retcode)

        # Check if the lock is still there
        if os.path.isfile(util.PM_LOCK):
            print "\tERROR: %s not removed" % util.PM_LOCK
            os.unlink(util.PM_LOCK)
        # Look for a core file
        if os.path.isfile(os.path.join(self.root, util.TMPDIR, "core")):
            print "\tERROR: pacman dumped a core file"
Example #26
 def start_job(self, cmd, id):
     vprint(2, "Executing '%s' as job %s" % (cmd, id))
Example #27
    def run(self, pacman):
        if os.path.isfile(util.PM_LOCK):
            tap.bail("\tERROR: another pacman session is on-going -- skipping")
            return

        tap.diag("==> Running test")
        vprint("\tpacman %s" % self.args)

        cmd = []
        if os.geteuid() != 0:
            fakeroot = util.which("fakeroot")
            if not fakeroot:
                tap.diag("WARNING: fakeroot not found!")
            else:
                cmd.append("fakeroot")

            fakechroot = util.which("fakechroot")
            if not fakechroot:
                tap.diag("WARNING: fakechroot not found!")
            else:
                cmd.append("fakechroot")

        if pacman["gdb"]:
            cmd.extend(["libtool", "execute", "gdb", "--args"])
        if pacman["valgrind"]:
            suppfile = os.path.join(os.path.dirname(__file__),
                    '..', '..', 'valgrind.supp')
            cmd.extend(["libtool", "execute", "valgrind", "-q",
                "--tool=memcheck", "--leak-check=full",
                "--show-reachable=yes",
                "--gen-suppressions=all",
                "--child-silent-after-fork=yes",
                "--log-file=%s" % os.path.join(self.root, "var/log/valgrind"),
                "--suppressions=%s" % suppfile])
            self.addrule("FILE_EMPTY=var/log/valgrind")

        # replace program name with absolute path
        prog = pacman["bin"]
        if not prog:
            prog = util.which(self.cmd[0], pacman["bindir"])
        if not prog or not os.access(prog, os.X_OK):
            if not prog:
                tap.bail("could not locate '%s' binary" % (self.cmd[0]))
                return

        cmd.append(os.path.abspath(prog))
        cmd.extend(self.cmd[1:])
        if pacman["manual-confirm"]:
            cmd.append("--confirm")
        if pacman["debug"]:
            cmd.append("--debug=%s" % pacman["debug"])
        cmd.extend(shlex.split(self.args))

        if not (pacman["gdb"] or pacman["nolog"]):
            output = open(os.path.join(self.root, util.LOGFILE), 'w')
        else:
            output = None
        vprint("\trunning: %s" % " ".join(cmd))

        # Change to the tmp dir before running pacman, so that local package
        # archives are made available more easily.
        time_start = time.time()
        self.retcode = subprocess.call(cmd, stdout=output, stderr=output,
                cwd=os.path.join(self.root, util.TMPDIR), env={'LC_ALL': 'C'})
        time_end = time.time()
        vprint("\ttime elapsed: %.2fs" % (time_end - time_start))

        if output:
            output.close()

        vprint("\tretcode = %s" % self.retcode)

        # Check if the lock is still there
        if os.path.isfile(util.PM_LOCK):
            tap.diag("\tERROR: %s not removed" % util.PM_LOCK)
            os.unlink(util.PM_LOCK)
        # Look for a core file
        if os.path.isfile(os.path.join(self.root, util.TMPDIR, "core")):
            tap.diag("\tERROR: pacman dumped a core file")
Example #28
 def outcome(self, state):
     for _, uterm in loopthru(
             all_unflatten(state.term), I_UNFLATTEN,
             'select an unflattened variant of %s' % state.term):
         vprint(P_UNFLATTENED, "Unflattened %s to %s" % (state.term, uterm))
         for join in get_candidate_join_unfold_terms(self.lp, uterm):
             # temporarily using unflatten here
             solver_start = time()
             equiv = self.solver.equivalent(self.unfolded_term,
                                            unflatten(join.induced_term(2)))
             solver_end = time()
             self.stats.log_join(state, join, solver_end - solver_start,
                                 equiv)
             if equiv:
                 self.hits += 1
                 if self.post_verification(
                         join,
                         4):  # used to be self.post_verification(join, 2)
                     vprint(P_SUCCESS_PATH, "\nSuccessful sequence:")
                     rewrite_seq = '\n'.join([
                         '%s -%d->' % (term, choice + 1)
                         for choice, term in zip(
                             self.rule_choice_record,
                             reversed(state.get_predecessors()))
                     ])
                     vprint(P_SUCCESS_PATH, rewrite_seq)
                     vprint(P_SUCCESS_PATH, state)
                     return join
                 else:
                     vprint(
                         P_MAIN,
                         "### Join failed to pass post-verification tests ###"
                     )
                     vprint(P_MAIN, join)
     return None
Example #29
    def run(self, pacman):
        if os.path.isfile(util.PM_LOCK):
            print "\tERROR: another pacman session is on-going -- skipping"
            return

        print "==> Running test"
        vprint("\tpacman %s" % self.args)

        cmd = [""]
        if os.geteuid() != 0:
            fakeroot = util.which("fakeroot")
            if not fakeroot:
                print "WARNING: fakeroot not found!"
            else:
                cmd.append("fakeroot")

            fakechroot = util.which("fakechroot")
            if fakechroot:
                cmd.append("fakechroot")

        if pacman["gdb"]:
            cmd.append("libtool execute gdb --args")
        if pacman["valgrind"]:
            cmd.append(
                "valgrind -q --tool=memcheck --leak-check=full --show-reachable=yes --suppressions=%s/valgrind.supp"
                % os.getcwd())
        cmd.append("\"%s\" --config=\"%s\" --root=\"%s\" --dbpath=\"%s\" --cachedir=\"%s\"" \
                   % (pacman["bin"],
                       os.path.join(self.root, util.PACCONF),
                       self.root,
                       os.path.join(self.root, util.PM_DBPATH),
                       os.path.join(self.root, util.PM_CACHEDIR)))
        if not pacman["manual-confirm"]:
            cmd.append("--noconfirm")
        if pacman["debug"]:
            cmd.append("--debug=%s" % pacman["debug"])
        cmd.append("%s" % self.args)
        if not pacman["gdb"] and not pacman["valgrind"] and not pacman["nolog"]:
            cmd.append(">\"%s\" 2>&1" % os.path.join(self.root, util.LOGFILE))
        vprint("\trunning: %s" % " ".join(cmd))

        # Change to the tmp dir before running pacman, so that local package
        # archives are made available more easily.
        curdir = os.getcwd()
        tmpdir = os.path.join(self.root, util.TMPDIR)
        os.chdir(tmpdir)

        time_start = time.time()
        self.retcode = os.system(" ".join(cmd))
        time_end = time.time()
        vprint("\ttime elapsed: %ds" % (time_end - time_start))

        if self.retcode is None:
            self.retcode = 0
        else:
            self.retcode /= 256
        vprint("\tretcode = %s" % self.retcode)
        os.chdir(curdir)

        # Check if the lock is still there
        if os.path.isfile(util.PM_LOCK):
            print "\tERROR: %s not removed" % util.PM_LOCK
            os.unlink(util.PM_LOCK)
        # Look for a core file
        if os.path.isfile(os.path.join(self.root, util.TMPDIR, "core")):
            print "\tERROR: pacman dumped a core file"
Example #30
 def load(self):
     """
     Loads parsed knowledge base data from modifiable data text files into global fields
     Typically called right after object initialization
     """
     self._load_foods()
     util.vprint('Loading cooking terminology')
     self._load_cooking_terms()
     self._load_cooking_wares()
     self._load_measurements()
     self._load_common_substitutions()
     self._load_style_tags()
     self._load_style_substitutions()
     util.vprint('Finished loading:')
     util.vprint('\t%s foods' % str(len(self.foods)))
     util.vprint('\t%s cooking wares' % str(len(self.cooking_wares)))
     util.vprint('\t%s measurements' % str(len(self.measurements)))
     util.vprint('\t%s italian to mexican' %
                 str(len(self.italian_to_mexican_list)))
     util.vprint('\t%s italian to asian' %
                 str(len(self.italian_to_asian_list)))
     util.vprint('\t%s asian to mexican' %
                 str(len(self.asian_to_mexican_list)))
     util.vprint('\t%s asian to italian' %
                 str(len(self.asian_to_italian_list)))
     util.vprint('\t%s mexican to italian' %
                 str(len(self.mexican_to_italian_list)))
     util.vprint('\t%s mexican to asian' %
                 str(len(self.mexican_to_asian_list)))
     util.vprint('\t%s common substitutions' %
                 str(len(self.common_substitutions)))
     util.vprint('\t%s vegan substitutions' %
                 str(len(self.vegan_substitutions)))
     util.vprint('\t%s vegetarian substitutions' %
                 str(len(self.vegetarian_substitutions)))
Example #31
    def generate(self, pacman):
        print "==> Generating test environment"

        # Cleanup leftover files from a previous test session
        if os.path.isdir(self.root):
            shutil.rmtree(self.root)
        vprint("\t%s" % self.root)

        # Create directory structure
        vprint("    Creating directory structure:")
        dbdir = os.path.join(self.root, util.PM_SYNCDBPATH)
        cachedir = os.path.join(self.root, util.PM_CACHEDIR)
        syncdir = os.path.join(self.root, util.SYNCREPO)
        tmpdir = os.path.join(self.root, util.TMPDIR)
        logdir = os.path.join(self.root, os.path.dirname(util.LOGFILE))
        etcdir = os.path.join(self.root, os.path.dirname(util.PACCONF))
        bindir = os.path.join(self.root, "bin")
        ldconfig = os.path.basename(pacman["ldconfig"])
        ldconfigdir = os.path.join(self.root, os.path.dirname(pacman["ldconfig"][1:]))
        shell = pacman["scriptlet-shell"][1:]
        shelldir = os.path.join(self.root, os.path.dirname(shell))
        sys_dirs = [dbdir, cachedir, syncdir, tmpdir, logdir, etcdir, bindir,
                    ldconfigdir, shelldir]
        for sys_dir in sys_dirs:
            if not os.path.isdir(sys_dir):
                vprint("\t%s" % sys_dir[len(self.root)+1:])
                os.makedirs(sys_dir, 0755)
        # Only the dynamically linked binary is needed for fakechroot
        shutil.copy("/bin/sh", bindir)
        if shell != "bin/sh":
            shutil.copy("/bin/sh", os.path.join(self.root, shell))
        shutil.copy(os.path.join(util.SELFPATH, "ldconfig.stub"),
            os.path.join(ldconfigdir, ldconfig))
        ld_so_conf = open(os.path.join(etcdir, "ld.so.conf"), "w")
        ld_so_conf.close()

        # Configuration file
        vprint("    Creating configuration file")
        util.mkcfgfile(util.PACCONF, self.root, self.option, self.db)

        # Creating packages
        vprint("    Creating package archives")
        for pkg in self.localpkgs:
            vprint("\t%s" % os.path.join(util.TMPDIR, pkg.filename()))
            pkg.finalize()
            pkg.makepkg(tmpdir)
        for key, value in self.db.iteritems():
            for pkg in value.pkgs:
                pkg.finalize()
            if key == "local" and not self.createlocalpkgs:
                continue
            for pkg in value.pkgs:
                vprint("\t%s" % os.path.join(util.PM_CACHEDIR, pkg.filename()))
                if self.cachepkgs:
                    pkg.makepkg(cachedir)
                else:
                    pkg.makepkg(os.path.join(syncdir, value.treename))
                pkg.md5sum = util.getmd5sum(pkg.path)
                pkg.csize = os.stat(pkg.path)[stat.ST_SIZE]

        # Creating sync database archives
        vprint("    Creating databases")
        for key, value in self.db.iteritems():
            vprint("\t" + value.treename)
            value.generate()

        # Filesystem
        vprint("    Populating file system")
        for f in self.filesystem:
            vprint("\t%s" % f)
            util.mkfile(self.root, f, f)
            path = os.path.join(self.root, f)
            if os.path.isfile(path):
                os.utime(path, (355, 355))
        for pkg in self.db["local"].pkgs:
            vprint("\tinstalling %s" % pkg.fullname())
            pkg.install_package(self.root)

        # Done.
        vprint("    Taking a snapshot of the file system")
        for roots, dirs, files in os.walk(self.root):
            for i in files:
                filename = os.path.join(roots, i)
                f = pmfile.PacmanFile(self.root, filename.replace(self.root + "/", ""))
                self.files.append(f)
                vprint("\t%s" % f.name)
Example #33
    def run(self, pacman):
        if os.path.isfile(util.PM_LOCK):
            tap.bail("\tERROR: another pacman session is on-going -- skipping")
            return

        tap.diag("==> Running test")
        vprint("\tpacman %s" % self.args)

        cmd = []
        if os.geteuid() != 0:
            fakeroot = util.which("fakeroot")
            if not fakeroot:
                tap.diag("WARNING: fakeroot not found!")
            else:
                cmd.append("fakeroot")

            fakechroot = util.which("fakechroot")
            if not fakechroot:
                tap.diag("WARNING: fakechroot not found!")
            else:
                cmd.append("fakechroot")

        if pacman["gdb"]:
            cmd.extend(["libtool", "execute", "gdb", "--args"])
        if pacman["valgrind"]:
            suppfile = os.path.join(os.path.dirname(__file__), '..', '..',
                                    'valgrind.supp')
            cmd.extend([
                "libtool", "execute", "valgrind", "-q", "--tool=memcheck",
                "--leak-check=full", "--show-reachable=yes",
                "--gen-suppressions=all", "--child-silent-after-fork=yes",
                "--log-file=%s" % os.path.join(self.root, "var/log/valgrind"),
                "--suppressions=%s" % suppfile
            ])
            self.addrule("FILE_EMPTY=var/log/valgrind")

        # replace program name with absolute path
        prog = pacman["bin"]
        if not prog:
            prog = util.which(self.cmd[0], pacman["bindir"])
        if not prog or not os.access(prog, os.X_OK):
            if not prog:
                tap.bail("could not locate '%s' binary" % (self.cmd[0]))
                return

        cmd.append(os.path.abspath(prog))
        cmd.extend(self.cmd[1:])
        if pacman["manual-confirm"]:
            cmd.append("--confirm")
        if pacman["debug"]:
            cmd.append("--debug=%s" % pacman["debug"])
        cmd.extend(shlex.split(self.args))

        if not (pacman["gdb"] or pacman["nolog"]):
            output = open(os.path.join(self.root, util.LOGFILE), 'w')
        else:
            output = None
        vprint("\trunning: %s" % " ".join(cmd))

        # Change to the tmp dir before running pacman, so that local package
        # archives are made available more easily.
        time_start = time.time()
        self.retcode = subprocess.call(cmd,
                                       stdout=output,
                                       stderr=output,
                                       cwd=os.path.join(
                                           self.root, util.TMPDIR),
                                       env={'LC_ALL': 'C'})
        time_end = time.time()
        vprint("\ttime elapsed: %.2fs" % (time_end - time_start))

        if output:
            output.close()

        vprint("\tretcode = %s" % self.retcode)

        # Check if the lock is still there
        if os.path.isfile(util.PM_LOCK):
            tap.diag("\tERROR: %s not removed" % util.PM_LOCK)
            os.unlink(util.PM_LOCK)
        # Look for a core file
        if os.path.isfile(os.path.join(self.root, util.TMPDIR, "core")):
            tap.diag("\tERROR: pacman dumped a core file")
Example #34
    def run(self, pacman):
        if os.path.isfile(util.PM_LOCK):
            print "\tERROR: another pacman session is on-going -- skipping"
            return

        print "==> Running test"
        vprint("\tpacman %s" % self.args)

        cmd = [""]
        if os.geteuid() != 0:
            fakeroot = util.which("fakeroot")
            if not fakeroot:
                print "WARNING: fakeroot not found!"
            else:
                cmd.append("fakeroot")

            fakechroot = util.which("fakechroot")
            if fakechroot:
                cmd.append("fakechroot")

        if pacman["gdb"]:
            cmd.append("libtool execute gdb --args")
        if pacman["valgrind"]:
            cmd.append("valgrind -q --tool=memcheck --leak-check=full --show-reachable=yes --suppressions=%s/valgrind.supp" % os.getcwd())
        cmd.append("\"%s\" --config=\"%s\" --root=\"%s\" --dbpath=\"%s\" --cachedir=\"%s\"" \
                   % (pacman["bin"],
                       os.path.join(self.root, util.PACCONF),
                       self.root,
                       os.path.join(self.root, util.PM_DBPATH),
                       os.path.join(self.root, util.PM_CACHEDIR)))
        if not pacman["manual-confirm"]:
            cmd.append("--noconfirm")
        if pacman["debug"]:
            cmd.append("--debug=%s" % pacman["debug"])
        cmd.append("%s" % self.args)
        if not pacman["gdb"] and not pacman["valgrind"] and not pacman["nolog"]: 
            cmd.append(">\"%s\" 2>&1" % os.path.join(self.root, util.LOGFILE))
        vprint("\trunning: %s" % " ".join(cmd))

        # Change to the tmp dir before running pacman, so that local package
        # archives are made available more easily.
        curdir = os.getcwd()
        tmpdir = os.path.join(self.root, util.TMPDIR)
        os.chdir(tmpdir)

        time_start = time.time()
        self.retcode = os.system(" ".join(cmd))
        time_end = time.time()
        vprint("\ttime elapsed: %ds" % (time_end - time_start))

        if self.retcode is None:
            self.retcode = 0
        else:
            self.retcode /= 256
        vprint("\tretcode = %s" % self.retcode)
        os.chdir(curdir)

        # Check if the lock is still there
        if os.path.isfile(util.PM_LOCK):
            print "\tERROR: %s not removed" % util.PM_LOCK
            os.unlink(util.PM_LOCK)
        # Look for a core file
        if os.path.isfile(os.path.join(self.root, util.TMPDIR, "core")):
            print "\tERROR: pacman dumped a core file"
Example #35
    def run(self, pacman):
        if os.path.isfile(util.PM_LOCK):
            print "\tERROR: another pacman session is on-going -- skipping"
            return

        print "==> Running test"
        vprint("\tpacman %s" % self.args)

        cmd = []
        if os.geteuid() != 0:
            fakeroot = util.which("fakeroot")
            if not fakeroot:
                print "WARNING: fakeroot not found!"
            else:
                cmd.append("fakeroot")

            fakechroot = util.which("fakechroot")
            if not fakechroot:
                print "WARNING: fakechroot not found!"
            else:
                cmd.append("fakechroot")

        if pacman["gdb"]:
            cmd.extend(["libtool", "execute", "gdb", "--args"])
        if pacman["valgrind"]:
            suppfile = os.path.join(os.path.dirname(__file__),
                    '..', '..', 'valgrind.supp')
            cmd.extend(["libtool", "execute", "valgrind", "-q",
                "--tool=memcheck", "--leak-check=full",
                "--show-reachable=yes",
                "--suppressions=%s" % suppfile])
        cmd.extend([pacman["bin"],
            "--config", os.path.join(self.root, util.PACCONF),
            "--root", self.root,
            "--dbpath", os.path.join(self.root, util.PM_DBPATH),
            "--cachedir", os.path.join(self.root, util.PM_CACHEDIR)])
        if not pacman["manual-confirm"]:
            cmd.append("--noconfirm")
        if pacman["debug"]:
            cmd.append("--debug=%s" % pacman["debug"])
        cmd.extend(shlex.split(self.args))
        if not (pacman["gdb"] or pacman["valgrind"] or pacman["nolog"]):
            output = open(os.path.join(self.root, util.LOGFILE), 'w')
        else:
            output = None
        vprint("\trunning: %s" % " ".join(cmd))

        # Change to the tmp dir before running pacman, so that local package
        # archives are made available more easily.
        time_start = time.time()
        self.retcode = subprocess.call(cmd, stdout=output, stderr=output,
                cwd=os.path.join(self.root, util.TMPDIR), env={'LC_ALL': 'C'})
        time_end = time.time()
        vprint("\ttime elapsed: %.2fs" % (time_end - time_start))

        if output:
            output.close()

        vprint("\tretcode = %s" % self.retcode)

        # Check if the lock is still there
        if os.path.isfile(util.PM_LOCK):
            print "\tERROR: %s not removed" % util.PM_LOCK
            os.unlink(util.PM_LOCK)
        # Look for a core file
        if os.path.isfile(os.path.join(self.root, util.TMPDIR, "core")):
            print "\tERROR: pacman dumped a core file"
Example #36
                            contour_img[:, :, 0])
                output_img = (255 * (1 + output[i]) / 2).astype(int)
                skimage.io.imsave(
                    os.path.join(
                        result_path,
                        'img_' + str(ii) + '_' + str(iters // 1000) + '.jpg'),
                    output_img)
                ii += 1

        show_test_vals = [loss_G_val]
        if args.pre == 1:
            show_test_vals = [
                loss_recon_val, loss_G_val, loss_D_val, loss_adv_G_val
            ]
        util.vprint(iters, ['gen_lr', 'dis_lr'] + show_test_nms,
                    [gen_learning_rate_val, dis_learning_rate_val] +
                    show_test_vals)

        if np.isnan(loss_G_val.min()) or np.isnan(loss_G_val.max()):
            print("NaN detected!!")
            os._exit(1)

    image_paths = [
        trainset[idx[(iters % train_batch_nums) * batch_size + b]]
        for b in range(batch_size)
    ]

    if args.pre == 1:
        for _ in range(2):
            _, loss_D_val = sess.run([train_op_D, loss_D],
                                     feed_dict=d.dict(image_paths,