def main():
    sys.argv = FLAGS(sys.argv)
    stdlog.setup()

    db = pymongo.Connection()
    # unique_cache keeps one representative program per output vector
    # (last write wins); master_cache keeps every (outputs, program) pair.
    db.unique_cache.cache.ensure_index(
        [('expected', pymongo.ASCENDING)])
    db.master_cache.cache.ensure_index(
        [('expected', pymongo.ASCENDING), ('program', pymongo.ASCENDING)])

    first_line = sys.stdin.readline()
    assert first_line.startswith('argument:')
    arguments = [int(s, 0) for s in first_line.split()[1].split(',')]
    assert arguments == INABA_KEY, 'not inaba key'
    print 'ok'

    for line in sys.stdin:
        line = line.strip()
        if line.startswith('expected:'):
            expected = [int(s, 0) for s in line.split()[1].split(',')]
        else:
            db.unique_cache.cache.update(
                {'expected': expected}, {'$set': {'program': line}},
                upsert=True)
            db.master_cache.cache.update(
                {'expected': expected, 'program': line}, {'$set': {}},
                upsert=True)
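
# A minimal sketch (not part of the original file) of the stdin protocol the
# cache server above consumes, written as a hypothetical feeder function: the
# 'argument:' header must carry INABA_KEY, each 'expected:' line announces an
# output vector, and every following non-'expected:' line is a program cached
# under that vector.
def _FeedCacheServerExample(pipe):
    pipe.write('argument: %s\n' % ','.join('0x%x' % a for a in INABA_KEY))
    pipe.write('expected: 0x0,0x1,0x2\n')
    pipe.write('(lambda (x) x)\n')
    pipe.flush()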
def main():
    sys.argv = FLAGS(sys.argv)
    stdlog.setup()

    # Solver existence checks
    assert os.path.exists(FLAGS.alice_solver)

    problems = frontend_util.GetProblemsByFlags()

    alice = Alice()

    for index, problem in enumerate(problems):
        if os.path.exists(BAILOUT_FILE):
            logging.info('')
            logging.info('Bailed out.')
            sys.exit(0)
        logging.info('******** PROBLEM %d/%d: %r ********',
                     index + 1, len(problems), problem)
        logging.info('Flag to recover: --problem_id=%s --size=%d --operators=%s',
                     problem.id, problem.size, ','.join(problem.operators))
        try:
            with open(os.path.join(FLAGS.detail_log_dir,
                                   '%s.txt' % problem.id), 'w') as detail:
                Solve(problem, alice, detail)
        except api.Expired:
            logging.error('')
            logging.error(' !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ')
            logging.error('')
            logging.error('   P R O B L E M   E X P I R E D')
            logging.error('')
            logging.error(' !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ')
            logging.error('')
            if not FLAGS.keep_going:
                raise
def main():
    sys.argv = FLAGS(sys.argv)
    stdlog.setup()

    problems = ReadProblemset()
    q = Queue.Queue()
    for problem in problems:
        q.put(problem)
    for _ in xrange(FLAGS.threads):
        th = threading.Thread(target=Worker, args=(q,))
        th.daemon = True
        th.start()
    q.join()
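
# A minimal sketch of a compatible Worker; the real definition lives
# elsewhere, and SolveProblem is a hypothetical stand-in for the per-problem
# entry point. The q.task_done() call is what lets q.join() in main() return
# once every queued problem has been processed, since the daemonized threads
# themselves never exit this loop.
def Worker(q):
    while True:
        problem = q.get()
        try:
            SolveProblem(problem)
        finally:
            q.task_done()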
def main():
    sys.argv = FLAGS(sys.argv)
    stdlog.setup()

    # Solver existence checks
    assert os.path.exists(FLAGS.genall_solver)

    problems = frontend_util.GetProblemsByFlags()

    for index, problem in enumerate(problems):
        logging.info('******** PROBLEM %d/%d: %r ********',
                     index + 1, len(problems), problem)
        solver_output = subprocess.check_output(
            [FLAGS.genall_solver,
             '--size=%d' % problem.size,
             '--operators=%s' % ','.join(problem.operators)])
        programs = solver_output.splitlines()
        logging.info('Candidate programs: %d', len(programs))
        BruteForceGuess(problem, programs)
def main():
    sys.argv = FLAGS(sys.argv)
    stdlog.setup()

    # Solver existence checks
    assert os.path.exists(FLAGS.cluster_solver)
    assert os.path.exists(FLAGS.batch_evaluate_solver)

    problems = frontend_util.GetProblemsByFlags()

    for index, problem in enumerate(problems):
        logging.info('******** PROBLEM %d/%d: %r ********',
                     index + 1, len(problems), problem)
        arguments, clusters = RunClusterSolver(problem)
        cluster_sizes_decreasing = sorted(
            [len(programs) for _, programs in clusters], reverse=True)
        logging.info('Candidate programs: %d', sum(cluster_sizes_decreasing))
        logging.info('Candidate clusters: %d', len(clusters))
        #logging.info('Cluster sizes: %s',
        #             ', '.join(map(str, cluster_sizes_decreasing)))
        if (FLAGS.max_cluster_size > 0 and
                cluster_sizes_decreasing[0] > FLAGS.max_cluster_size):
            logging.error('Maximum cluster size was above threshold (%d)',
                          FLAGS.max_cluster_size)
            logging.error('Stop.')
            sys.exit(1)
        logging.info('Issuing /eval...')
        outputs = api.Eval(problem.id, arguments)
        clusters_map = dict([(tuple(e), c) for e, c in clusters])
        programs = clusters_map.get(tuple(outputs), [])
        #logging.info('%r -> %r', arguments, outputs)
        logging.info('Selected a cluster with population=%d', len(programs))
        BruteForceGuessOrDie(problem, programs)
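
# A self-contained toy (made-up values) of the cluster lookup above: each
# cluster pairs the outputs its candidate programs produce on `arguments`
# with those programs, so a single /eval round selects exactly the cluster
# consistent with the hidden program.
def _ClusterLookupExample():
    clusters = [([0x0, 0x1], ['(lambda (x) x)']),
                ([0x1, 0x2], ['(lambda (x) (plus x 1))'])]
    outputs = [0x1, 0x2]  # as if returned by api.Eval
    clusters_map = dict([(tuple(e), c) for e, c in clusters])
    return clusters_map.get(tuple(outputs), [])  # -> the plus-1 cluster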
def main():
    sys.argv = FLAGS(sys.argv)
    stdlog.setup()

    # Solver existence checks
    assert os.path.exists(FLAGS.cluster_solver)
    assert os.path.exists(FLAGS.batch_evaluate_solver)

    problems = frontend_util.GetProblemsByFlags()

    for index, problem in enumerate(problems):
        if os.path.exists(BAILOUT_FILE):
            logging.info('')
            logging.info('Bailed out.')
            sys.exit(0)
        logging.info('******** PROBLEM %d/%d: %r ********',
                     index + 1, len(problems), problem)
        logging.info('Flag to recover: --problem_id=%s --size=%d --operators=%s',
                     problem.id, problem.size, ','.join(problem.operators))
        arguments, clusters = RunClusterSolver(problem)
        cluster_sizes_decreasing = sorted(
            [len(programs) for _, programs, _, _ in clusters], reverse=True)
        logging.info('Candidate programs: %d', sum(cluster_sizes_decreasing))
        logging.info('Candidate clusters: %d', len(clusters))
        #logging.info('Cluster sizes: %s',
        #             ', '.join(map(str, cluster_sizes_decreasing)))
        logging.info('Issuing /eval...')
        outputs = api.Eval(problem.id, arguments)
        for c in clusters:
            c[3] = ComputeAtteruBitVector(c[0], outputs)

        # Make mappings keyed by bit vectors: c_map groups programs by their
        # as-condition pattern, t_map by their as-then/else (output match)
        # pattern.
        c_map = dict()
        for (_, p, c_bv, t_bv) in clusters:
            c_map.setdefault(c_bv, []).append(p[0])
        t_map = dict()
        for (_, p, c_bv, t_bv) in clusters:
            t_map.setdefault(t_bv, []).append(p[0])

        logging.info('C-T-E finder loop for %d (as-cond-pat: %d, '
                     'as-te-pat: %d) clusters...',
                     len(clusters), len(c_map), len(t_map))
        fullbits = (1 << len(outputs)) - 1
        cand = []
        # "then" candidates that already match at least half of the outputs.
        high_t_map = dict()
        for (t_bv, t_progs) in t_map.items():
            bitnum = bin(t_bv).count('1')
            if bitnum >= (len(outputs) + 1) / 2:
                high_t_map[t_bv] = t_progs
        if len(high_t_map) < 500:
            # First filter by high-bit candidates.
            for (t_bv, t_progs) in high_t_map.items():  # find high bitters
                for (e_bv, e_progs) in t_map.items():
                    if (t_bv | e_bv) == fullbits:  # must cover all cases
                        for (c_bv, c_progs) in c_map.items():
                            c_bv_inv = ~c_bv & fullbits
                            if (c_bv & t_bv) == c_bv and \
                                    (c_bv_inv & e_bv) == c_bv_inv:  # c,t,e
                                for c in c_progs:
                                    for t in t_progs:
                                        for e in e_progs:
                                            cand.append((c, t, e))
                            if (c_bv & e_bv) == c_bv and \
                                    (c_bv_inv & t_bv) == c_bv_inv:  # c,e,t
                                for c in c_progs:
                                    for e in e_progs:
                                        for t in t_progs:
                                            cand.append((c, e, t))
        else:
            for (c_bv, c_progs) in c_map.items():
                ok_as_then = []
                ok_as_else = []
                c_bv_inv = ~c_bv & fullbits
                for (t_bv, t_progs) in t_map.items():
                    if (c_bv & t_bv) == c_bv:
                        ok_as_then.extend(t_progs)
                    if (c_bv_inv & t_bv) == c_bv_inv:
                        ok_as_else.extend(t_progs)
                for c in c_progs:
                    for t in ok_as_then:
                        for e in ok_as_else:
                            cand.append((c, t, e))

        logging.info('Candidate size: %d', len(cand))
        try_counter_example_limit = 10000
        if len(cand) > try_counter_example_limit:
            cand = random.sample(cand, try_counter_example_limit)
            logging.info('Reduced candidate size: %d', len(cand))
        programs = [ConstructProgram(p) for p in cand]
        BruteForceGuessOrDie(problem, programs)
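
# Hedged sketches (assumptions, not the real definitions) of the two helpers
# the C-T-E loop above depends on.

def ComputeAtteruBitVector(cluster_outputs, outputs):
    # Presumed contract: bit i is set iff this cluster's output on argument i
    # agrees with the observed /eval output ("atteru" ~ "it matches" in
    # Japanese). Bitwise tests over these vectors then decide whether a
    # program works as a condition, then-branch or else-branch.
    bv = 0
    for i, (got, want) in enumerate(zip(cluster_outputs, outputs)):
        if got == want:
            bv |= 1 << i
    return bv


def ConstructProgram(cte):
    # Presumed contract: splice a (condition, then, else) triple of
    # '(lambda (x) BODY)' strings into one if0 program. The exact surface
    # syntax handling here is an assumption.
    def body(program):
        assert program.startswith('(lambda (x) ') and program.endswith(')')
        return program[len('(lambda (x) '):-1]
    c, t, e = cte
    return '(lambda (x) (if0 %s %s %s))' % (body(c), body(t), body(e))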