def main():
    set_namespace('env_stats')

    parser = OptionParser()
    parser.add_option("--db", default='flydra_db', help="FlydraDB directory")
    (options, args) = parser.parse_args()  # @UnusedVariable

    db = FlydraDB(options.db, False)
    outdir = os.path.join(options.db, 'out/environment_stats')

    images = ["luminance", "contrast", "luminance_w", "contrast_w",
              "hluminance_w", "hcontrast_w"]

    for image in images:
        samples = [x for x in db.list_samples()
                   if db.get_attr(x, 'stimulus', None) != 'nopost'
                   and db.has_table(x, image)]

        if not samples:
            print "No samples for %s" % image
            continue

        comp_prefix(image)
        data = comp(compute_environment_autocorrelation, options.db,
                    samples, image)
        comp(create_report, data, image, outdir)

    db.close()

    compmake_console()
def main():
    from compmake import comp, compmake_console, use_filesystem

    use_filesystem(os.path.join(Const.signals_dir, 'compmake'))

    for id_video, id_filter in itertools.product(Const.videos, Const.filters):
        if should_process(id_video, id_filter):
            comp(extract_signals, id_video, id_filter,
                 job_id='extract-%s-%s' % (id_video, id_filter))

    compmake_console()
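# A minimal, self-contained sketch of the compmake pattern used throughout
# this file: comp() records a job (a function plus its arguments) instead of
# calling it, and compmake_console() opens the interactive prompt where jobs
# can be run (e.g. with `make` or `parmake`). Only API calls that already
# appear above are used; `double` is a hypothetical example function.
from compmake import comp, compmake_console, use_filesystem

def double(x):
    return 2 * x

def sketch_main():
    use_filesystem('out/sketch-compmake')        # where job state is stored
    for i in range(3):
        comp(double, i, job_id='double-%d' % i)  # define the job, don't run it
    compmake_console()                           # run jobs interactively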
def rlearn(config, parser):  # @UnusedVariable
    """ Displays the learned DDS """
    t0 = time.time()

    parser.add_option("-n", "--nthreads", help="Number of threads",
                      type='int', default=4)
    parser.add_option("-r", "--nrefine", type='int', default=2,
                      help="Number of times to refine learning")
    parser.add_option("-s", "--streams", help="Which streams to use.",
                      default="*")
    parser.add_option("-i", "--comb", default="default")
    parser.add_option("-l", "--learners", help="Learner config.", default="*")
    parser.add_option("-o", "--output", help="Output directory",
                      default='out/dp-rlearn/')
    parser.add_option("--sensels", default=None,
                      help="Sensel indices to use for debugging the refining module")
    parser.add_option("-c", "--command",
                      help="Command to pass to compmake for batch mode")
    parser.add_option("--show", default=None, help="Name of learners to report")

    options = parser.parse_options()

    if options.show is not None:
        diffeomorphism2d_continuous.make_report(options.show.split(','))
        sys.exit()

    nthreads = options.nthreads
    nrefine = options.nrefine

    learners = config.learners.expand_names(options.learners)
    streams = config.streams.expand_names(options.streams)

    if len(learners) > 1:
        logger.warn('Multiple learners are not supported for now')
    if len(streams) > 1:
        logger.warn('Multiple streams are not supported for now')

    id_comb = ",".join(streams) + "-" + ",".join(learners)
    outdir = os.path.join(options.output, id_comb)
    storage = os.path.join(outdir, 'compmake')
    use_filesystem(storage)
    read_rc_files()

    rm = ReportManager(os.path.join(outdir, 'reports'))

    jobs_rlearn(config, rm, learners, streams, outdir, nthreads, nrefine,
                options.sensels)

    # Time and report the learning
    comp(learning_times_rlearn, outdir, learners, streams, nthreads, nrefine)

    rm.create_index_job()

    logger.info("Jobs defined after %.1f seconds." % (time.time() - t0))

    if options.command:
        return batch_command(options.command)
    else:
        compmake_console()
        return 0
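# The batch-versus-interactive tail shared by rlearn() and the other dp_*
# entry points below, factored out as a sketch (assumes the same
# batch_command / compmake_console functions used above):
def run_or_console(command=None):
    if command:
        # non-interactive: run a single compmake command, return its result
        return batch_command(command)
    else:
        # otherwise drop into the interactive console
        compmake_console()
        return 0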
def main():
    for log in logs:
        for script in scripts:
            job_id = '%s-%s' % (script, log)
            # config = {'logdir': "${PBENV_DATA}/rawseeds/%s" % log}
            config = {'logdir': log}
            comp(pg, script, config=config, job_id=job_id)

    compmake_console()
def main():
    parser = OptionParser()

    parser.add_option("--db", default='flydra_db', help="Data directory")

    parser.add_option("--image", default="luminance",
                      help="Rendered image to use -- "
                           "corresponding to image 'saccades_view_{start,stop}_X'")

    parser.add_option("--interactive",
                      help="Start an interactive compmake session."
                           " Otherwise run in batch mode.",
                      default=False, action="store_true")

    (options, args) = parser.parse_args()  # @UnusedVariable

    if options.db is None:
        logger.error('Please specify a directory using --db.')
        sys.exit(-1)

    view_start = 'saccades_view_start_%s' % options.image
    view_stop = 'saccades_view_stop_%s' % options.image
    view_rstop = 'saccades_view_rstop_%s' % options.image

    db = FlydraDB(options.db, False)

    # all samples with enough data
    all_available = lambda x: db.has_saccades(x) and \
        db.has_table(x, view_start) and \
        db.has_table(x, view_stop) and \
        db.has_table(x, view_rstop)

    samples = filter(all_available, db.list_samples())

    set_namespace('saccade_view_show_%s' % options.image)

    for sample in samples:
        comp_prefix(sample)
        comp(create_and_write_report, options.db, sample, options.image)

    if options.interactive:
        # start interactive session
        compmake_console()
    else:
        # batch mode: try to do everything
        batch_command('make all')
        # exit with an error code if we are not done
        # (that is, make all failed for some reason)
        todo = list(parse_job_list('todo'))
        if todo:
            logger.info('Still %d jobs to do.' % len(todo))
            sys.exit(-2)
def dp_predstats_main(config, parser):
    parser.add_option("-o", "--output", default='out/dp-pred-stats',
                      help="Output directory")
    parser.add_option("-d", "--distances", default='*',
                      help="Comma-separated list of distances. Can use *.")
    parser.add_option("-S", "--dds",
                      help="Comma-separated list of diffeosystems.")
    parser.add_option("-s", "--streams",
                      help="Comma-separated list of streams.")
    parser.add_option("-c", "--command",
                      help="Command to pass to compmake for batch mode")

    options = parser.parse_options()

    if not options.streams:
        msg = 'Please specify streams using -s.'
        raise UserError(msg)

    if not options.dds:
        msg = 'Please specify which discdds to use.'
        raise UserError(msg)

    distances = config.distances.expand_names(options.distances)
    streams = config.streams.expand_names(options.streams)
    dds = config.discdds.expand_names(options.dds)

    logger.info('Using distances: %s' % distances)
    logger.info('Using streams: %s' % streams)
    logger.info('Using discdds: %s' % dds)

    outdir = '%s/%s' % (options.output, options.dds)
    storage = os.path.join(outdir, 'compmake')
    use_filesystem(storage)
    read_rc_files()

    rm = ReportManager(os.path.join(outdir, "reports"))

    create_predstats_jobs(config=config, distances=distances,
                          id_discdds=dds, streams=streams, rm=rm, maxd=10)

    rm.create_index_job()

    if options.command:
        return batch_command(options.command)
    else:
        compmake_console()
        return 0
def dp_dist_stats(config, parser):  # @UnusedVariable
    """ Computes statistics of image distances for different plan steps. """
    parser.add_option("-o", "--output", default='out/dp-dist-stats',
                      help="Output directory")
    parser.add_option("-d", "--distances", default='*',
                      help="Comma-separated list of distances. Can use *.")
    parser.add_option("-s", "--streams", default='*',
                      help="Comma-separated list of streams. Can use *.")
    parser.add_option("-r", "--repeat", default=1, type='int',
                      help="Repeat many times.")
    parser.add_option("-c", "--command",
                      help="Command to pass to compmake for batch mode")

    options = parser.parse_options()

    distances = natsorted(config.distances.expand_names(options.distances))
    streams = natsorted(config.streams.expand_names(options.streams))

    logger.info('Using distances: %s' % distances)
    logger.info('Using streams: %s' % streams)

    id_comb = ','.join(streams) + '-' + ','.join(distances)
    outdir = os.path.join(options.output, id_comb)
    storage = os.path.join(outdir, 'compmake')
    use_filesystem(storage)
    read_rc_files()

    rm = ReportManager(os.path.join(outdir, "reports"))

    create_diststats_jobs(config=config, distances=distances, streams=streams,
                          rm=rm, maxd=10)

    rm.create_index_job()

    if options.command:
        return batch_command(options.command)
    else:
        compmake_console()
        return 0
def main():
    use_filesystem(os.path.join(Const.signals_dir, 'compmake_stats'))

    # signals = list_signals()
    # only do the compound ones
    # signals = Const.osets.keys()

    for id_oset, id_filter, id_stat in itertools.product(
            Const.osets, Const.filters, Const.stats):
        signal = '%s-%s' % (id_oset, id_filter)
        comp(compute_and_write_stats, signal, id_stat,
             job_id='stats-%s-%s' % (signal, id_stat))

    compmake_console()
def main():
    set_namespace('hdf2bpi_all')

    parser = OptionParser()
    parser.add_option("--model", default=None, help="Only do this model")
    (options, args) = parser.parse_args()  # @UnusedVariable

    if args:
        raise Exception('Extra arguments')

    Script = namedtuple('Script', 'job_prefix model input file_pattern params')

    scripts = [
        Script('3cams', 'rawseeds2bpi_3cams',
               '{logid}.h5', '{logid}.camera.bpi', {}),
        Script('4lasers', 'rawseeds2bpi_4lasers',
               '{logid}.h5', '{logid}.4lasers.bpi', {}),
        Script('frontal', 'rawseeds2bpi_frontal',
               '{logid}.h5', '{logid}.frontal.bpi', {}),
        Script('sick_extract', 'bpi_extract',
               '{logid}.4lasers.bpi', '{logid}.sick.bpi', {}),
        Script('sickpc', 'hdf_wrap_bpi_filter',
               '{logid}.sick.bpi', '{logid}.sickpc.bpi',
               {'bpi_filter': 'bpi_popcode',
                'bpi_filter.edges': 'edges_sick.pickle'}),
        Script('sickpc_all', 'hdf_wrap_bpi_filter',
               '{logid}.sick.bpi', '{logid}.sickpca.bpi',
               {'bpi_filter': 'bpi_popcode',
                'bpi_filter.edges': 'edges_sick-all.pickle'}),
    ]

    if not os.path.exists(hdf_dir):
        raise Exception('Input dir %r does not exist.' % hdf_dir)

    for log in logs:
        for script in scripts:
            hdf = os.path.join(hdf_dir, script.input.format(logid=log))
            bpi = os.path.join(hdf_dir, script.file_pattern.format(logid=log))

            job_id = '%s-%s' % (script.job_prefix, log)
            # if os.path.exists(bpi):
            #     print('File %r already exists; skipping creation of job %r.'
            #           % (bpi, job_id))
            #     continue

            comp(convert_hdf2bpi, script.model, hdf, bpi, script.params,
                 job_id=job_id)

    compmake_console()
def main():
    use_filesystem(os.path.join(Const.signals_dir, 'compmake_join'))

    sets = {}
    for fname in Const.filters.keys():
        mkname = lambda x: '%s-%s' % (x, fname)
        for master, pieces in Const.osets.items():
            sets[mkname(master)] = [mkname(x) for x in pieces]

    for master, pieces in sets.items():
        comp(join_signals, master, pieces, job_id='join-%s' % master)

    compmake_console()
def dp_batch_main(config, parser):
    """ Runs batch planning experiments from batch configuration files. """
    parser.add_option("-o", "--output", default='out/dp-batch',
                      help="Output directory")
    parser.add_option("-c", "--command",
                      help="Command to pass to compmake for batch mode")

    options, which = parser.parse()

    if not which:
        todo = config.sets.keys()
        id_comb = 'all'
    else:
        todo = config.sets.expand_names(which)
        id_comb = "+".join(sorted(todo))

    logger.info('Batch sets to do: %s' % todo)

    outdir = os.path.join(options.output, 'set-%s' % id_comb)

    # Compmake storage for results
    storage = os.path.join(outdir, 'compmake')
    use_filesystem(storage)
    read_rc_files()

    for id_set in todo:
        logger.info('Instantiating batch set %s' % id_set)
        spec = config.sets[id_set]
        try:
            algos = config.algos.expand_names(spec['algorithms'])
            testcases = config.testcases.expand_names(spec['testcases'])
            comp_prefix('%s' % id_set)
            b_outdir = os.path.join(outdir, id_set)
            create_bench_jobs(config=config, algos=algos,
                              testcases=testcases, outdir=b_outdir)
        except:
            logger.error('Error while instantiating batch\n%s' % pformat(spec))
            raise

    if options.command:
        return batch_command(options.command)
    else:
        compmake_console()
        return 0
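# comp_prefix() namespaces the ids of the jobs defined after it, which is
# how dp_batch_main() above keeps the jobs of different batch sets apart.
# A sketch of the idiom (process_set is a hypothetical function):
def sketch_prefixes(sets):
    for id_set in sets:
        comp_prefix(id_set)        # subsequent job ids are prefixed by id_set
        comp(process_set, id_set)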
def main():
    usage = ''

    parser = OptionParser(usage=usage)
    parser.add_option("--bigdata", default="~/BIGDATA", help="Base directory")
    parser.add_option("--outdir", help="Destination base directory")

    (options, args) = parser.parse_args()

    if args:
        raise Exception('Spurious arguments %r.' % args)
    if options.outdir is None:
        raise Exception('Please specify --outdir.')

    bigdata = os.path.expanduser(options.bigdata)
    bigdata = os.path.expandvars(bigdata)

    conf2logs = {}
    conf2logs['conf1b'] = list_logdirs(os.path.join(bigdata,
                                       "er1-logs_compact_better", "conf1"))
    conf2logs['conf2b'] = list_logdirs(os.path.join(bigdata,
                                       "er1-logs_compact_better", "conf2"))

    # what to run
    # interface is {logdir, logname, outdir}
    conf2pg = {}
    conf2pg['conf1b'] = ['er1b_video01',
                         'er1b_video0_alone',
                         'er1b_video1_alone',
                         'er1conv_video0_bw_full',
                         'er1conv_video0_bw_small',
                         'er1conv_video01_bw_full',
                         'er1conv_video01_bw_small']
    conf2pg['conf2b'] = conf2pg['conf1b']

    for conf in conf2pg:
        logs = conf2logs[conf]
        pgs = conf2pg[conf]

        for logdir, pg in itertools.product(logs, pgs):  # @UndefinedVariable
            logname = os.path.basename(logdir)
            outdir = os.path.join(options.outdir, conf)
            job_id = '%s-%s-%s' % (conf, pg, logname)
            done_file = os.path.join(options.outdir, 'done',
                                     '%s-finished.txt' % job_id)
            make_sure_dir_exists(done_file)
            config = dict(logdir=logdir, outdir=outdir, logname=logname)
            comp(run_pg_script, pg, config, done_file, job_id=job_id)

    compmake_console()
def plearn(config, parser):
    """ Learn the diffeomorphisms in parallel. """
    # parser.add_option("-i", "--id_image", help="ID image.", default='lena')

    ncpus = multiprocessing.cpu_count()
    parser.add_option("-n", "--nthreads", help="Number of threads",
                      type='int', default=ncpus)
    parser.add_option("-s", "--streams", help="Which streams to use.",
                      default="*")
    parser.add_option("-i", "--comb", default="default")
    parser.add_option("-l", "--learners", help="Learner config.", default="*")
    parser.add_option("-o", "--output", help="Output directory",
                      default='out/dp-plearn/')
    parser.add_option("-c", "--command",
                      help="Command to pass to compmake for batch mode")

    options = parser.parse_options()

    nthreads = options.nthreads

    learners = config.learners.expand_names(options.learners)
    streams = config.streams.expand_names(options.streams)

    id_comb = ",".join(streams) + "-" + ",".join(learners)
    outdir = os.path.join(options.output, id_comb)
    storage = os.path.join(outdir, 'compmake')
    use_filesystem(storage)
    read_rc_files()

    rm = ReportManager(os.path.join(outdir, 'reports'))

    jobs_plearn(config, rm, learners, streams, outdir, nthreads)

    rm.create_index_job()

    # Time and report the learning
    comp(learning_times_plearn, outdir, learners, streams, nthreads)

    if options.command:
        return batch_command(options.command)
    else:
        compmake_console()
        return 0
def main():
    set_namespace('rawseeds2hdf')

    if not os.path.exists(rawseeds_dir):
        raise Exception('Input dir %r does not exist.' % rawseeds_dir)

    if not os.path.exists(hdf_dir):
        os.makedirs(hdf_dir)

    for log in logs:
        logdir = os.path.join(rawseeds_dir, log)
        hdf = os.path.join(hdf_dir, '%s.h5' % log)
        job_id = 'rawseeds2hdf-%s' % log
        comp(convert_rawseeds2hdf, logdir, hdf, job_id=job_id)

    compmake_console()
def dp_bench_main(config, parser):  # @UnusedVariable
    """ Runs a set of planning experiments. """
    parser.add_option("-a", "--algorithms", default='*',
                      help="Comma-separated list of algorithms. Can use *.")
    parser.add_option("-t", "--testcases", default='*',
                      help="Comma-separated list of test cases. Can use *.")
    parser.add_option("-o", "--output", default='out/dp-bench',
                      help="Output directory")
    parser.add_option("-c", "--command",
                      help="Command to pass to compmake for batch mode")

    options = parser.parse_options()

    algos = config.algos.expand_names(options.algorithms)
    testcases = config.testcases.expand_names(options.testcases)

    logger.info('Using %d algorithms: %s' % (len(algos), algos))
    logger.info('Using %d testcases.' % len(testcases))

    outdir = options.output

    # Compmake storage for results
    storage = os.path.join(outdir, 'compmake')
    use_filesystem(storage)
    read_rc_files()

    create_bench_jobs(config=config, algos=algos, testcases=testcases,
                      outdir=outdir)

    if options.command:
        return batch_command(options.command)
    else:
        compmake_console()
        return 0
def main():
    np.seterr(all='raise')

    parser = OptionParser(usage=description)
    parser.add_option("--db", help="Main data directory")
    parser.add_option("--interactive", action="store_true", default=False,
                      help="Starts an interactive compmake session.")
    parser.add_option("--outdir", help="Output directory")
    (options, args) = parser.parse_args()  # @UnusedVariable

    try:
        if args:
            raise Exception('Spurious arguments %r.' % args)
        if not options.db:
            raise Exception('Please provide --db option')
        if not options.outdir:
            raise Exception('Please provide --outdir option')
    except Exception as e:
        logger.error('Error while parsing configuration.')
        logger.error(str(e))
        sys.exit(-1)

    try:
        compmake_dir = os.path.join(options.outdir, 'compmake')
        use_filesystem(compmake_dir)

        with safe_flydra_db_open(options.db) as db:
            spontaneous_analysis(db, options.outdir)

        compmake_console()
    except Exception as e:
        logger.error('Error while processing. Exception and traceback follow.')
        logger.error(str(e))
        logger.error(traceback.format_exc())
        sys.exit(-2)
def main():
    outdir = 'test/repman'
    storage = os.path.join(outdir, 'compmake')
    use_filesystem(storage)

    rm = ReportManager(outdir)

    report = comp(make_rep1, 'TestReport3')
    report2 = comp(make_rep1, 'TestReport4')

    rm.add(report, 'rep3')
    rm.add(report2, 'rep4')

    rm.create_index_job()
    read_rc_files()
    compmake_console()
def main(): outdir = "test/repman" storage = os.path.join(outdir, "compmake") use_filesystem(storage) rm = ReportManager(outdir) report = comp(make_rep1, "TestReport3") report2 = comp(make_rep1, "TestReport4") rm.add(report, "rep3") rm.add(report2, "rep4") rm.create_index_job() read_rc_files() compmake_console()
def uncert(config, parser):
    parser.add_option("-S", "--dds", help="DDS system.")
    parser.add_option("-c", "--command",
                      help="Command to pass to compmake for batch mode")
    parser.add_option("-o", "--output", help="Output directory",
                      default='out/dp-precision/')
    parser.add_option("-s", "--streams", help="Which streams to use.",
                      default="*")
    parser.add_option("-d", "--distances", default='L2,L2w',
                      help="Distance ids to use for comparing the diffeo systems")
    parser.add_option("-l", "--length", default=9, type='int',
                      help="Max length of test cases")

    options = parser.parse_options()

    dds = config.discdds.expand_names(options.dds)
    streams = config.streams.expand_names(options.streams)

    id_comb = ",".join(dds)
    outdir = os.path.join(options.output, id_comb)
    storage = os.path.join(outdir, 'compmake')
    use_filesystem(storage)
    read_rc_files()

    rm = ReportManager(os.path.join(outdir, 'reports'))

    max_delta = options.length
    distances = config.distances.expand_names(options.distances)

    store = create_stats_jobs(config, dds, streams, distances,
                              max_delta, outdir)

    records = comp(make_records, store)

    report = comp(report_stats, records, dds, streams, distances)
    r0 = comp(empty_report)

    rm.add(report, 'main', id_dds='dds')
    rm.add(r0, 'empty')

    rm.create_index_job()

    if options.command:
        return batch_command(options.command)
    else:
        compmake_console()
        return 0
def uncert(config, parser):  # @UnusedVariable
    parser.add_option("-s", "--streams", help="Which streams to use.",
                      default="*")
    parser.add_option("-S", "--dds", help="DDS system.")
    parser.add_option("-o", "--output", help="Output directory",
                      default='out/dp-uncert/')
    parser.add_option("-c", "--command",
                      help="Command to pass to compmake for batch mode")

    options = parser.parse_options()

    dds = config.discdds.expand_names(options.dds)
    streams = config.streams.expand_names(options.streams)

    id_comb = ",".join(dds) + "-" + ",".join(streams)
    outdir = os.path.join(options.output, id_comb)
    storage = os.path.join(outdir, 'compmake')
    use_filesystem(storage)
    read_rc_files()

    rm = ReportManager(os.path.join(outdir, 'reports'))

    max_delta = 9
    store = create_uncert_stats_jobs(config, dds, streams, max_delta, outdir)

    records = comp(make_records, store)

    report = comp(report_uncert_stats, records, dds)
    r0 = comp(empty_report)

    rm.add(report, 'main', id_dds='dds')
    rm.add(r0, 'empty')

    rm.create_index_job()

    if options.command:
        return batch_command(options.command)
    else:
        compmake_console()
        return 0
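# The ReportManager flow used by uncert() and its siblings, reduced to its
# skeleton. A sketch: make_report stands in for any report-producing
# function, and the directory name is arbitrary.
def sketch_reports(make_report):
    rm = ReportManager('out/sketch/reports')
    report = comp(make_report)           # producing the report is itself a job
    rm.add(report, 'main')               # register it under a name
    rm.add(comp(empty_report), 'empty')
    rm.create_index_job()                # a final job writes the index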
def main():
    parser = OptionParser(usage=description)
    parser.add_option("--db", default='flydra_db', help="FlydraDB directory")
    parser.add_option("--interactive",
                      help="Start compmake interactive session."
                           " Otherwise run in batch mode",
                      default=False, action="store_true")
    (options, args) = parser.parse_args()  # @UnusedVariable

    db = FlydraDB(options.db, False)
    set_namespace('video_contrast')

    samples = db.list_samples()
    if not samples:
        print 'No samples found'

    for id in samples:
        if db.has_rows(id) and db.has_table(id, 'contrast') and \
                db.has_table(id, 'luminance'):
            config = {'sample': id, 'db': options.db}
            comp(pg, 'flydra_display_contrast', config,
                 job_id="flydra_display_contrast:%s" % id)

    if options.interactive:
        # start interactive session
        compmake_console()
    else:
        # batch mode: try to do everything
        batch_command('make all')
        # exit with an error code if we are not done
        # (that is, make all failed for some reason)
        todo = list(parse_job_list('todo'))
        if todo:
            print('Still %d jobs to do.' % len(todo))
            sys.exit(-2)
def pysnip_make(dirname, compmake_command):
    files = glob(os.path.join(dirname, '*.py'))
    prefixes = [os.path.splitext(os.path.basename(f))[0] for f in files]
    logger.info('Found %d snippets in directory %s' % (len(prefixes), dirname))

    use_filesystem(os.path.join(dirname, '.compmake'))

    ntodo = 0
    for p in prefixes:
        job = Job(dirname, p)
        job_id = job.basename

        current_state = None
        if job_exists(job_id):
            current_state = get_job_cache(job_id).state

        if job.status == DONE_UPTODATE:
            # logger.info('%s: done' % job.basename)
            if current_state != Cache.DONE:
                mark_as_done(job_id)
        elif job.status == FAILED:
            # logger.info('%s: failed' % job.basename)
            if current_state != Cache.FAILED:
                mark_as_failed(job_id)
        elif job.status == DONE_NEEDSUPDATE:
            # logger.info('%s: done (but needs update)' % job.basename)
            mark_as_notstarted(job_id)
        elif job.status == NOTSTARTED:
            # logger.info('%s: not started' % job.basename)
            mark_as_notstarted(job_id)

        comp(run_job, job, job_id=job_id)

        if job.status != DONE_UPTODATE:
            ntodo += 1

    # logger.info('%d/%d jobs to do' % (ntodo, len(prefixes)))

    batch_command('stats')

    if compmake_command is not None:
        return batch_command(compmake_command)
    else:
        compmake_console()
        return 0
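# The status-sync logic above, isolated as a sketch: compmake's cached job
# state is forced to agree with the externally tracked status before the
# jobs are (re)declared. Uses only the helpers already referenced above.
def sync_cache_state(job_id, status):
    current = get_job_cache(job_id).state if job_exists(job_id) else None
    if status == DONE_UPTODATE:
        if current != Cache.DONE:
            mark_as_done(job_id)
    elif status == FAILED:
        if current != Cache.FAILED:
            mark_as_failed(job_id)
    elif status in (DONE_NEEDSUPDATE, NOTSTARTED):
        mark_as_notstarted(job_id)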
def main():
    print('We will now define a hierarchy of jobs.')
    print('Each one can fail randomly with probability %f.' % failure_prob)

    branch = 20
    args = sys.argv[1:]
    if args:
        branch = int(args.pop(0))

    for i in range(branch):
        ijobs = []
        for j in range(branch):
            kjobs = []
            for k in range(branch):
                kjobs.append(comp(third, job_id='%d-%d-%d' % (i, j, k)))
            ijobs.append(comp(second, kjobs, job_id='%d-%d' % (i, j)))
        comp(first, ijobs, job_id='%d' % i)

    compmake_console()
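# The demo above assumes `failure_prob`, `first`, `second`, and `third` are
# defined elsewhere in the script; a plausible minimal version follows
# (these are assumptions, not the original definitions). Note that compmake
# passes the *results* of child jobs to `second` and `first`.
import random

failure_prob = 0.02

def third():
    if random.random() < failure_prob:
        raise Exception('Random failure.')

def second(kjobs):  # receives the results of its `third` children
    if random.random() < failure_prob:
        raise Exception('Random failure.')

def first(ijobs):   # receives the results of its `second` children
    if random.random() < failure_prob:
        raise Exception('Random failure.')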
def pixlearn(config, parser):
    parser.add_option("-n", "--nthreads", help="Number of threads",
                      type='int', default=4)
    parser.add_option("--distribute", type='str', default='random',
                      help="Strategy to distribute sensels to threads")
    parser.add_option("-s", "--id_stream", help="Which streams to use.",
                      default="*")
    parser.add_option("-o", "--output", help="Output directory",
                      default='out/dp-pixlearn/')
    parser.add_option("-l", "--id_learner", help="Learner config.",
                      default="*")
    parser.add_option("-c", "--command",
                      help="Command to pass to compmake for batch mode")

    options = parser.parse_options()
    nthreads = options.nthreads

    id_learner = config.learners.expand_names(options.id_learner)
    id_stream = config.streams.expand_names(options.id_stream)

    id_comb = ",".join(id_stream) + "-" + ",".join(id_learner)
    outdir = os.path.join(options.output, id_comb)
    storage = os.path.join(outdir, 'compmake')
    use_filesystem(storage)
    read_rc_files()

    rm = ReportManager(os.path.join(outdir, 'reports'))

    if options.distribute == 'random':
        max_nsensels = 40 * 30
        # Generate the range of all indices and assign them to threads
        all_indices = np.array(range(max_nsensels))
        dist = np.random.randint(nthreads, size=max_nsensels)
        sensel_indexes = []
        for i in range(nthreads):
            sensel_indexes.append(all_indices[dist == i])
    elif options.distribute == 'demo4':
        sensel_indexes = [[0, 1, 2, 3, 40, 41, 42, 43,
                           80, 81, 82, 83, 120, 121, 122, 123],
                          [340, 341, 342, 343, 380, 381, 382, 383,
                           420, 421, 422, 423, 460, 461, 462, 463],
                          [650, 651, 652, 653, 690, 691, 692, 693,
                           730, 731, 732, 733, 770, 771, 772, 773],
                          [1076, 1077, 1078, 1079, 1116, 1117, 1118, 1119,
                           1156, 1157, 1158, 1159, 1196, 1197, 1198, 1199]]

    if len(id_learner) > 1 or len(id_stream) > 1:
        logger.warning('learners and streams after index 0 will be ignored')
    id_learner = id_learner[0]
    id_stream = id_stream[0]

    commands = [[256, 0, 0], [-256, 0, 0]]
    states = [[100]]

    # Parallel part of the code: initiate parallel learning
    group = []
    for i in range(nthreads):
        estimator_i = comp(sensel_group_learn, config, id_learner, id_stream,
                           sensel_indexes[i])
        group.append(estimator_i)

        diffeo_system_i = comp(estimator_summarize, estimator_i,
                               commands, states, 'diffeo' + str(i))

        estimator_report = comp(report_estimator, 'estimator' + str(i),
                                estimator_i)
        rm.add(estimator_report, 'estimator' + str(i),
               id_learner=id_learner, id_stream=id_stream)

        diffeo_report = comp(report_dds, 'diffeo' + str(i), diffeo_system_i)
        rm.add(diffeo_report, 'diffeo' + str(i),
               id_learner=id_learner, id_stream=id_stream)

    estimator_main = comp(join_estimators, group)
    main_system = comp(estimator_summarize, estimator_main,
                       commands, states, 'diffeo' + str(i))
    diffeo_report = comp(report_dds, 'dds-%s-%s' % (id_stream, id_learner),
                         main_system,
                         job_id='learn-%s-%s-summarize-report'
                                % (id_stream, id_learner))

    rm.add(comp(empty_report), 'empty')
    rm.add(diffeo_report, 'dds', id_learner=id_learner, id_stream=id_stream)

    rm.create_index_job()

    if options.command:
        return batch_command(options.command)
    else:
        compmake_console()
# NOTE: this snippet starts mid-function in the original; the definition of
# func1, the signature of func2, and the imports below are reconstructed
# assumptions. The original returned `res1 + param1`, which is not in scope
# inside func2; `param2` is assumed instead.
import time

from compmake import comp

def func1(param1):
    print('func1(%s)' % param1)
    time.sleep(1)
    return param1

def func2(res1, param2):
    # we now add an exception
    if param2 == 11:
        raise Exception('11 is your unlucky number.')
    print('func2(%s, %s)' % (res1, param2))
    time.sleep(1)
    return res1 + param2

def draw(res2):
    print('draw(%s)' % res2)

print('Defining jobs...')
for param1 in [1, 2, 3]:
    for param2 in [10, 11, 12]:
        res1 = comp(func1, param1)
        res2 = comp(func2, res1, param2)
        comp(draw, res2)

print('Ready to run...')

import compmake

if compmake.is_inside_compmake_script():
    print('Detected that we were imported by compmake.')
else:
    interactive = True
    if interactive:
        print('Presenting an interactive console')
        compmake.compmake_console()
    else:
        print('Running the computation in batch mode')
        compmake.batch_command('parmake n=4')
def report_main(args):
    # np.seterr(all='raise')
    parser = LenientOptionParser(usage=description)
    parser.add_option("--db", help="Main data directory")
    parser.add_option("--outdir", help="Output directory for reports")
    parser.add_option("--datadir", help="Output directory for compmake files")
    parser.add_option("--version_rows", help="Table version ('kf' or 'smooth')")
    parser.add_option("--version_saccades",
                      help="Table version ('kf', 'smooth', 'angvel')")
    parser.add_option("--group", help="Sample group", default='nopost')
    parser.add_option("--ncells_distance", type='int', default=20,
                      help="Discretization for distance")
    parser.add_option("--ncells_axis_angle", type='int', default=36,
                      help="Discretization for axis angle")
    parser.add_option("--compmake_command", default=None,
                      help="Execute the CompMake command and exit.")
    parser.add_option("--pdf", default=False, action='store_true',
                      help="Uses PDF for the reports (slower).")

    (options, args) = parser.parse_args(args)  # @UnusedVariable

    check_no_spurious(args)
    check_mandatory(options, ['db', 'outdir', 'datadir',
                              'version_rows', 'version_saccades'])

    if options.pdf:
        logger.info('Using PDF for plots.')
        RepRepDefaults.default_image_format = MIME_PDF
        PlotParams.init_matplotlib()

    confid = '%s-%s-%s-D%d-A%d' % (options.group, options.version_rows,
                                   options.version_saccades,
                                   options.ncells_distance,
                                   options.ncells_axis_angle)

    compmake_dir = os.path.join(options.datadir, confid)
    use_filesystem(compmake_dir)
    logger.info('Storing computation in %r.' % compmake_dir)

    arena_radius = ParamsEstimation.arena_radius
    warnings.warn('Using hardcoded arena radius %s.' % arena_radius)

    cells = DACells(ncells_distance=options.ncells_distance,
                    ncells_axis_angle=options.ncells_axis_angle,
                    arena_radius=arena_radius,
                    min_distance=ParamsEstimation.min_distance,
                    bin_enlarge_angle=ParamsEstimation.bin_enlarge_angle,
                    bin_enlarge_dist=ParamsEstimation.bin_enlarge_dist)

    stats = comp(get_group_density_stats, options.db, options.group,
                 options.version_rows, cells)
    saccades = comp(get_saccades_for_group, options.db, options.group,
                    options.version_saccades)
    saccades_stats = comp(compute_histogram_saccades, saccades, cells)

    joint_stats = comp(compute_joint_statistics, stats, saccades_stats)
    joint_stats = comp(compute_visual_stimulus, joint_stats)

    report = comp(report_stats, confid, stats, saccades_stats)
    rd = os.path.join(options.outdir, 'images')

    html = os.path.join(options.outdir, "%s.html" % confid)
    comp(write_report, report, html, rd, job_id='report_stats-write')

    report_m = comp(report_models_choice, confid, joint_stats)
    html = os.path.join(options.outdir, "%s_models.html" % confid)
    comp(write_report, report_m, html, rd, job_id='report_models_choice-write')

    report_s = comp(report_visual_stimulus, confid, joint_stats,
                    job_id='report_stimulus')
    html = os.path.join(options.outdir, "%s_stimulus.html" % confid)
    comp(write_report, report_s, html, rd, job_id='report_stimulus-write')

    report_i = comp(report_intuitive, confid, joint_stats,
                    job_id='report_intuitive')
    html = os.path.join(options.outdir, "%s_intuitive.html" % confid)
    comp(write_report, report_i, html, rd, job_id='report_intuitive-write')

    comp(write_report,
         comp(report_saccades, confid, saccades, job_id='report_saccades'),
         html=os.path.join(options.outdir, "%s_saccades.html" % confid),
         rd=rd, job_id='report_saccades-write')

    comp(write_report,
         comp(report_traj, confid, options.db, options.group,
              options.version_rows, job_id='report_traj'),
         html=os.path.join(options.outdir, "%s_traj.html" % confid),
         rd=rd, job_id='report_traj-write')

    if options.compmake_command is not None:
        compmake.batch_command(options.compmake_command)
    else:
        compmake_console()
def main():
    parser = OptionParser(usage=description)

    parser.add_option("--db", default='flydra_db_directory',
                      help="FlydraDB directory")

    parser.add_option("--model", help="ProcGraph model name.")

    parser.add_option("--needs", default="rows,luminance",
                      help="Comma-separated list of tables required")

    parser.add_option("--interactive",
                      help="Start compmake interactive session."
                           " Otherwise run in batch mode",
                      default=False, action="store_true")

    (options, args) = parser.parse_args()

    if options.model is None:
        print "Please specify the model."
        sys.exit(-3)

    print("Using FlydraDB directory %r." % options.db)

    db = FlydraDB(options.db, False)

    # TODO: make the storage inside options.db?
    set_namespace('run_pg_model_%s' % options.model)

    tables = options.needs.split(',')

    if args:
        samples = args
        for sample in samples:
            if not db.has_sample(sample):
                raise Exception('Unknown sample %r' % sample)
    else:
        samples = db.list_samples()

    if not samples:
        print 'No samples found'

    num_ok = 0
    for id in samples:
        enough = all(map(lambda t: db.has_table(id, t), tables))

        if not enough:
            continue

        num_ok += 1
        config = {'sample': id, 'db': options.db}
        comp(pg, options.model, config, job_id=id)

    logger.info("Found %d/%d samples with tables %s."
                % (num_ok, len(samples), tables))

    if options.interactive:
        # start interactive session
        compmake_console()
    else:
        # batch mode: try to do everything
        batch_command('make all')
        # exit with an error code if we are not done
        # (that is, make all failed for some reason)
        todo = list(parse_job_list('todo'))
        if todo:
            logger.info('Still %d jobs to do.' % len(todo))
            sys.exit(-2)
def go(self):
    # If we have a parent who is a quickapp, use its context.
    qapp_parent = self.get_qapp_parent()
    if qapp_parent is not None:
        # self.info('Found parent: %s' % qapp_parent)
        context = qapp_parent.child_context
        self.define_jobs_context(context)
        return

    if False:
        import resource
        gbs = 5
        max_mem = long(gbs * 1000 * 1048576L)
        resource.setrlimit(resource.RLIMIT_AS, (max_mem, -1))
        resource.setrlimit(resource.RLIMIT_DATA, (max_mem, -1))

    options = self.get_options()

    if self.get_qapp_parent() is None:
        # only do this if somebody didn't do it before
        if not options.contracts:
            msg = 'PyContracts disabled for speed. Use --contracts to activate.'
            self.logger.warning(msg)
            contracts.disable_all()

    warnings.warn('removed configuration below')  # (start)

    output_dir = options.output

    # Compmake storage for results
    storage = os.path.join(output_dir, 'compmake')
    sf = StorageFilesystem(storage, compress=True)
    # sf = StorageFilesystem2(directory)
    # sf = MemoryCache(sf)
    set_compmake_db(sf)
    # use_filesystem(storage)
    read_rc_files()

    context = CompmakeContext(parent=None, qapp=self, job_prefix=None,
                              output_dir=output_dir)
    self.context = context

    original = get_comp_prefix()
    self.define_jobs_context(context)
    comp_prefix(original)

    context.finalize_jobs()

    if context.n_comp_invocations == 0:
        # self.comp was never called
        msg = 'No jobs defined.'
        raise ValueError(msg)

    if not options.console:
        batch_result = batch_command(options.command)
        if isinstance(batch_result, str):
            ret = QUICKAPP_COMPUTATION_ERROR
        elif isinstance(batch_result, int):
            if batch_result == 0:
                ret = 0
            else:
                # xxx: discarded information
                ret = QUICKAPP_COMPUTATION_ERROR
        else:
            assert False
        return ret
    else:
        compmake_console()
        return 0
def idealize_uncert(config, parser):
    """ Assumes a constant displacement over the whole sensor domain. """
    parser.add_option("-S", "--dds", help="DDS system.")
    parser.add_option("-c", "--command",
                      help="Command to pass to compmake for batch mode")
    parser.add_option("-o", "--output", help="Output directory",
                      default='out/idealized-dds/')

    options = parser.parse_options()

    id_discdds = options.dds
    dds = config.discdds.instance(id_discdds)

    outdir = os.path.join(options.output, id_discdds)
    storage = os.path.join(outdir, 'compmake')
    use_filesystem(storage)
    read_rc_files()

    rm = ReportManager(os.path.join(outdir, 'reports'))

    # Ideal
    id_iu_discdds = 'iu-' + id_discdds
    iu_dds = comp(_idealize_uncert, dds, job_id='idealize_uncert')
    comp(save_results, id_iu_discdds, outdir, iu_dds)
    diffeo_report = comp(report_dds, 'iu_dds-%s' % id_discdds, iu_dds)
    rm.add(diffeo_report, 'iu-dds', id_learner='idealized-uncertainty')

    # Relative
    id_uur_discdds = 'uur-' + id_discdds
    dds_copyr = copy.copy(dds)
    uur_dds = comp(_update_uncert, dds_copyr, length_score_norm_relative,
                   job_id='update_uncert_relative')
    comp(save_results, id_uur_discdds, outdir, uur_dds,
         job_id='update_uncert_relative_save')
    diffeo_report = comp(report_dds, 'uur-dds-%s' % id_discdds, uur_dds,
                         job_id='update_uncert_relative_report')
    rm.add(diffeo_report, 'uur-dds', id_learner='updated-uncertainty-uur')

    # Absolute
    dds_copya = copy.copy(dds)
    id_uua_discdds = 'uua-' + id_discdds
    uua_dds = comp(_update_uncert, dds_copya, length_score_norm,
                   job_id='update_uncert_absolute')
    comp(save_results, id_uua_discdds, outdir, uua_dds,
         job_id='update_uncert_absolute_save')
    diffeo_report = comp(report_dds, 'uua-dds-%s' % id_discdds, uua_dds,
                         job_id='update_uncert_absolute_report')
    rm.add(diffeo_report, 'uua-dds', id_learner='updated-uncertainty-uua')

    rm.create_index_job()

    if options.command:
        return batch_command(options.command)
    else:
        compmake_console()
        return 0
def main():
    parser = OptionParser()

    group = OptionGroup(parser, "Files and directories")
    group.add_option("--outdir",
                     help='Directory with variables.pickle and where '
                          'the output will be placed.')
    parser.add_option_group(group)

    group = OptionGroup(parser, "Experiments options")
    group.add_option("--fast", default=False, action='store_true',
                     help='Disables sanity checks.')
    group.add_option("--set", default='*',
                     help='[= %default] Which combinations to run.')
    group.add_option("--seed", default=None, type='int',
                     help='[= %default] Seed for random number generator.')
    parser.add_option_group(group)

    group = OptionGroup(parser, "Compmake options")
    group.add_option("--remake", default=False, action='store_true',
                     help='Remakes all (non interactive).')
    group.add_option("--report", default=False, action='store_true',
                     help='Cleans and redoes all reports (non interactive).')
    group.add_option("--report_stats", default=False, action='store_true',
                     help='Cleans and redoes the reports for the stats '
                          '(non interactive).')
    parser.add_option_group(group)

    (options, args) = parser.parse_args()  # @UnusedVariable

    np.random.seed(options.seed)

    if options.fast:
        disable_all()

    assert not args
    assert options.outdir is not None

    available_algorithms, available_test_cases, available_sets = \
        get_everything()

    which = expand_string(options.set, list(available_sets.keys()))

    if len(which) == 1:
        compmake_storage = os.path.join(options.outdir, 'compmake', which[0])
    else:
        compmake_storage = os.path.join(options.outdir, 'compmake',
                                        'common_storage')

    use_filesystem(compmake_storage)

    print('Staging creation of test cases reports')
    test_cases = {}
    test_case_reports = {}

    def stage_test_case_report(tcid):
        if not tcid in available_test_cases:
            msg = ('Could not find test case %r \n %s'
                   % (tcid, available_test_cases.keys()))
            raise Exception(msg)
        if not tcid in test_cases:
            command, args = available_test_cases[tcid]
            job_id = 'test_case_data-%s' % tcid
            test_cases[tcid] = comp(command, job_id=job_id, **args)
        if not tcid in test_case_reports:
            job_id = 'test_case-%s-report' % tcid
            report = comp(create_report_tc, tcid, test_cases[tcid],
                          job_id=job_id)
            job_id += '-write'
            filename = os.path.join(options.outdir, 'test_cases',
                                    '%s.html' % tcid)
            comp(write_report, report, filename, job_id=job_id)
            test_case_reports[tcid] = report
        return test_case_reports[tcid]

    # set of tuples (algo, test_case)
    executions = {}

    def stage_execution(tcid, algid):
        stage_test_case_report(tcid)

        key = (tcid, algid)
        if not key in executions:
            test_case = test_cases[tcid]
            algo_class, algo_params = available_algorithms[algid]

            job_id = 'solve-%s-%s-run' % (tcid, algid)
            results = comp(run_combination, tcid, test_case,
                           algo_class, algo_params, job_id=job_id)
            executions[key] = results

            exc_id = '%s-%s' % (tcid, algid)
            # Create iterations report
            job_id = 'solve-%s-report' % exc_id
            report = comp(create_report_execution, exc_id, tcid, test_case,
                          algo_class, algo_params, results, job_id=job_id)

            job_id += '-write'
            filename = os.path.join(options.outdir, 'executions',
                                    '%s-%s.html' % (tcid, algid))
            comp(write_report, report, filename, job_id=job_id)

        return executions[key]

    for comb_id in which:
        comb = available_sets[comb_id]
        alg_ids = expand_string(comb.algorithms, available_algorithms.keys())
        tc_ids = expand_string(comb.test_cases, available_test_cases.keys())

        print('Set %r has %d test cases and %d algorithms '
              '(~%d jobs in total).'
              % (comb_id, len(tc_ids), len(alg_ids),
                 len(alg_ids) * len(tc_ids) * 2))

        deps = {}
        for t, a in itertools.product(tc_ids, alg_ids):
            deps[(t, a)] = stage_execution(t, a)

        job_id = 'tex-%s' % comb_id
        comp(create_tables_for_paper, comb_id, tc_ids, alg_ids, deps,
             job_id=job_id)

        job_id = 'set-%s-report' % comb_id
        report = comp(create_report_comb_stats, comb_id, tc_ids, alg_ids,
                      deps, job_id=job_id)

        job_id += '-write'
        filename = os.path.join(options.outdir, 'stats', '%s.html' % comb_id)
        comp(write_report, report, filename, job_id=job_id)

    if options.report or options.report_stats:
        if options.report:
            batch_command('clean *-report*')
        elif options.report_stats:
            batch_command('clean set-* tex*')
        batch_command('parmake')
    elif options.remake:
        batch_command('clean *')
        batch_command('make set-* tex-*')
    else:
        compmake_console()
def main():
    logger.info('Starting online planning')

    parser = OptionParser()
    parser.add_option("-t", "--id_otc", default=None,
                      help="Comma-separated list of online test cases")
    parser.add_option("-e", "--env", help="environment name",
                      default='default')
    parser.add_option("-o", "--result", help="Result storage file",
                      default=None)
    parser.add_option("-r", "--report", help="Specify output reports",
                      default='')
    (options, _) = parser.parse_args()

    config = DiffeoplanConfigMaster()
    config.load('default:/home/adam/diffeo-data/')
    report_opt = options.report.split(',')
    set_current_config(config)

    if options.id_otc is not None:
        testcases = options.id_otc.split(',')
    else:
        testcases = []

    if options.result is not None:
        data_files = options.result.split(',')
    else:
        data_files = []

    all_stats = []
    try:
        for data_file in data_files:
            all_stats += pickle.load(open(data_file))
    except:
        pass  # tolerate missing or unreadable result files

    for tc in testcases:
        logger.info('Starting up tests with: %s' % tc)
        planning_module = config.online_testcases.instance(tc)
        all_stats += planning_module.run_all_tests(options.env)

    pickle.dump(all_stats, open(options.result, 'wb'))

    rm = ReportManager('out/online')

    if 'vis' in report_opt:
        for i, stat in enumerate(all_stats):
            report = comp(report_tools.run_report, stat)
            kwargs = dict(env=stat.labels['env'])
            rm.add(report, 'online_run_' + str(i), **kwargs)

    stats_file = yaml.load(open(
        '/home/adam/git/boot12env/src/surf12adam/orbit.online_report.yaml'))
    stats_def = []
    for i in range(len(stats_file)):
        if stats_file[i]['id'] in report_opt:
            stats_def.append(stats_file[i])

    kwargs = {'summary': 'summary'}
    report = comp(report_tools.stat_report, stats_def, all_stats)
    rm.add(report, 'summary_stats', **kwargs)

    kwargs = {'summary': 'empty'}
    report = comp(report_tools.empty_report)
    rm.add(report, 'empty', **kwargs)

    rm.create_index_job()
    compmake_console()
def main():
    parser = OptionParser(usage=description)
    parser.add_option("--flydra_db", default="saccade_data_flydradb",
                      help="Main data directory")
    parser.add_option("--interactive", action="store_true", default=False,
                      help="Starts an interactive compmake session.")
    parser.add_option("--report", default="saccade_report",
                      help="Saccade report directory")
    parser.add_option("--groups", default=None,
                      help="Which groups to consider")
    parser.add_option("--configurations", default=None,
                      help="Which configurations to consider")
    parser.add_option("--combid", default=None,
                      help="How to name this combination of groups/configs.")

    (options, args) = parser.parse_args()  # @UnusedVariable
    if args:
        raise Exception("Spurious arguments %r." % args)

    db = FlydraDB(options.flydra_db)

    robust_split = lambda s: filter(lambda x: x, s.split(","))

    if not options.groups in [None, "all"]:
        groups = robust_split(options.groups)
        if not groups:
            raise Exception("No groups specified.")
        groupset = "_".join(groups)
    else:
        groups = db.list_groups()
        groupset = "all"
        if not groups:
            raise Exception("No groups found.")

    if not options.configurations in [None, "all"]:
        configurations = robust_split(options.configurations)
        if not configurations:
            raise Exception("No configuration specified")
        confset = "_".join(configurations)
    else:
        configurations = db.list_all_versions_for_table(SACCADES_TABLE)
        confset = "all"

        configurations = set()
        for group in groups:
            configurations.update(
                db.list_versions_for_table_in_group(group, SACCADES_TABLE))
        configurations = natsorted(configurations)

        if not configurations:
            raise Exception("No valid versions of table %r found."
                            % SACCADES_TABLE)

    print("I will consider the configurations: %r" % configurations)

    if options.combid is None:
        combination = "%s_%s" % (groupset, confset)
    else:
        combination = options.combid
    print("I call this combination %r." % combination)

    output_dir = os.path.join(options.report, combination)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    set_namespace("master_plot_%s" % combination)

    # we maintain several indices

    # key = (group, configuration, plot)
    index_group_plots = {}
    # key = (sample, plot)
    index_sample_expdata_plots = {}
    # key = (sample, configuration, plot)
    index_sample_saccades_plots = {}

    # First we index the DB
    print("Looking for data in database...")
    all_samples = set()
    group2samples = {}
    configurations_for_group = {}
    group_has_exp_data = {}
    for group in groups:
        group2samples[group] = db.list_samples_for_group(group)
        all_samples.update(group2samples[group])
        if not group2samples[group]:
            raise Exception("Empty group %r." % group)

        available = db.list_versions_for_table_in_group(group=group,
                                                        table=SACCADES_TABLE)
        configurations_for_group[group] = \
            set(configurations).intersection(available)

        if not configurations_for_group[group]:
            print("No configurations found for %r; available %r"
                  % (group, available))

        group_has_exp_data[group] = db.group_has_table(group, EXP_DATA_TABLE)
    all_samples = natsorted(all_samples)

    # print summary info
    print("Summary:")
    for group in groups:
        msg = ("  group {group:>20}  samples: {nsamples:3}  "
               "configurations: {nconf:3}  raw data? {data}".format(
                   group=group,
                   nsamples=len(group2samples[group]),
                   nconf=len(configurations_for_group[group]),
                   data=group_has_exp_data[group]))
        print(msg)

    # TODO: iterate by sample, not by group
    for group in groups:
        for configuration in configurations_for_group[group]:
            for plot in group_plots:
                job_id = "%s-%s-%s" % (group, configuration, plot.id)
                index_group_plots[(group, configuration, plot.id)] = \
                    comp(wrap_group_plot, options.flydra_db, group,
                         configuration, plot.command, plot.args,
                         job_id=job_id)

            for sample, plot in prod(group2samples[group],
                                     sample_saccades_plots):
                job_id = "%s-%s-%s" % (sample, configuration, plot.id)
                key = (sample, configuration, plot.id)
                if key in index_sample_saccades_plots:
                    # we already did it as part of another group
                    continue
                index_sample_saccades_plots[key] = \
                    comp(wrap_sample_saccades_plot, options.flydra_db,
                         sample, configuration, plot.command, plot.args,
                         job_id=job_id)

        if group_has_exp_data[group]:
            for sample, plot in prod(group2samples[group],
                                     sample_expdata_plots):
                job_id = "%s-%s" % (sample, plot.id)
                key = (sample, plot.id)
                if key in index_sample_expdata_plots:
                    # we already did it as part of another group
                    continue
                index_sample_expdata_plots[key] = \
                    comp(wrap_sample_expdata_plot, options.flydra_db,
                         sample, plot.command, plot.args, job_id=job_id)

    # now we create the indices

    # fix configuration, function; iterate groups
    for configuration, plot in itertools.product(configurations, group_plots):
        subs = []
        descs = []

        page_id = "%s.%s" % (configuration, plot.id)

        for group, group_desc in order_groups(groups):
            if not configuration in configurations_for_group[group]:
                continue
            descs.append(group_desc)
            subs.append(index_group_plots[(group, configuration, plot.id)])

        if not subs:
            raise Exception("no groups for configuration %r." % configuration)

        job_id = page_id
        comp(combine_reports, subs, descs, page_id, output_dir, job_id=job_id)

    comp(create_gui,
         filename=os.path.join(output_dir, "group_plots.html"),
         menus=[("Detector", configurations, configurations),
                ("Plot/table",
                 map(lambda x: x.id, group_plots),
                 map(lambda x: x.description, group_plots))],
         job_id="gui-group_plots")

    # fix group, function; iterate samples
    for group in groups:
        if not group_has_exp_data[group]:
            continue

        for plot in sample_expdata_plots:
            subs = []
            descs = []
            for sample in group2samples[group]:
                descs.append(sample)
                subs.append(index_sample_expdata_plots[(sample, plot.id)])

            page_id = "%s.%s" % (group, plot.id)
            job_id = page_id
            comp(combine_reports, subs, descs, page_id, output_dir,
                 job_id=job_id)

    # get the ordered group lists and descriptions
    ordered_groups = map(lambda t: t[0], order_groups(groups))
    ordered_groups_desc = map(lambda t: t[1], order_groups(groups))

    comp(create_gui,
         filename=os.path.join(output_dir, "expdata_plots.html"),
         menus=[("Group", ordered_groups, ordered_groups_desc),
                ("Plot/table",
                 map(lambda x: x.id, sample_expdata_plots),
                 map(lambda x: x.description, sample_expdata_plots))],
         job_id="gui-expdata_plots")

    # fix configuration, group, function; iterate samples
    for group in groups:
        for configuration in configurations:

            if not configuration in configurations_for_group[group]:
                for plot in sample_saccades_plots:
                    page_id = "%s.%s.%s" % (configuration, group, plot.id)
                    comp(write_empty, page_id, output_dir,
                         "Group %r has not been processed with algorithm %r."
                         % (group, configuration),
                         job_id=page_id)
                continue

            for plot in sample_saccades_plots:
                subs = []
                descs = []
                for sample in group2samples[group]:
                    descs.append(sample)
                    r = index_sample_saccades_plots[(sample, configuration,
                                                     plot.id)]
                    subs.append(r)

                page_id = "%s.%s.%s" % (configuration, group, plot.id)
                job_id = page_id
                comp(combine_reports, subs, descs, page_id, output_dir,
                     job_id=job_id)

    comp(create_gui,
         filename=os.path.join(output_dir, "saccade_plots.html"),
         menus=[("Detector", configurations, configurations),
                ("Group", ordered_groups, ordered_groups_desc),
                ("Plot/table",
                 map(lambda x: x.id, sample_saccades_plots),
                 map(lambda x: x.description, sample_saccades_plots))],
         job_id="gui-saccade_plots")

    # fix configuration, sample; plot fullscreen
    for group in groups:
        for configuration in configurations:
            for sample in group2samples[group]:
                # XXX make it cleaner
                if not configuration in configurations_for_group[group]:
                    for plot in sample_fullscreen_plots:
                        page_id = "%s.%s.%s" % (sample, configuration, plot.id)
                        comp(write_empty, page_id, output_dir,
                             'Group %s has not been processed with algorithm '
                             '"%s".' % (group, configuration),
                             job_id=page_id)
                    # print("skipping sample %s group %s config %s"
                    #       % (sample, group, configuration))
                    continue

                if not group_has_exp_data[group]:
                    for plot in sample_fullscreen_plots:
                        page_id = "%s.%s.%s" % (sample, configuration, plot.id)
                        comp(write_empty, page_id, output_dir,
                             "Group %r does not have raw experimental data."
                             % group,
                             job_id=page_id)
                    continue

                for plot in sample_fullscreen_plots:
                    job_id = "%s-%s-%s" % (sample, configuration, plot.id)

                    # FIXME: error if sample in 2 groups
                    job = comp(wrap_sample_saccades_plot, options.flydra_db,
                               sample, configuration, plot.command, plot.args,
                               job_id=job_id)

                    page_id = "%s.%s.%s" % (sample, configuration, plot.id)
                    comp(write_report, job, output_dir, page_id,
                         job_id=job_id + "-write_report")

    comp(create_gui,
         filename=os.path.join(output_dir, "sample_fullscreen_plots.html"),
         menus=[("Sample", all_samples, all_samples),
                ("Detector", configurations, configurations),
                ("Plot/table",
                 map(lambda x: x.id, sample_fullscreen_plots),
                 map(lambda x: x.description, sample_fullscreen_plots))],
         job_id="gui-sample_fullscreen_plots")

    tabs = [
        ("group_plots", "By group",
         "This set displays one plot/table for each group of samples. "
         "You have the further choice of detection algorithm and plot/table "
         "to display."),
        ("saccade_plots", "By sample",
         "This set displays one plot/table for each individual sample. "
         "You have the further choice of which group to consider, which "
         "detection algorithm, and which plot/table to display."),
        ("expdata_plots", "By sample (raw)",
         "This set displays one plot/table for each individual sample, "
         "produced from the raw data (no saccade detection, so no choice "
         "of detector). You have the further choice of which group to "
         "consider, and which plot/table to display. Note that some samples "
         "might be missing; for example, we don't use raw orientation data "
         "for the Mamarama samples."),
        ("sample_fullscreen_plots", "By sample, single page",
         "This set displays one entire page for each sample. "
         "You have the further choice of sample, detection algorithm, "
         "and which plot/table to display."),
    ]

    comp(create_main_gui, tabs,
         filename=os.path.join(output_dir, "main.html"),
         job_id="gui-main")

    if options.interactive:
        # start interactive session
        compmake_console()
    else:
        # batch mode: try to do everything
        batch_command("make all")
        # exit with an error code if we are not done
        # (that is, make all failed for some reason)
        todo = list(parse_job_list("todo"))
        if todo:
            print("Still %d jobs to do." % len(todo))
            sys.exit(-2)