from conf_analysis.meg import artifacts, preprocessing
from conf_analysis.behavior import empirical, metadata, keymap
import locale
import pickle

import mne
import numpy as np
from distributed import Executor, diagnostics

locale.setlocale(locale.LC_ALL, "en_US")

# Assumes a running dask.distributed scheduler; the address is a placeholder.
executor = Executor('127.0.0.1:8786')


def do_one(filename):
    """Map raw recording blocks with at least 75 trials to consecutive block numbers."""
    result = {}
    raw = mne.io.read_raw_ctf(filename, system_clock='ignore')
    trials = preprocessing.blocks(raw, full_file_cache=True)
    trl, bl = trials['trial'], trials['block']
    bcnt = 0
    for b in np.unique(bl):
        if len(trl[bl == b]) >= 75:
            result[b] = bcnt
            bcnt += 1
        print((b, bcnt))
    return result


# Submit one task per snum/session; each future resolves to that session's block map.
block_map = {}
for snum in range(1, 16):
    filenames = [metadata.get_raw_filename(snum, b) for b in range(4)]
    block_map[snum] = {}
    for session, filename in enumerate(filenames):
        block_map[snum][session] = executor.submit(do_one, filename)

diagnostics.progress(block_map)
block_map = executor.gather(block_map)
pickle.dump(block_map, open('blockmap.pickle', 'wb'))  # pickle needs a binary file handle

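# Companion sketch (not part of the original snippet): reading the saved mapping
# back. The structure mirrors what the loop above builds:
# {snum: {session: {raw_block_id: consecutive_block_number}}}.
with open('blockmap.pickle', 'rb') as f:
    saved_block_map = pickle.load(f)

print(saved_block_map[1][0])  # block map for snum 1, session 0
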
import itertools

import distributed
from distributed import Executor

import taskclient  # project-local module providing add_user / add_job / worker

SCHEDULER_PORT = 5678
SCHEDULER_HTTP_PORT = 9786
SCHEDULER_BOKEH_PORT = 12345
SCHEDULER_IP = '127.0.0.1'
HOME_PAGE = 'http://localhost:5050'


def test(one, two):
    return 4


executor = Executor('{}:{}'.format(SCHEDULER_IP, SCHEDULER_PORT))

taskclient.add_user(HOME_PAGE)
for _ in range(5):
    taskclient.add_job(HOME_PAGE)

num_iters = 50
# result_list = executor.map(taskclient.worker, [HOME_PAGE, HOME_PAGE], [0, 1])
result_list = executor.map(taskclient.worker,
                           itertools.repeat(HOME_PAGE, num_iters),
                           range(num_iters))

distributed.diagnostics.progress(result_list)
print()
print(executor.who_has(result_list))

sim_results = executor.gather(result_list)
print('---------')

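# Hedged sketch (not part of the original snippet): the `taskclient` module is
# project-local and not shown here. A minimal stand-in for the call signatures
# the code above relies on might look like the stubs below; in particular,
# `worker` must accept (url, job_index) because executor.map is given two iterables.
def worker_stub(url, job_index):
    # Process `url` as job number `job_index` and return some result.
    return {'url': url, 'job': job_index}


def add_user_stub(url):
    # Register the target URL with the task service.
    pass


def add_job_stub(url):
    # Queue one job for the URL.
    pass
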
import itertools
import logging
import time
from threading import Thread

import distributed
from distributed import Executor, Scheduler, Worker, as_completed
from tornado.ioloop import IOLoop

# TimeoutManager, ResultSaver, JobObserver and process_batch are project-local
# helpers defined elsewhere in this codebase.


class DistributedContext(object):
    io_loop = None
    io_thread = None

    def __init__(self,
                 ip="127.0.0.1",
                 port=8787,
                 spawn_workers=0,
                 write_partial_results=None,
                 track_progress=False,
                 time_limit=None,
                 job_observer=None):
        """
        :type ip: string
        :type port: int
        :type spawn_workers: int
        :type write_partial_results: int
        :type track_progress: bool
        :type time_limit: int
        :type job_observer: JobObserver
        """
        self.worker_count = spawn_workers
        self.ip = ip
        self.port = port
        self.active = False
        self.write_partial_results = write_partial_results
        self.track_progress = track_progress
        self.execution_count = 0
        self.timeout = TimeoutManager(time_limit) if time_limit else None
        self.job_observer = job_observer

        if not DistributedContext.io_loop:
            DistributedContext.io_loop = IOLoop()
            DistributedContext.io_thread = Thread(
                target=DistributedContext.io_loop.start)
            DistributedContext.io_thread.daemon = True
            DistributedContext.io_thread.start()

        if spawn_workers > 0:
            self.scheduler = self._create_scheduler()
            self.workers = [self._create_worker()
                            for i in xrange(spawn_workers)]
            time.sleep(0.5)  # wait for workers to spawn

        self.executor = Executor((ip, port))

    def run(self, domain,
            worker_reduce_fn, worker_reduce_init,
            global_reduce_fn, global_reduce_init):
        size = domain.steps
        assert size is not None  # TODO: Iterators without size

        workers = 0
        for name, value in self.executor.ncores().items():
            workers += value

        if workers == 0:
            raise Exception("There are no workers")

        batch_count = workers * 4
        batch_size = max(int(round(size / float(batch_count))), 1)
        batches = self._create_batches(batch_size, size, domain,
                                       worker_reduce_fn, worker_reduce_init)

        logging.info("Qit: starting {} batches with size {}".format(
            batch_count, batch_size))

        if self.job_observer:
            self.job_observer.on_computation_start(batch_count, batch_size)

        futures = self.executor.map(process_batch, batches)

        if self.track_progress:
            distributed.diagnostics.progress(futures)

        if self.write_partial_results is not None:
            result_saver = ResultSaver(self.execution_count,
                                       self.write_partial_results)
        else:
            result_saver = None

        timeouted = False
        results = []

        for future in as_completed(futures):
            job = future.result()
            if result_saver:
                result_saver.handle_result(job.result)
            if self.job_observer:
                self.job_observer.on_job_completed(job)

            results.append(job.result)

            if self.timeout and self.timeout.is_finished():
                logging.info("Qit: timeouted after {} seconds".format(
                    self.timeout.timeout))
                timeouted = True
                break

        # order results
        if not timeouted:
            results = [j.result for j in self.executor.gather(futures)]

        self.execution_count += 1

        if worker_reduce_fn is None:
            results = list(itertools.chain.from_iterable(results))

        logging.info("Qit: finished run with size {} (taking {})".format(
            len(results), domain.size))

        results = results[:domain.size]  # trim results to required size

        if global_reduce_fn is None:
            return results
        else:
            if global_reduce_init is None:
                return reduce(global_reduce_fn, results)
            else:
                return reduce(global_reduce_fn, results, global_reduce_init)

    def _create_scheduler(self):
        scheduler = Scheduler(ip=self.ip)
        scheduler.start(self.port)
        return scheduler

    def _create_worker(self):
        worker = Worker(scheduler_ip=self.ip,
                        scheduler_port=self.port,
                        ncores=1)
        worker.start(0)
        return worker

    def _create_batches(self, batch_size, size, domain,
                        worker_reduce_fn, worker_reduce_init):
        batches = []
        i = 0
        while True:
            new = i + batch_size
            if i + batch_size <= size:
                batches.append((domain, i, batch_size,
                                worker_reduce_fn, worker_reduce_init))
                i = new
                if new == size:
                    break
            else:
                batches.append((domain, i, size - i,
                                worker_reduce_fn, worker_reduce_init))
                break
        return batches

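# Hedged usage sketch (not part of the original excerpt): how DistributedContext
# might be driven. `DemoDomain` is a stand-in for the project's real domain
# objects; run() only reads its `steps` (number of elements to process) and
# `size` (number of results to keep), and process_batch must be importable by
# the workers for the map call to succeed.
class DemoDomain(object):
    def __init__(self, n):
        self.steps = n
        self.size = n


ctx = DistributedContext(ip="127.0.0.1", port=8787, spawn_workers=2,
                         track_progress=True)
total = ctx.run(DemoDomain(1000),
                worker_reduce_fn=lambda a, b: a + b,   # fold results within a batch
                worker_reduce_init=0,
                global_reduce_fn=lambda a, b: a + b,   # fold the per-batch sums
                global_reduce_init=0)
print(total)
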
import argparse

from distributed import Executor


def inject(url):
    import config
    from pymongo import MongoClient
    import os

    # connstring = config.db['host'] + ":" + config.db['port']
    run = "sqlmap -u " + url + ' --batch '
    command = os.popen(run).read()
    data = {'url': url, 'output': command}
    client = MongoClient('192.168.1.14:27017')
    dbn = config.db['dbname']
    db = client[dbn]
    results = db.results
    results.insert(data)
    return command


# Assumes a reachable dask.distributed scheduler.
executor = Executor()

parser = argparse.ArgumentParser()
parser.add_argument("-f")
args = parser.parse_args()

# Read one target URL per line from the input file.
links = []
s = open(args.f, 'r')
for x in s:
    links.append(x.rstrip().replace("\n", ""))

job = executor.map(inject, links)
print(executor.gather(job))

import os
import shutil

import pandas as pd

# `args`, `out_path`, `kp_type_dict`, `kp_type`, `tf_meta`, `num_imgs`,
# `extract_kp_from_frame` and `executor` come from the surrounding script.

if (not args.append) and os.path.exists(out_path):
    shutil.rmtree(out_path)

if not os.path.exists(out_path):
    os.makedirs(os.path.join(out_path, 'feat/df'))
    os.makedirs(os.path.join(out_path, 'feat/desc'))

# One work item per image frame; each dict carries everything the worker needs.
odDicts = [{
    'flight_hdf': args.uvan,
    'img_num': ii,
    'kp_det_func': kp_type_dict[kp_type],
    'kp_desc_func': kp_type_dict[kp_type],
    'p_meta': tf_meta,
    'o_path': out_path
} for ii in range(num_imgs)]

# pure=False tells dask the function is impure, so results are not deduplicated
# across identical inputs.
r = executor.map(extract_kp_from_frame, odDicts, pure=False)
kp_list = executor.gather(r)

kp_meta = pd.DataFrame(kp_list, columns=[
    'num_feat', 'center_lon', 'center_lat', 'df_path',
    'desc_path', 'flight', 'img_num'
])
kp_meta.to_hdf(os.path.join(out_path, 'feat_meta.hdf'), key='feat_meta')
print('what')