def _InitCli(pool_type = None, cluster_config = None, model_name = None,
    params = None, edit_params = False, layer = None, verbose = 0, **opts):
  """Initialize the experiment environment from command-line style options.

  :param str pool_type: Worker pool flavor: 'c'/'cluster', 'm'/'multicore',
     or 's'/'singlecore'. If None, the current pool is left unchanged.
  :param cluster_config: Configuration passed to :func:`UseCluster` when a
     cluster pool is requested.
  :param str model_name: Name of the model class to activate.
  :param params: Model parameters passed to :func:`SetParams`.
  :param bool edit_params: Whether to open an interactive parameter editor
     (traits GUI) after configuration.
  :param layer: Model layer passed to :func:`SetLayer`.
  :param int verbose: Verbosity level; >0 enables verbose mode, >1 also
     enables INFO-level logging.
  :raises util.UsageException: If the pool type or model name is unknown.

  """
  if verbose > 0:
    Verbose(True)
  if verbose > 1:
    logging.getLogger().setLevel(logging.INFO)
  # Make the worker pool
  if pool_type is not None:
    pool_type = pool_type.lower()
    if pool_type in ('c', 'cluster'):
      UseCluster(cluster_config)
    elif pool_type in ('m', 'multicore'):
      SetPool(pools.MulticorePool())
    elif pool_type in ('s', 'singlecore'):
      SetPool(pools.SinglecorePool())
    else:
      raise util.UsageException("Unknown pool type: %s" % pool_type)
  try:
    SetModelClass(model_name)
  except ValueError:
    raise util.UsageException("Unknown model (-m): %s" % model_name)
  SetParams(params)
  SetLayer(layer)
  if edit_params:
    GetParams().configure_traits()
def main():
  """Entry point: parse command-line options and load the requested model.

  Recognized options:
    -c FILE  evaluate images on a cluster configured in FILE
    -m MOD   use the model named MOD ("default" maps to the built-in default)
    -v       enable INFO-level logging
  """
  default_model = "viz2"
  try:
    opts, args = util.GetOptions("c:m:v")
    model_name = default_model
    # Default to a local multicore pool; options below may override it.
    pool = pools.MulticorePool()
    for opt, arg in opts:
      if opt == '-c':
        # Use a cluster of worker nodes
        from glimpse.pools.cluster import ClusterConfig, ClusterPool
        config = ClusterConfig(arg)
        pool = ClusterPool(config)
      elif opt == '-m':
        # Set the model class
        arg = arg.lower()
        if arg == "default":
          model_name = default_model
        else:
          model_name = arg
      elif opt == '-v':
        logging.getLogger().setLevel(logging.INFO)
    # Import the model package and look up its command module.
    models = __import__("glimpse.models.%s" % model_name, globals(), locals(),
        ['cmds'], 0)
    try:
      model = models.cmds
    except AttributeError:
      raise util.UsageException("Unknown model (-m): %s" % model_name)
  except util.UsageException as e:
    util.Usage("[options]\n"
        " -c FILE Use a cluster to evaluate images, configured in FILE.\n"
        " -m MOD Use model named MOD\n"
        " -v Enable verbose logging", e)
def SetPool(pool):
  """Set the worker pool used for this experiment.

  The pool may be given either as a pool object or as a string naming a pool
  flavor ('singlecore' or 'multicore', case-insensitive), in which case the
  corresponding pool is constructed here. The chosen pool is stored in the
  module-level pool slot and also returned.
  """
  global __POOL
  if util.IsString(pool):
    key = pool.lower()
    factories = {
        'singlecore': pools.SinglecorePool,
        'multicore': pools.MulticorePool,
    }
    if key not in factories:
      raise ValueError("Unknown pool type: %s" % key)
    pool = factories[key]()
  logging.info("Using pool type: %s", type(pool).__name__)
  __POOL = pool
  return pool
def RunWorker(job_server_url, command_url, log_url, num_processes = None):
  """Launch a Gearman worker and wait for it to complete.

  This worker processes batch requests using a :class:`MulticorePool
  <glimpse.pools.MulticorePool>`.

  :param str job_server_url: URL for Gearman job server.
  :param str command_url: URL for command channel.
  :param str log_url: URL for logging channel.
  :param int num_processes: Number of concurrent processes to use when
     processing a batch request. Defaults to the number of available cores.
  :returns: Exit status of the worker processes.

  """
  pool = pools.MulticorePool(num_processes)
  # Per-worker statistics, reported over the command channel via get_stats().
  stats = dict(HOST = socket.getfqdn(), PID = os.getpid(), ELAPSED_TIME = 0,
      NUM_REQUESTS = 0, START_TIME = time.strftime("%Y-%m-%d %H:%M:%S"))

  def get_stats():
    """Return a copy of the current worker statistics."""
    return dict(stats)

  def handle_map(worker, job):
    """Map a function onto its arguments.

    :param gearman.worker.GearmanWorker worker: Worker processing the job.
    :param gearman.job.GearmanJob job: Job whose data is a (func, args) pair.

    """
    try:
      start = time.time()
      func, args = job.data
      if func is None:
        # A null function is a sentinel: reuse the function cached in worker
        # memory by an earlier request.
        logging.info("Looking up cached function")
        func = worker.memory[CACHED_FUNC_KEY]
      logging.info("Worker processing task with %d elements", len(args))
      results = pool.map(func, args)
      elapsed_time = time.time() - start
      logging.info("\tfinished in %.2f secs", elapsed_time)
      stats['ELAPSED_TIME'] += elapsed_time
      stats['NUM_REQUESTS'] += 1
      return results
    except Exception:
      # Log with traceback before propagating to the Gearman framework.
      logging.exception("Caught exception in worker")
      raise

  worker = Worker([job_server_url])
  # Start the command listener
  logging.info("Gearman worker starting with job server at %s", job_server_url)
  logging.info("\tSUB commands from %s", command_url)
  logging.info("\tPUB logs to %s", log_url)
  cmd_future = FutureSocket(url = command_url, bind = False)
  log_future = FutureSocket(url = log_url, bind = False)
  cmd_thread = threading.Thread(target = CommandHandlerTarget,
      args = (worker, cmd_future, log_future, get_stats))
  # Daemonize so a hung command listener never blocks process exit.
  cmd_thread.daemon = True
  cmd_thread.start()
  # Start the task processor
  worker.set_client_id("GlimpseWorker")
  worker.register_task(GEARMAN_TASK_MAP, handle_map)
  # Process tasks, checking command channel every two seconds
  worker.work(poll_timeout = 2.0)
  # Wait for worker to exit
  worker.shutdown()
  return worker.exit_status