def start_scraping():
    """Handle a scrape request: read merchant/page params from the query string,
    kick off the scraper, and report the outcome as a plain string."""
    try:
        merchant_id = request.args.get("merchant_id")
        num_pages = request.args.get("num_pages")
        print(f"merchant id is {merchant_id}, num_pages is {num_pages}", file=sys.stdout)
        run(merchant_id, num_pages)
    except Exception as e:
        # Best-effort error reporting back to the caller.
        return f"Error {e}"
    else:
        return f"request was received for merchant id: {merchant_id}"
def run(flow):
    """Validate *flow* against the schema, execute it via the worker, and
    return a (json_response, status) pair suitable for Flask."""
    started = time.time()
    logging.info("Processing flow: {}".format(flow))

    # Schema validation failures are reported to the caller as 400s.
    try:
        validate(flow, schema)
    except ValidationError as e:
        logging.info("Flow did not validate against schema")
        logging.exception(e)
        return jsonify(error=str(e)), 400

    try:
        response = worker.run(conn_mgr, flow)
        # The worker signals an error by returning a plain string.
        if isinstance(response, str):
            logging.info("Response for flow {} is an error.".format(flow))
            logging.error(response)
            return jsonify(error=response), 400
        elapsed = time.time() - started
        logging.info("Sending response after %.1fs for flow: %s" % (elapsed, flow))
        return jsonify(response), 200
    except Exception as e:
        logging.info("Exception occurred while processing flow {}".format(flow))
        logging.exception(e)
        return jsonify(error=str(e)), 400
def main():
    """
    Fork master process and start flask server in current process and pool of workers
    :return:
    """
    startup()
    setup_logger()
    logger.info(f'forking...')

    queue = multiprocessing.Queue()
    if os.fork() == 0:
        # Child process: expose the queue on the app and serve HTTP.
        app.mp_queue = queue
        atexit.register(shutdown)
        app.run(host=config.IP, port=config.PORT, debug=False)
    else:
        # Parent process: consume work from the queue.
        worker.run(queue)
def start(args):
    """
    Setting up Tensorflow for data parallel work
    """
    spec = cluster_spec(args.num_workers, 1)
    cluster = tf.train.ClusterSpec(spec).as_cluster_def()

    if args.job_name == "worker":
        # Worker: constrain per-op threading and run the training loop.
        worker_config = tf.ConfigProto(
            intra_op_parallelism_threads=1,
            inter_op_parallelism_threads=2,
        )
        server = tf.train.Server(
            cluster,
            job_name="worker",
            task_index=args.task,
            config=worker_config,
        )
        run(args, server)
    else:
        # Parameter server: only sees /job:ps devices, then idles forever.
        ps_config = tf.ConfigProto(device_filters=["/job:ps"])
        server = tf.train.Server(
            cluster,
            job_name="ps",
            task_index=args.task,
            config=ps_config,
        )
        while True:
            time.sleep(1000)
def test_run():
    """run() should invoke the supplied method exactly once."""
    counting = mocks.CountingMethod()
    handle = run(counting.get())
    handle.join()
    assert counting.count == 1
def job(code, token, url):
    """Run the worker once for *code*, authenticating with *token* against *url*.

    Thin pass-through to ``worker.run``; kept as a named entry point so it can
    be scheduled/registered by callers.
    """
    # PEP 8: no spaces around '=' in keyword arguments.
    worker.run(code=code, token=token, url=url)
def collect_remo_data():
    """Delegate one data-collection pass to the worker module."""
    worker.run()
import os

# Role selection: INSTANCE_TYPE decides whether this process is a worker
# or a collector. The module for the unused role is never imported.
instance_type = os.environ.get("INSTANCE_TYPE")

if instance_type == "worker":
    import worker
    worker.run()
else:
    import collector
    collector.run()
## cartpole: 4, 32, 1, 5, 5
## pong: 6400, 256, 0.01, 50, 50
from worker import run


def _str2bool(value):
    """Parse a CLI flag value into a real bool.

    BUGFIX: argparse's ``type=bool`` calls ``bool()`` on the raw string, so
    ``--load_model False`` evaluated to True (any non-empty string is truthy).
    """
    return str(value).lower() in ("1", "true", "t", "yes", "y")


parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-a", "--num_actions", type=int, default=2, help="possible agent actions")
## for model to save and load
parser.add_argument("-ld", "--logdir", type=str, default=os.path.join(os.getcwd(), "ac_pong_log"), help="log data directory")
parser.add_argument("-lm", "--load_model", type=_str2bool, default=False, help="load from existing checkpoint")
parser.add_argument("-xdim", "--input_dim", type=int, default=4, help="total dimension of X")
parser.add_argument("-hl", "--num_hidden_layer", type=int, default=2, help="hidden layer dim")
parser.add_argument("-hd", "--hidden_dim", type=int, default=32, help="hidden layer dim")
parser.add_argument("-uf", "--update_freq", type=int, default=4, help="update frequency for training")
parser.add_argument("-bs", "--batch_size", type=int, default=5, help="mini batch size for training")
parser.add_argument("-ml", "--max_ep_length", type=int, default=100, help="max size for episode steps")
parser.add_argument("-lr", "--learning_rate", type=float, default=0.01, help="learning rate for training")
parser.add_argument("-g", "--gamma", type=float, default=0.9, help="Discount Factor")
parser.add_argument("-ep", "--total_episodes", type=int, default=10000, help="Total episode number")
parameters = parser.parse_args()

if __name__ == "__main__":
    run(parameters)
def test_worker_honors_configured_delimiter():
    """run() must succeed when given a context with a non-default delimiter."""
    result = run(json_event, DiffDelimiterContext())
    assert result
def test_worker_understands_json_event():
    """run() must succeed on a plain JSON event with a no-op context."""
    result = run(json_event, VoidContext())
    assert result
# Load optional configuration; fall back to defaults when config.py is absent.
try:
    import config
    print('[Info] Loading configuration from config.py')
    Server = config.Server
    Dashboard = config.Dashboard
    Worker = config.Worker
except ImportError:
    print('[Warning] No configuration file were found! Using default config settings.')
    print('[Warning] For more info see config.py. example file or README file.')
    Server = None
    Dashboard = None
    Worker = None

# Dispatch on the requested component name.
# BUGFIX: the 'running ...' lines were Python-2 print statements
# (e.g. ``print 'running worker'``), a syntax error on Python 3 and
# inconsistent with the print() calls already used above.
if len(sys.argv) < 2:
    sys.exit('Usage: %s component-name (i.e. server, dashboard or worker)' % sys.argv[0])
elif sys.argv[1] == 'worker':
    print('running worker')
    import worker
    worker.run(Worker)
elif sys.argv[1] == 'dashboard':
    print('running dashboard')
    import dashboard
    dashboard.run(Dashboard)
elif sys.argv[1] == 'server':
    print('running server')
    import server
    server.run(Server)
else:
    sys.exit('Usage: %s component-name (i.e. server, dashboard or worker)' % sys.argv[0])
# The poller functions
import poller

# Size of the board
default_board_size = 9

# Directory for save files
save_dir = 'games/'

# If it already exists, just passes
if not os.path.isdir(save_dir):
    mkdir(save_dir)

# Process our arguments.
# BUGFIX: sys.argv entries are strings; range() needs an int.
numWorkers = int(sys.argv[1])

# Initialize the worker queues
if not os.path.isdir('queues/'):
    mkdir('queues')
for i in range(numWorkers):
    # BUGFIX: the file was opened in the default read mode, so pickle.dump
    # failed; pickle writes bytes, so open for binary writing. The context
    # manager also guarantees the handle is closed on error.
    with open('queues/' + str(i) + '_queue.p', 'wb') as f:
        pickle.dump(deque(), f, pickle.HIGHEST_PROTOCOL)

# Start the poller and workers
val = os.fork()

# If the child
if val == 0:
    worker.run()
else:
    # The parent, and we should run the poller
    poller.run()
from datetime import datetime
import worker
import configparser
import os
import sys
import const

# Load task definitions from config.ini at the project root.
cfgpath = os.path.join(const.ROOT_PATH, './config.ini')
conf = configparser.ConfigParser()
conf.read(cfgpath, encoding="utf-8")

tasks = conf.get('main', 'task').split(',')
# Hoisted out of the loop: the URL comes from [main] and is the same for
# every task, so there is no need to re-read it per iteration.
url = conf.get('main', 'url')
for code in tasks:
    # Each task section carries its own auth token.
    token = conf.get(code, 'token')
    worker.run(code=code, token=token, url=url)

# Keep the console window open (Windows 'pause' command).
os.system('pause')
def start_worker():
    """Build the transaction-processing task and hand it to the worker."""
    task = _create_process_txn_task()
    worker.run(task)