def main_process(my_id):
    """Build a Worker for this process id and keep restarting it forever.

    my_id: identifier handed to the Worker so it can tag its work.
    """
    cfg = ConfigReader().parse_vars([
        "RECV_QUEUE",
        "SEND_QUEUE",
        "MASTER_SEND_QUEUE",
        "SEND_REQUEST_QUEUE",
        "STATUS_QUEUE",
        "DATA_CLUSTER_WRITE",
        "DATA_CLUSTER_READ",
    ])
    worker = Worker(
        cfg["RECV_QUEUE"],
        [cfg["SEND_QUEUE"]],  # Worker expects a list of send queues
        cfg["MASTER_SEND_QUEUE"],
        cfg["SEND_REQUEST_QUEUE"],
        cfg["STATUS_QUEUE"],
        cfg["DATA_CLUSTER_WRITE"],
        cfg["DATA_CLUSTER_READ"],
        my_id,
    )
    # Re-run the worker every time start() returns.
    while True:
        worker.start()
def main_process():
    """Assemble the sender-id list for every worker pool and run the coordinator.

    Reads worker-pool sizes from configuration, builds the flat list of sender
    names (date workers, then count workers, then map workers), and starts a
    CoordinatorManager wired to those senders.
    """
    cfg = ConfigReader().parse_vars([
        "RECV_QUEUE",
        "TOTAL_DATE_WORKERS",
        "TOTAL_MAP_WORKERS",
        "TOTAL_SINGLE_MAP_WORKERS",
        "TOTAL_COUNT_WORKERS",
        "DATA_CLUSTER_WRITE",
        "DATA_CLUSTER_READ",
        "PLACE_MANAGER_QUEUE",
    ])
    # Comprehensions replace the manual append loops; ordering is unchanged:
    # date workers first, then count workers, then map workers.
    senders = [
        f"date_worker_{i + 1}"
        for i in range(int(cfg["TOTAL_DATE_WORKERS"]))
    ]
    senders += [
        f"count_worker_{i + 1}"
        for i in range(int(cfg["TOTAL_COUNT_WORKERS"]))
    ]
    # NOTE(review): map workers are 1-based on i but 0-based on j, with a
    # double underscore separator — preserved as-is; confirm the consumers
    # of these ids expect exactly this format.
    senders += [
        f"map_worker_{i + 1}__{j}"
        for i in range(int(cfg["TOTAL_MAP_WORKERS"]))
        for j in range(int(cfg["TOTAL_SINGLE_MAP_WORKERS"]))
    ]
    worker = CoordinatorManager(
        cfg["RECV_QUEUE"],
        senders,
        cfg["DATA_CLUSTER_WRITE"],
        cfg["DATA_CLUSTER_READ"],
        cfg["PLACE_MANAGER_QUEUE"],
    )
    worker.start()
def main():
    """Spawn one child process per configured worker, watch them, and join.

    Starts int(WORKERS) processes running main_process, each with an id
    derived from its index and the WORKER_ID config value, runs a
    StatusChecker over them, then joins every child.
    """
    params = ConfigReader().parse_vars(
        ["STATUS_QUEUE", "WORKERS", "WORKER_ID", "WORKER_TYPE"])
    processes = []
    # The original kept a separate hand-incremented counter that exactly
    # mirrored the (unused) loop variable; one index does both jobs.
    for worker_id in range(int(params["WORKERS"])):
        my_id = generate_worker_id(str(worker_id), params["WORKER_ID"])
        p = Process(target=main_process, args=(my_id,))
        p.start()
        processes.append(p)
    checker = StatusChecker(
        params["WORKER_ID"],
        params["WORKER_TYPE"],
        processes,
        params["STATUS_QUEUE"],
    )
    checker.start()
    print("Joining processes")
    for p in processes:
        print("joining one process")
        p.join()
        print("Process joined")
def health_process(self, update_queue):
    """Run a Watcher that forwards health updates onto update_queue."""
    print("Starting health process")
    init_queue = ConfigReader().parse_vars(["INIT_QUEUE"])["INIT_QUEUE"]
    Watcher(init_queue, update_queue).start()
def main():
    """Start a Worker wired to its receive, send, and master-send queues."""
    cfg = ConfigReader().parse_vars(
        ["RECV_QUEUE", "SEND_QUEUE", "MASTER_SEND_QUEUE"])
    # Worker takes the send queues as a list.
    Worker(cfg["RECV_QUEUE"],
           [cfg["SEND_QUEUE"]],
           cfg["MASTER_SEND_QUEUE"]).start()
def read_manager_process(self):
    """Run the ReadManager over ROUTE and the configured read queue."""
    print("Starting read manager")
    recv_queue = ConfigReader().parse_vars(["RECV_READ_QUEUE"])["RECV_READ_QUEUE"]
    ReadManager(ROUTE, recv_queue).start()
def replica_manager_process(self):
    """Run the ReplicaManager over ROUTE and the configured replica queue."""
    print("Starting replica manager")
    recv_replica = ConfigReader().parse_vars(["RECV_REPLICA"])["RECV_REPLICA"]
    ReplicaManager(ROUTE, recv_replica).start()
def main():
    """Start a DateSorter with its queues and expected worker count."""
    cfg = ConfigReader().parse_vars(
        ["RECV_QUEUE", "SEND_QUEUE", "TOTAL_WORKERS"])
    sorter = DateSorter(
        cfg["RECV_QUEUE"],
        cfg["SEND_QUEUE"],
        int(cfg["TOTAL_WORKERS"]),
    )
    sorter.start()
def main():
    """Start a MasterController with its queues and total worker count."""
    cfg = ConfigReader().parse_vars(
        ["RECV_QUEUE", "SEND_QUEUE", "TOTAL_WORKERS"])
    controller = MasterController(
        cfg["RECV_QUEUE"],
        cfg["SEND_QUEUE"],
        int(cfg["TOTAL_WORKERS"]),
    )
    controller.start()
def write_manager_process(self):
    """Run the WriteManager over ROUTE and the configured write queue."""
    print("Starting write manager")
    recv_queue = ConfigReader().parse_vars(["RECV_WRITE_QUEUE"])["RECV_WRITE_QUEUE"]
    # Third argument is an initially-empty list, as in the original call.
    WriteManager(ROUTE, recv_queue, []).start()
def main_process():
    """Start a Worker connected to its queues and data-cluster endpoints."""
    cfg = ConfigReader().parse_vars([
        "RECV_QUEUE",
        "SEND_QUEUE",
        "STATUS_QUEUE",
        "DATA_CLUSTER_WRITE",
        "DATA_CLUSTER_READ",
    ])
    Worker(
        cfg["RECV_QUEUE"],
        cfg["SEND_QUEUE"],
        cfg["STATUS_QUEUE"],
        cfg["DATA_CLUSTER_WRITE"],
        cfg["DATA_CLUSTER_READ"],
    ).start()
def main_process():
    """Start a SummaryController wired to its queues and data cluster."""
    cfg = ConfigReader().parse_vars([
        "RECV_QUEUE",
        "STATUS_QUEUE",
        "DATA_CLUSTER_WRITE",
        "DATA_CLUSTER_READ",
        "COORDINATOR_QUEUE",
    ])
    controller = SummaryController(
        cfg["RECV_QUEUE"],
        cfg["STATUS_QUEUE"],
        cfg["DATA_CLUSTER_WRITE"],
        cfg["DATA_CLUSTER_READ"],
        cfg["COORDINATOR_QUEUE"],
    )
    controller.start()
def resume_master_process():
    """Start a ResumeMasterController from the parsed configuration."""
    cfg = ConfigReader().parse_vars([
        "RECV_RESUME_QUEUE",
        "SEND_RESUME_QUEUE",
        "TOTAL_WORKERS",
        "STATUS_QUEUE",
        "WORKER_ID",
        "DATA_CLUSTER_WRITE",
        "DATA_CLUSTER_READ",
    ])
    controller = ResumeMasterController(
        cfg["RECV_RESUME_QUEUE"],
        cfg["SEND_RESUME_QUEUE"],
        int(cfg["TOTAL_WORKERS"]),
        cfg["STATUS_QUEUE"],
        cfg["WORKER_ID"],
        cfg["DATA_CLUSTER_WRITE"],
        cfg["DATA_CLUSTER_READ"],
    )
    controller.start()
def receiver_process():
    """Build a PlaceReceiver and keep restarting it forever."""
    cfg = ConfigReader().parse_vars([
        "RECV_QUEUE",
        "DATA_CLUSTER_WRITE",
        "DATA_CLUSTER_READ",
        "RECV_REQUEST_QUEUE",
    ])
    receiver = PlaceReceiver(
        cfg["RECV_QUEUE"],
        cfg["RECV_REQUEST_QUEUE"],
        cfg["DATA_CLUSTER_WRITE"],
        cfg["DATA_CLUSTER_READ"],
    )
    # Re-run the receiver every time start() returns.
    while True:
        receiver.start()
def main():
    """Run main_process in a child, monitor it with a StatusChecker, join."""
    child = Process(target=main_process)
    child.start()
    params = ConfigReader().parse_vars(
        ["STATUS_QUEUE", "WORKER_ID", "WORKER_TYPE"])
    StatusChecker(
        params["WORKER_ID"],
        params["WORKER_TYPE"],
        [child],
        params["STATUS_QUEUE"],
    ).start()
    child.join()
def main():
    """Start a cluster node running bully leader election with no peers."""
    print("Starting Cluster Node")
    cfg = ConfigReader().parse_vars(["MY_ID", "MY_DIR", "PORT"])
    me = Node(cfg["MY_ID"], cfg["MY_DIR"])
    # A cluster node begins with an empty peer list.
    leader = BullyLeader(me, int(cfg["PORT"]), [])
    ProcessManager(leader).start()
def main_process():
    """Start a TopCitiesController from the parsed configuration."""
    cfg = ConfigReader().parse_vars([
        "RECV_QUEUE",
        "SEND_QUEUE",
        "TOTAL_WORKERS",
        "STATUS_QUEUE",
        "DATA_CLUSTER_WRITE",
        "DATA_CLUSTER_READ",
    ])
    controller = TopCitiesController(
        cfg["RECV_QUEUE"],
        cfg["SEND_QUEUE"],
        int(cfg["TOTAL_WORKERS"]),
        cfg["STATUS_QUEUE"],
        cfg["DATA_CLUSTER_WRITE"],
        cfg["DATA_CLUSTER_READ"],
    )
    controller.start()
def main():
    """Build a ChunkManager and feed it the place and data files."""
    cfg = ConfigReader().parse_vars([
        "QUEUE_MAP", "QUEUE_DATE", "QUEUE_COUNT",
        "EOF_MAP", "EOF_DATE", "EOF_COUNT",
        "TOPIC_PLACES",
    ])
    manager = ChunkManager(
        cfg["QUEUE_MAP"],
        cfg["QUEUE_DATE"],
        cfg["QUEUE_COUNT"],
        cfg["EOF_MAP"],
        cfg["EOF_DATE"],
        cfg["EOF_COUNT"],
        cfg["TOPIC_PLACES"],
    )
    # Places are processed before the main data file.
    manager.process_places(PLACE_FILE)
    manager.process_data(DATA_FILE)
def main():
    """Run the master and resume-master processes, watch both, then join."""
    master = Process(target=master_process)
    master.start()
    resume = Process(target=resume_master_process)
    resume.start()
    params = ConfigReader().parse_vars(
        ["STATUS_QUEUE", "WORKER_ID", "WORKER_TYPE"])
    StatusChecker(
        params["WORKER_ID"],
        params["WORKER_TYPE"],
        [master, resume],
        params["STATUS_QUEUE"],
    ).start()
    master.join()
    resume.join()
def main():
    """Start a watcher node that runs bully election against four peers."""
    print("Starting Watcher Node")
    cfg = ConfigReader().parse_vars([
        "MY_ID", "MY_DIR", "PORT",
        "NODE_A", "NODE_B", "NODE_C", "NODE_D",
        "ID_A", "ID_B", "ID_C", "ID_D",
    ])
    # Peers A..D in order, each built from its ID_x / NODE_x config pair.
    peers = [
        Node(cfg["ID_" + tag], cfg["NODE_" + tag])
        for tag in ("A", "B", "C", "D")
    ]
    me = Node(cfg["MY_ID"], cfg["MY_DIR"])
    leader = BullyLeader(me, int(cfg["PORT"]), peers)
    ProcessManager(leader).start()
def main():
    """Start a SummaryController bound to the configured receive queue."""
    recv_queue = ConfigReader().parse_vars(["RECV_QUEUE"])["RECV_QUEUE"]
    SummaryController(recv_queue).start()
def initialize_workers(self, config_file):
    """Register every worker declared in config_file with the worker manager.

    config_file: path handed to ConfigReader.parse_from_file; the parsed
    result maps worker ids to worker types.
    """
    workers_by_id = ConfigReader().parse_from_file(config_file)
    print("Initializing workers")
    for wid, wtype in workers_by_id.items():
        print("Added from config: {} {}".format(wid, wtype))
        self.worker_manager.add_worker(wid, wtype)