def test_sftp_download(self):
    q = Queue()
    q2 = Queue()
    q.put((0, {"url": test_urls["sftp_small"]["url"], "dest": test_dest}))
    q.put((1, {"url": test_urls["sftp_pkey_small"]["url"],
               "dest": test_dest,
               "key_filename": test_urls["sftp_pkey_small"]["key_filename"],
               "passphrase": test_urls["sftp_pkey_small"]["passphrase"]}))
    worker = Worker({"wait_task": 0}, q, q2)
    worker.start()
    worker.join()
    self.assertTrue(q.empty())
    self.assertTrue(os.path.exists(test_urls["sftp_small"]["path"]))
    self.assertTrue(os.path.exists(test_urls["sftp_pkey_small"]["path"]))
    with open(test_urls["sftp_small"]["path"], "rb") as f:
        data = f.read()
    self.assertEqual(hashlib.md5(data).hexdigest(), test_urls["sftp_small"]["md5"])
    with open(test_urls["sftp_pkey_small"]["path"], "rb") as f:
        data = f.read()
    self.assertEqual(hashlib.md5(data).hexdigest(), test_urls["sftp_pkey_small"]["md5"])
    FileManager.remove_file(test_urls["sftp_small"]["path"])
    FileManager.remove_file(test_urls["sftp_pkey_small"]["path"])

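# A hedged sketch of the SFTP download the test above exercises, written with
# plain paramiko. The real Worker's URL parsing and method names are unknown;
# the function name and parameters here are illustrative only.
import paramiko

def sftp_download(host, port, username, remote_path, local_path,
                  password=None, key_filename=None, passphrase=None):
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(host, port=port, username=username, password=password,
                   key_filename=key_filename, passphrase=passphrase)
    try:
        with client.open_sftp() as sftp:
            sftp.get(remote_path, local_path)
    finally:
        client.close()
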
def test_many_download(self):
    q = Queue()
    q2 = Queue()
    selects = {}
    for i in range(10):
        choice = random.choice(list(test_urls.keys()))
        selects[choice] = selects.get(choice, 0) + 1
        q.put((i, {"url": test_urls[choice]["url"], "dest": test_dest}))
    worker = Worker({"wait_task": 1, "wait_retry": 0, "max_retry": 1}, q, q2)
    worker.start()
    worker.join()
    self.assertTrue(q.empty())
    for key, value in selects.items():
        path = test_urls[key]["path"]
        dirname = FileManager.get_dirname(path)
        basename = FileManager.get_basename(path)
        for i in range(value):
            # Duplicate downloads are saved as "<i>_<basename>"; the first keeps the original name.
            filepath = os.path.join(dirname, "{}_{}".format(i, basename)) if i != 0 else path
            self.assertTrue(os.path.exists(filepath))
            with open(filepath, "rb") as f:
                data = f.read()
            self.assertEqual(hashlib.md5(data).hexdigest(), test_urls[key]["md5"])
            FileManager.remove_file(filepath)

def test_fail_download(self):
    works = Queue(maxsize=0)
    progresses = Queue(maxsize=0)
    for i, key in enumerate(test_urls):
        works.put((i + 1, {"url": test_urls[key]["url"], "dest": test_dest}))
    for i in range(4):
        worker = Worker({"wait_task": 1, "wait_retry": 0, "max_retry": 1},
                        works, progresses, test_net=True, name="worker{}".format(i))
        worker.daemon = True
        worker.start()
    visualizer = Visualizer(4, progresses, name="visualizer")
    visualizer.start()
    works.join()
    visualizer.join()
    self.assertTrue(works.empty())
    self.assertTrue(progresses.empty())
    self.assertEqual(visualizer.fail, 4)
    self.assertEqual(visualizer.fail, visualizer.task)
    self.assertEqual(len(visualizer.results), 4)
    self.assertEqual(len(os.listdir(test_dest)), 0)

def start_stop(self):
    """
    Start the GOL simulation on a separate thread,
    or stop it if it is already running.
    """
    if not self._gol_model.get_running():
        self._gol_model.set_running(True)
        self._worker = Worker(self.single_step, 1 / self._gol_model.get_fps())
        self._worker.start()
    else:
        self._worker.stop()
        self._gol_model.set_running(False)

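# A minimal sketch of the periodic Worker thread assumed by start_stop above:
# it invokes the supplied callback every `interval` seconds until stop() is
# called. Only the constructor shape and start()/stop() come from the caller;
# the rest is an assumption.
import threading

class Worker(threading.Thread):
    def __init__(self, func, interval):
        super().__init__(daemon=True)
        self._func = func
        self._interval = interval
        self._stopped = threading.Event()

    def run(self):
        # Event.wait returns False on timeout, True once stop() sets the flag.
        while not self._stopped.wait(self._interval):
            self._func()

    def stop(self):
        self._stopped.set()
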
def test_404_download(self):
    q = Queue()
    q2 = Queue()
    q.put((0, {"url": "http://google.com/blah", "dest": test_dest}))
    worker = Worker({"wait_task": 0, "wait_retry": 0}, q, q2)
    worker.start()
    worker.join()
    self.assertTrue(q.empty())
    q2.get()  # discard the progress message that precedes the error
    self.assertIsInstance(q2.get()[1]["error"], requests.exceptions.HTTPError)

def test_no_dest(self):
    q = Queue()
    q2 = Queue()
    q.put((0, {"url": "url"}))
    q.put((1, {"url": "url", "dest": ""}))
    worker = Worker({"wait_task": 0}, q, q2)
    worker.start()
    worker.join()
    self.assertTrue(q.empty())
    self.assertIsInstance(q2.get()[1]["error"], NoDestinationPathException)
    self.assertIsInstance(q2.get()[1]["error"], NoDestinationPathException)

def test_unsupport_protocol(self):
    q = Queue()
    q2 = Queue()
    q.put((0, {"url": "url", "dest": test_dest}))
    q.put((1, {"url": "abc://path", "dest": test_dest}))
    worker = Worker({"wait_task": 0}, q, q2)
    worker.start()
    worker.join()
    self.assertTrue(q.empty())
    q2.get()  # discard the progress message that precedes each error
    self.assertIsInstance(q2.get()[1]["error"], UnsupportedProtocolException)
    q2.get()
    self.assertIsInstance(q2.get()[1]["error"], UnsupportedProtocolException)

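# A hedged sketch of the validation the two tests above imply: before any
# download starts, the Worker inspects the task dict and raises the project's
# exceptions, which its run loop forwards on the progress queue. The helper
# name and the supported-scheme set are assumptions; the exception classes
# come from the tests. Stub definitions are included so the sketch runs
# standalone.
from urllib.parse import urlparse

class NoDestinationPathException(Exception):
    pass  # stub; the project defines the real class

class UnsupportedProtocolException(Exception):
    pass  # stub; the project defines the real class

SUPPORTED_SCHEMES = {"http", "https", "ftp", "sftp"}  # assumed set

def validate_task(task):
    if not task.get("dest"):
        raise NoDestinationPathException(task.get("url"))
    scheme = urlparse(task["url"]).scheme
    if scheme not in SUPPORTED_SCHEMES:
        raise UnsupportedProtocolException(scheme)
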
def test_success_download(self):
    works = Queue(maxsize=0)
    progresses = Queue(maxsize=0)
    for i, key in enumerate(test_urls):
        works.put((i + 1, {"url": test_urls[key]["url"], "dest": test_dest}))
    for i in range(4):
        worker = Worker({"wait_task": 1, "wait_retry": 0, "max_retry": 1},
                        works, progresses, name="worker{}".format(i))
        worker.daemon = True
        worker.start()
    visualizer = Visualizer(4, progresses, name="visualizer")
    visualizer.start()
    works.join()
    visualizer.join()
    self.assertTrue(works.empty())
    self.assertTrue(progresses.empty())
    self.assertEqual(visualizer.success, 4)
    self.assertEqual(visualizer.success, visualizer.task)
    self.assertFalse(visualizer.results)
    for key in test_urls:
        self.assertTrue(os.path.exists(test_urls[key]["path"]))
        with open(test_urls[key]["path"], "rb") as f:
            data = f.read()
        self.assertEqual(hashlib.md5(data).hexdigest(), test_urls[key]["md5"])
        FileManager.remove_file(test_urls[key]["path"])

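# A hedged sketch of the Visualizer the tests read back: a thread that
# consumes progress messages until it has seen a final status for each of the
# expected tasks, tallying successes and failures. The message format (an
# (id, info) tuple carrying "status" or "error") is inferred from the tests;
# the internals are assumptions.
import threading

class Visualizer(threading.Thread):
    def __init__(self, task, progresses, **kwargs):
        super().__init__(**kwargs)
        self.task = task              # number of downloads expected
        self.progresses = progresses
        self.success = 0
        self.fail = 0
        self.results = []             # failed tasks, kept for reporting

    def run(self):
        done = 0
        while done < self.task:
            task_id, info = self.progresses.get()
            if "error" in info:
                self.fail += 1
                self.results.append((task_id, info))
                done += 1
            elif info.get("status") == "success":
                self.success += 1
                done += 1
            # anything else is treated as an intermediate progress update
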
def test_fail_download(self):
    q = Queue()
    q2 = Queue()
    selects = {}
    for i in range(10):
        choice = random.choice(list(test_urls.keys()))
        selects[choice] = selects.get(choice, 0) + 1
        q.put((i, {"url": test_urls[choice]["url"], "dest": test_dest}))
    worker = Worker({"wait_task": 1, "wait_retry": 0, "max_retry": 1}, q, q2, test_net=True)
    worker.start()
    worker.join()
    self.assertTrue(q.empty())
    self.assertEqual(len(os.listdir(test_dest)), 0)

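# A hedged sketch of the download Worker these tests exercise, matching the
# behaviour they assert: a thread that drains (id, task) tuples from the work
# queue, retries up to "max_retry" times with "wait_retry" seconds between
# attempts, and reports success or the raised error on the progress queue.
# Config keys and queue usage come from the calling code; download() itself
# is a placeholder.
import time
import threading
from queue import Empty

class Worker(threading.Thread):
    def __init__(self, config, works, progresses, test_net=False, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.works = works
        self.progresses = progresses
        self.test_net = test_net  # when True, simulate network failure (assumed)

    def download(self, task):
        # Placeholder: the real Worker dispatches on the URL scheme
        # (http/ftp/sftp) and writes the file under task["dest"].
        raise NotImplementedError

    def run(self):
        max_retry = self.config.get("max_retry", 1)
        while True:
            try:
                task_id, task = self.works.get(timeout=self.config.get("wait_task", 1))
            except Empty:
                break  # no more work arrived within the waiting window
            for attempt in range(max_retry + 1):
                try:
                    self.download(task)
                    self.progresses.put((task_id, {"status": "success"}))
                    break
                except Exception as error:
                    if attempt == max_retry:
                        self.progresses.put((task_id, {"error": error}))
                    else:
                        time.sleep(self.config.get("wait_retry", 0))
            self.works.task_done()
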
def ssh_connect(self, args):
    ssh = SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    LOG.debug("ssh connect args: {}".format(args))
    dst_addr = args[:2]
    LOG.info("Connecting to {}:{}".format(*dst_addr))
    try:
        ssh.connect(*args, timeout=6)
    except socket.error:
        raise ValueError('Unable to connect to {}:{}'.format(*dst_addr))
    except paramiko.BadAuthenticationType:
        raise ValueError('Bad authentication type.')
    except paramiko.AuthenticationException:
        raise ValueError('Authentication failed.')
    except paramiko.BadHostKeyException:
        raise ValueError('Bad host key.')
    chan = ssh.invoke_shell(term='xterm')
    chan.setblocking(0)
    worker = Worker(self.loop, ssh, chan, dst_addr)
    worker.encoding = self.get_default_encoding(ssh)
    return worker

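# Context for the method above: paramiko's SSHClient.connect(hostname,
# port=22, username=None, password=None, ...) means `args` is expected to
# unpack as something like ('example.com', 22, 'user', 'secret')
# (illustrative values, not from the source). chan.setblocking(0) switches
# the channel to non-blocking mode so the event-loop Worker can poll it
# without stalling.
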
def _task_action(self, f):
    w = Worker(driver=self.driver,
               device=self.device.get("deviceName"),
               reset=self.device.get("reset"))
    w.execute(f=f)

saver = tf.train.Saver(max_to_keep=3, var_list=GLOBAL_AC.getVars + [global_step])
GLOBAL_AC.InitializeVariablesFromFile(saver, MODEL_PATH)
progbar = tf.keras.utils.Progbar(None, unit_name='Training', stateful_metrics=["Reward"])
writer = tf.summary.FileWriter(LOG_PATH, graph=sess.graph)

# Create workers
workers = []
for i in range(settings["NumberENV"]):
    i_name = 'W_%i' % i  # worker name
    network = Network(settings["NetworkConfig"], nActions, netConfigOverride, scope=i_name)
    Method = GetFunction(settings["Method"])
    localNetwork = Method(network, sess, stateShape=dFeatures, actionSize=nActions,
                          scope=i_name, HPs=settings["NetworkHPs"], globalAC=GLOBAL_AC,
                          nTrajs=nTrajs)
    localNetwork.InitializeVariablesFromFile(saver, MODEL_PATH)
    env, _, _, _ = CreateEnvironment(envSettings, multiprocessing=1)
    workers.append(Worker(localNetwork, env, sess, global_step, global_step_next,
                          settings, progbar, writer, MODEL_PATH, saver))

InitializeVariables(sess)  # included to catch any uninitialized variables

COORD = tf.train.Coordinator()
worker_threads = []
for i, worker in enumerate(workers):
    # Bind `worker` as a default argument: a bare closure would capture the
    # loop variable by reference, and every thread would run the last worker.
    if i == 0:
        job = lambda worker=worker: worker.work(COORD, render=args.render)
    else:
        job = lambda worker=worker: worker.work(COORD)
    t = threading.Thread(target=job)
    t.start()
    worker_threads.append(t)
COORD.join(worker_threads)

last_minute = (work_hours + start_hour) * 60  # simulation horizon, in minutes
prob_incoming_clients = [0.71, 0.23, 0.05, 0.01]
prob_service_type = [0.5, 0.4, 0.1]
avg_service_duration = [12, 15, 25]
stdev_service_duration = [0.8, 1.0, 3.0]
num_of_workers = 4
clients_arrival_interval = 5
maxrow = 8

ROW = []
WORKERS = [Worker() for _ in range(num_of_workers)]
time = start_hour * 60  # simulation clock, in minutes
ncl = 0
N = 0
Nserved = 0
Nwaited = 0
Twaited = 0
EVENTS = []
ev = Event(time, 1, (1,))
EVENTS = fill_events(EVENTS, ev)
ev = Event(last_minute, 8, (1,))
EVENTS = fill_events(EVENTS, ev)
t = time + int(round(pr.exponential(clients_arrival_interval)))

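# A minimal sketch of the fill_events helper assumed above: insert an event
# so that EVENTS stays ordered by event time. The `time` attribute name is an
# assumption; only the call shape fill_events(EVENTS, ev) comes from the code.
import bisect

def fill_events(events, ev):
    times = [e.time for e in events]
    events.insert(bisect.bisect_right(times, ev.time), ev)
    return events
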
import time
import json

import gmqtt

from utils.utils import run_event_loop, STOP, init_client
from utils.consts import WORKER_REGISTRED_TOPIC, WORKER_REGISTRATION_TOPIC, \
    WORKER_UNREGISTER_TOPIC, WORKER_RESULT_TOPIC, BALANCER_WORKER_TOPIC
from utils.worker import Worker

worker = Worker()


async def on_message(client, topic, payload, qos, properties):
    if topic == WORKER_REGISTRED_TOPIC:
        data = json.loads(payload.decode('utf-8'))
        worker_hex = data.get('worker_hex')
        worker_num = data.get('worker_num')
        if not worker.is_registered() and worker.worker_hex == worker_hex:
            worker.register(worker_num)
            client.subscribe(f'{worker.balancer_topic}', qos=1)
    elif topic == worker.balancer_topic and worker.is_registered():
        print(f'Worker {worker.number}. Publish.',
              f"Topic: '{worker.result_topic}'.", 'Payload:', payload)
        client.publish(worker.result_topic, payload, qos=1)
    return 0


async def main(broker_host, token):
    will_message = gmqtt.Message(WORKER_UNREGISTER_TOPIC, worker.worker_hex,

    # Quit if inputs is empty
    if not inputs:
        raise Exception("No inputs given")

    works = Queue(maxsize=0)
    progresses = Queue(maxsize=0)

    # Put work to Queue
    for i, info in enumerate(inputs.values()):
        works.put((i + 1, info))

    # Setup workers
    num_threads = min(config.get("max_worker", 5), len(inputs))
    for i in range(num_threads):
        worker = Worker(config, works, progresses, name="worker{}".format(i))
        worker.daemon = True
        worker.start()

    # Setup visualizer
    visualizer = Visualizer(len(inputs), progresses, name="visualizer")
    visualizer.start()

    # Wait until works Queue and visualizer finished
    works.join()
    visualizer.join()
except FileNotFoundError as errf:
    print(errf)
except Exception as e: