def test_initialization():
    # Worker must translate the configured gas price into either a dynamic
    # web3 strategy ('fast'/'medium') or a static integer price, and reject
    # anything else at construction time.
    config = Config.from_environ()

    # 'fast' -> dynamic strategy installed via fast_gas_price_strategy
    config['Blockchain']['GasPrice'] = 'fast'
    worker = Worker(config)
    worker.web3.eth.setGasPriceStrategy.assert_called_once_with(
        fast_gas_price_strategy)
    assert worker.dynamic_gas_price_strategy
    assert worker.gas_price == worker.web3.eth.generateGasPrice()

    # 'medium' -> dynamic strategy installed via medium_gas_price_strategy
    config['Blockchain']['GasPrice'] = 'medium'
    worker = Worker(config)
    worker.web3.eth.setGasPriceStrategy.assert_called_once_with(
        medium_gas_price_strategy)
    assert worker.dynamic_gas_price_strategy
    assert worker.gas_price == worker.web3.eth.generateGasPrice()

    # integer -> static price taken verbatim, no strategy installed
    config['Blockchain']['GasPrice'] = 6000000
    worker = Worker(config)
    assert not worker.dynamic_gas_price_strategy
    assert worker.gas_price == config['Blockchain']['GasPrice']
    worker.web3.eth.setGasPriceStrategy.assert_not_called()

    # unknown strategy name -> constructor raises with the offending value
    config['Blockchain']['GasPrice'] = 'slow'
    with pytest.raises(Exception) as einfo:
        worker = Worker(config)
    assert str(einfo.value) == 'Invalid gas price strategy:\'slow\''

    # None -> constructor raises as well
    config['Blockchain']['GasPrice'] = None
    with pytest.raises(Exception) as einfo:
        worker = Worker(config)
    assert str(einfo.value) == 'Invalid gas price strategy:None'
def test_gas_price_update(load_unprocessed_document, generate_gas_price):
    # Dynamic gas price behaviour: the price is bumped 10% when a
    # transaction times out, regenerated after GasPriceRefreshRate processed
    # messages, and never refreshed when a static integer price is set.
    gas_price = 111
    generate_gas_price.return_value = gas_price
    config = Config.from_environ()
    config['Blockchain']['GasPrice'] = 'fast'
    config['Blockchain']['GasPriceRefreshRate'] = 2
    key = 'document-key'
    document = DOCUMENT_V2_TEMPLATE.substitute(
        DocumentStoreAddress=config['DocumentStore']['Address'])
    document = json.loads(document)
    load_unprocessed_document.return_value = key, document
    # minimal SQS-style message carrying one record
    message = mock.Mock()
    message.body = json.dumps({'Records': [{}]})
    worker = Worker(config)
    worker.web3.eth.sendRawTransaction.return_value = b'transaction-hash'
    # receipts report success by default
    worker.web3.eth.waitForTransactionReceipt().status = 1
    assert worker.gas_price == gas_price
    # testing transaction timeout causing gas price increase
    generate_gas_price.reset_mock()
    worker.web3.eth.waitForTransactionReceipt.side_effect = TimeExhausted
    assert not worker.process_message(message)
    generate_gas_price.assert_not_called()
    # timeout bumps the cached price by 10% rather than regenerating it
    assert worker.gas_price == int(gas_price * 1.1)
    # testing gas price refresh
    generate_gas_price.reset_mock()
    worker.web3.eth.waitForTransactionReceipt.side_effect = None
    # exactly one regeneration is expected across a full refresh cycle
    for i in range(config['Blockchain']['GasPriceRefreshRate']):
        assert worker.process_message(message)
    generate_gas_price.assert_called_once()
    # testing no refresh on static gas price
    config['Blockchain']['GasPrice'] = 20
    worker = Worker(config)
    worker.web3.eth.sendRawTransaction.return_value = b'transaction-hash'
    worker.web3.eth.waitForTransactionReceipt().status = 1
    worker.web3.reset_mock()
    for i in range(config['Blockchain']['GasPriceRefreshRate']):
        assert worker.process_message(message)
    worker.web3.eth.generateGasPrice.assert_not_called()
def test23hostPermissions(self):
    """MV2->MV3: permissions are split into permissions / optional_permissions /
    host_permissions lists with the expected cardinalities."""
    worker = Worker()
    self.source += 'test23hostPermissions'
    self.destination = self.source + '_delete'
    # Fix: the original called shutil.rmtree only at the end of the test,
    # leaking the generated directory whenever an assertion failed.
    # addCleanup runs even on failure; ignore_errors covers the case where
    # work() raised before creating the directory.
    self.addCleanup(shutil.rmtree, self.destination, ignore_errors=True)
    worker.work(self.source)
    expected = 3
    actual = worker.wrapper.getManifestVersion()
    self.assertEqual(actual, expected, 'manifest_version')
    manifest = worker.wrapper.manifest
    key = 'permissions'
    self.assertIn(key, manifest)
    self.assertEqual(len(manifest[key]), 2)
    key = 'optional_permissions'
    self.assertIn(key, manifest)
    self.assertEqual(len(manifest[key]), 1)
    key = 'host_permissions'
    self.assertIn(key, manifest)
    self.assertEqual(len(manifest[key]), 2)
def add_all(self, subject):
    """Queue a background add of every checked, unmapped row for *subject*."""
    # Pick the cached unmapped rows and the matching adder for this subject.
    if subject == self.form.ID:
        candidates = self._current['unmapped']
        add_func = self.wc_adder
    else:
        candidates = self._current['wcunmapped']
        add_func = self.moein_adder
    # Keep only the rows whose id is currently checked in the options list.
    checked_ids = {int(row[0]) for row in self.options_list.getChecked()}
    pending = [item for item in candidates if item['id'] in checked_ids]
    if not pending:
        return
    progress = Progress(self.options_list, self.messages[8], 0, len(pending))
    progress.show()
    # Run the adder on the global thread pool, reporting into the dialog.
    worker = Worker(add_func, pending)
    worker.signals.progress.connect(progress.setValue)
    worker.signals.error.connect(progress.close)
    worker.signals.error.connect(self.add_all_error)
    worker.signals.done.connect(self.add_all_done)
    QThreadPool.globalInstance().start(worker)
    self.options_list.btnAddAll.setDisabled(True)
def run(self):
    """Submit every target callable to the pool and block until all finish."""
    for task in self.target:
        logger.debug('start ' + task.__name__)
        self.pool.start(Worker(task))
    self.pool.waitForDone()
    logger.debug('thread finished')
def setUp(self) -> None:
    """Prepare a Worker wired to manager-backed queue and done/failed counters."""
    manager = Manager()
    self.manager = manager
    self.queue = manager.Queue()
    self.done = manager.Value('i', 0)
    self.failed = manager.Value('i', 0)
    self.worker = Worker(
        MockSenderFactory(),
        len(self.files),
        self.queue,
        self.done,
        self.failed,
    )
def distribute(self, files: List[str]) -> None:
    """Upload *files* in parallel through a process pool sharing one Worker."""
    with Manager() as manager:
        # Fresh shared counters for this distribution run.
        done_counter = manager.Value('i', 0)
        failed_counter = manager.Value('i', 0)
        worker = Worker(self.sender_factory, len(files), self.queue,
                        done_counter, failed_counter)
        with Pool(self.processes) as pool:
            pool.map(worker.upload, files)
        # All uploads finished: mark inactive and emit a final notification.
        self.active.value = False
        worker.notify('')
def __init__(self):
    """Wire up logging, configuration, the worker, and the queue task handler."""
    self.log = Logger.get_logger_instance()
    self.__config = Config.get_config_instance()
    self.__worker = Worker()
    # All queue-handler parameters come from the "queue" config section.
    queue_cfg = self.__config["queue"]
    self.queue_handler = TaskHandler(
        queue_cfg["job_type"],
        queue_cfg["task_type"],
        queue_cfg["job_manager_url"],
        queue_cfg["heartbeat_manager_url"],
        queue_cfg["heartbeat_interval_seconds"],
        self.log,
    )
def test_mv3_to_mv2_B(self):
    """MV3->MV2 conversion of the tabstourls extension yields manifest v2."""
    worker = Worker()
    self.source += 'tabstourls_mv3'
    self.destination = self.source + '_delete'
    # Fix: rmtree used to run only on success, leaking the output directory
    # when an assertion failed; addCleanup always runs, and ignore_errors
    # covers the case where work() never created the directory.
    self.addCleanup(shutil.rmtree, self.destination, ignore_errors=True)
    worker.work(self.source)
    expected = 2
    actual = worker.wrapper.getManifestVersion()
    self.assertEqual(actual, expected, 'manifest_version')
def test(unprocessed_queue, unprocessed_bucket, issued_bucket, unwrap, wrap):
    # End-to-end poll test: documents dropped into the unprocessed bucket are
    # issued and appear (wrapped) in the issued bucket, for both v2 and v3
    # schemas, for raw and pre-wrapped inputs, and for already-issued docs.
    config = Config.from_environ()
    document_store_address = config['DocumentStore']['Address']
    # overriding safe VisibilityTimeout
    config['Worker']['Polling']['VisibilityTimeout'] = 1
    # wait twice the visibility timeout so in-flight messages reappear
    queue_test_wait_time_seconds = config['Worker']['Polling'][
        'VisibilityTimeout'] * 2
    document_v2 = DOCUMENT_V2_TEMPLATE.substitute(
        DocumentStoreAddress=document_store_address)
    document_v2 = json.loads(document_v2)
    wrapped_document_v2 = wrap(document_v2, '2.0')
    document_v3 = DOCUMENT_V3_TEMPLATE.substitute(
        DocumentStoreAddress=document_store_address)
    document_v3 = json.loads(document_v3)
    wrapped_document_v3 = wrap(document_v3, '3.0')
    worker = Worker(config)
    index = 1
    # checking both schema versions to test auto version definition
    for document in [document_v2, document_v3]:
        key = f'document-{index}'
        unprocessed_bucket.Object(key).put(Body=json.dumps(document))
        worker.poll()
        issued_document = json.load(issued_bucket.Object(key).get()['Body'])
        # issued output unwraps back to the original input document
        assert unwrap(issued_document) == document
        index += 1
        time.sleep(queue_test_wait_time_seconds)
    index = 1
    # checking both schema versions to test auto version definition for wrapped documents
    for document in [wrapped_document_v2, wrapped_document_v3]:
        key = f'wrapped-document-{index}'
        unprocessed_bucket.Object(key).put(Body=json.dumps(document))
        worker.poll()
        issued_document = json.load(issued_bucket.Object(key).get()['Body'])
        assert unwrap(issued_document) == unwrap(document)
        index += 1
        time.sleep(queue_test_wait_time_seconds)
    # check that all messages were processed
    assert not unprocessed_queue.receive_messages(
        WaitTimeSeconds=queue_test_wait_time_seconds,
        MaxNumberOfMessages=1,
        VisibilityTimeout=0)
    # Checking issuing already issued wrapped document
    # it should be moved to issued bucket without calling contract.issue method
    # after signature and document store verifications passed
    key = 'issued-wrapped-document'
    assert worker.is_issued_document(wrapped_document_v2)
    unprocessed_bucket.Object(key).put(Body=json.dumps(wrapped_document_v2))
    worker.poll()
    issued_document = json.load(issued_bucket.Object(key).get()['Body'])
    # already-issued documents pass through unchanged
    assert issued_document == wrapped_document_v2
def main():
    """PBT on the toy quadratic problem: train a two-worker population and
    plot the theta trajectories."""
    # NOTE(review): 10e-4 == 1e-3; original had a '#???' here — confirm the
    # intended tolerance.
    convergenceTolerance = 10e-4
    maxStep = 30
    init_theta = [0.9, 0.9]  # initial weights shared by both workers

    # Two-worker population with complementary hyperparameters.
    numOfWorkers = 2
    init_hyperParam = [[0, 1], [1, 0]]
    worker_list = [
        Worker(init_theta, init_hyperParam[i]) for i in range(numOfWorkers)
    ]

    run1 = train(worker_list, step, eval, ready, exploit, explore, lossFunc,
                 convergenceTolerance, maxStep)

    def plot_theta(run, i, steps, title):
        # Scatter each worker's theta trajectory: worker 0 blue, worker 1 red.
        x_b = [point[0] for point in run[0].theta_history]
        y_b = [point[1] for point in run[0].theta_history]
        x_r = [point[0] for point in run[1].theta_history]
        y_r = [point[1] for point in run[1].theta_history]
        plt.subplot(2, 4, i)
        plt.scatter(x_b, y_b, color='b', s=2)
        plt.scatter(x_r, y_r, color='r', s=2)
        plt.title(title)
        plt.xlabel('theta0')
        plt.ylabel('theta1')

    plot_theta(run1, 1, steps=maxStep, title='PBT')
    plt.show()
def test_mv3_to_mv2_C(self):
    """MV3->MV2: timebadge converts to manifest v2 with background.scripts."""
    worker = Worker()
    self.source += 'timebadge_mv3'
    self.destination = self.source + '_delete'
    # Fix: rmtree used to run only on success, leaking the output directory
    # when an assertion failed; addCleanup always runs, and ignore_errors
    # covers the case where work() never created the directory.
    self.addCleanup(shutil.rmtree, self.destination, ignore_errors=True)
    worker.work(self.source)
    expected = 2
    actual = worker.wrapper.getManifestVersion()
    self.assertEqual(actual, expected, 'manifest_version')
    manifest = worker.wrapper.manifest
    self.assertIn('background', manifest)
    self.assertIn('scripts', manifest['background'])
def test23contentSecurityPolicy(self):
    """MV2->MV3: CSP becomes a dict with extension_pages and sandbox keys."""
    worker = Worker()
    self.source += 'test23contentSecurityPolicy'
    self.destination = self.source + '_delete'
    # Fix: rmtree used to run only on success, leaking the output directory
    # when an assertion failed; addCleanup always runs, and ignore_errors
    # covers the case where work() never created the directory.
    self.addCleanup(shutil.rmtree, self.destination, ignore_errors=True)
    worker.work(self.source)
    expected = 3
    actual = worker.wrapper.getManifestVersion()
    self.assertEqual(actual, expected, 'manifest_version')
    manifest = worker.wrapper.manifest
    key = 'content_security_policy'
    self.assertIn(key, manifest)
    self.assertIn('extension_pages', manifest[key])
    self.assertIn('sandbox', manifest[key])
def run(self):
    """Bind the listening socket and fan request handling out to workers."""
    self._create_socket()
    print(f'server started on {self.config.host}:{self.config.port}')
    # One worker process per configured CPU, all sharing the socket.
    workers = [
        Worker(self._sock, self.config)
        for _ in range(self.config.cpu_limit)
    ]
    for worker in workers:
        worker.start()
    try:
        for worker in workers:
            worker.join()
    except KeyboardInterrupt:
        # Ctrl-C: stop children instead of waiting for them to exit.
        for worker in workers:
            worker.terminate()
    finally:
        self._sock.close()
def test23executeScript(self):
    """MV2->MV3: background page becomes a service worker and the
    'scripting' permission is added."""
    worker = Worker()
    self.source += 'test23executeScript'
    self.destination = self.source + '_delete'
    # Fix: rmtree used to run only on success, leaking the output directory
    # when an assertion failed; addCleanup always runs, and ignore_errors
    # covers the case where work() never created the directory.
    self.addCleanup(shutil.rmtree, self.destination, ignore_errors=True)
    worker.work(self.source)
    expected = 3
    actual = worker.wrapper.getManifestVersion()
    self.assertEqual(actual, expected, 'manifest_version')
    manifest = worker.wrapper.manifest
    self.assertIn('background', manifest)
    self.assertIn('service_worker', manifest['background'])
    self.assertEqual(manifest['background']['service_worker'],
                     'service_worker.js')
    self.assertIn('permissions', manifest)
    self.assertIn('scripting', manifest['permissions'])
def test_mv2_C(self):
    """MV2->MV3: background scripts are merged into a single service worker
    and the original script files are not copied to the destination."""
    worker = Worker()
    self.source += 'backgroundScripts_mv2'
    self.destination = self.source + '_delete'
    # Fix: rmtree used to run only on success, leaking the output directory
    # when an assertion failed; addCleanup always runs, and ignore_errors
    # covers the case where work() never created the directory.
    self.addCleanup(shutil.rmtree, self.destination, ignore_errors=True)
    worker.work(self.source)
    expected = 3
    actual = worker.wrapper.getManifestVersion()
    self.assertEqual(actual, expected, 'manifest_version')
    manifest = worker.wrapper.manifest
    self.assertIn('background', manifest)
    self.assertIn('service_worker', manifest['background'])
    self.assertEqual(manifest['background']['service_worker'],
                     'service_worker.js')
    self.assertFalse(
        os.path.exists(worker.wrapper.destination + os.sep + 'script1.js'))
    self.assertFalse(
        os.path.exists(worker.wrapper.destination + os.sep + 'script2.js'))
def update(self):
    """Push updates for mapped records that require it, on a worker thread."""
    conn = connection.get()
    self.model.set_connection(conn)
    try:
        mapped = self.model.mapped(update_required=True)
    except Exception as e:
        # Surface the lookup failure to the user instead of crashing.
        Message(self.ui, Message.ERROR, self.messages[4], str(e)).show()
    else:
        if mapped:
            progress = Progress(self.ui, self.messages[9], 0, len(mapped))
            progress.show()
            # Run the updater on the global thread pool, reporting progress
            # into the dialog and closing it on error.
            worker = Worker(self.updater, mapped)
            worker.signals.progress.connect(progress.setValue)
            worker.signals.error.connect(progress.close)
            worker.signals.error.connect(self.update_error)
            worker.signals.done.connect(self.update_done)
            QThreadPool.globalInstance().start(worker)
            self.tab.btnUpdate.setDisabled(True)
    finally:
        conn.close()
def test23webAccessibleResources(self):
    """MV2->MV3: service worker conversion plus web_accessible_resources
    restructured into the MV3 list-of-objects form."""
    worker = Worker()
    self.source += 'test23webAccessibleResources'
    self.destination = self.source + '_delete'
    # Fix: rmtree used to run only on success, leaking the output directory
    # when an assertion failed; addCleanup always runs, and ignore_errors
    # covers the case where work() never created the directory.
    self.addCleanup(shutil.rmtree, self.destination, ignore_errors=True)
    worker.work(self.source)
    expected = 3
    actual = worker.wrapper.getManifestVersion()
    self.assertEqual(actual, expected, 'manifest_version')
    manifest = worker.wrapper.manifest
    self.assertIn('background', manifest)
    self.assertIn('service_worker', manifest['background'])
    self.assertEqual(manifest['background']['service_worker'],
                     'service_worker.js')
    self.assertIn('permissions', manifest)
    self.assertIn('scripting', manifest['permissions'])
    key = 'web_accessible_resources'
    self.assertIn(key, manifest)
    self.assertEqual(len(manifest[key][0]['resources']), 2)
# Wait until the chain has mined up to the target block number.
while latest_block() < target:
    time.sleep(1)
logger.info('Blocks mined')
# Fresh temporary directory per test run prevents the listener from picking
# up block logs left by a previous run.
worker_config_json['Worker']['General'][
    'ListenerBlocksLogDir'] = tempfile.mkdtemp()
# configuration with no from block set, worker should pick from blocks from blocks log
no_from_block_config = Config().load(worker_config_json)
# modifying Listener.Event.Filter.fromBlock to not receive older messages
for listener in worker_config_json['Listeners']:
    listener['Event']['Filter']['fromBlock'] = latest_block() + 1
latest_from_block_config = Config().load(worker_config_json)
worker = Worker(latest_from_block_config)
# preparing queues (emptied so the test only sees its own events)
message_received_event_queue = empty_queue('message-received-event')
message_sent_event_queue = empty_queue('message-sent-event')
message_event_queue = empty_queue('message-event')
message_sent_event = {'receiver': 'AU', 'text': '1'}
message_received_event = {'receiver': 'AU', 'text': '2'}
# emitting two events while at least two new blocks are mined
with wait_for_blocks(2):
    MessageSent(message_sent_event['receiver'], message_sent_event['text'])
    MessageReceived(message_received_event['receiver'],
                    message_received_event['text'])
def setUp(self):
    """Create the two-member PBT worker population used by the tests."""
    # (hyperparameters, initial theta) for each population member
    population = [
        ([0, 1], [0.9, 0.9]),
        ([1, 0], [0.5, 0.5]),
    ]
    self.workers = [
        Worker(init_hyperParam=hyper, init_theta=theta)
        for hyper, theta in population
    ]
if __name__ == '__main__': PARSER = argparse.ArgumentParser( description='Client tool for DoEnjoy mini-Spark') subparsers = PARSER.add_subparsers() init_wordcount_client_parser(subparsers) init_pagerank_client_parser(subparsers) init_wordcount_streaming_client_parser(subparsers) init_master_parser(subparsers) init_worker_parser(subparsers) ARGS = PARSER.parse_args() if ARGS.action == 'master': master = Master(ARGS.port, ARGS.debug) master.run() elif ARGS.action == 'worker': worker = Worker(ARGS.master_address, ARGS.self_address, ARGS.debug) worker.run() elif ARGS.action == 'page_rank': page_rank_client = PageRankClient(ARGS.file_path, ARGS.iterative) client = get_client(ARGS.master_address) execute_command(client, client.get_job, pickle_object(page_rank_client), ARGS.self_address) page_rank_client.start_server("0.0.0.0") elif ARGS.action == 'word_count': word_count_client = WordCountClient(ARGS.file_path) client = get_client(ARGS.master_address) execute_command(client, client.get_job, pickle_object(word_count_client), ARGS.self_address) word_count_client.start_server("0.0.0.0:" + ARGS.self_address.split(":")[1]) elif ARGS.action == 'wordcount_streaming':
def call_worker(fn, ui=None, target_page=None, text=''):
    """Run *fn* on the UI's thread pool while a loading screen shows *text*.

    On success the worker proceeds to *target_page*; on failure it returns
    to the previous page.
    """
    ui.thread_pool.start(Worker(fn, target_page, ui, text))
from src.worker import Worker # pragma: no cover from src.config import Config # pragma: no cover Worker(Config.from_environ()).start() # pragma: no cover
entropy_beta = 0.005 model_path = './net/a3c.ckpt' output_graph = True graph_dir = './graph_log' env = gym.make("Festium-v2") tf.reset_default_graph() with tf.device("/cpu:0"): global_episodes = tf.Variable(0, dtype=tf.int32) trainer = tf.train.AdamOptimizer(learning_rate=1e-4) master_net = AC_Network(env, 'global', model_path, None, None) num_workers = multiprocessing.cpu_count() workers = [] for i in range(num_workers): workers.append( Worker(env, i, trainer, model_path, global_episodes, max_global_steps, entropy_beta)) saver = tf.train.Saver(max_to_keep=5) with tf.Session() as sess: try: coord = tf.train.Coordinator() sess.run(tf.global_variables_initializer()) if output_graph: if os.path.exists(graph_dir): shutil.rmtree(graph_dir) tf.summary.FileWriter(graph_dir, sess.graph) worker_threads = [] for worker in workers: worker_work = lambda: worker.work(max_episode_length, gamma, sess,