class GEWorker(daemon.Daemon):
    "Our worker daemon - takes Gearman jobs and does them"

    def __init__(self):
        super(GEWorker, self).__init__()
        self.worker = GearmanWorker(settings.GEARMAN_SERVERS)
        self.worker.register_function(messaging.SUBJECT_THREAD_COUNT, self.thread_count)
        self.worker.register_function(messaging.SUBJECT_THREAD_NAMES, self.thread_names)
        self.worker.register_function(messaging.SUBJECT_AVG, self.average)
        self._set_transaction_isolation()

    def _set_transaction_isolation(self):
        """
        Default transaction isolation on MySQL InnoDB is REPEATABLE-READ,
        which means a connection always gets the same result for a given
        query, even if the data has changed. This daemon runs for a long
        time and needs to see db changes, so change to READ-COMMITTED.
        """
        cur = connection.cursor()
        cur.execute("set session transaction isolation level read committed")
        cur.close()

    def run(self):
        """Entry point for daemon.Daemon subclasses - main method"""
        self.worker.work()  # Never returns

    def wait_for_thread(self):
        'Sleeps until a thread is available. Gearman will queue the requests whilst we pause here'
        if settings.DEBUG:
            connection.queries = []  # Prevent query list growing indefinitely
        running_threads = threading.active_count()
        while running_threads >= settings.MAX_WORKER_THREADS:
            logging.debug(
                'Waiting for thread. Currently %s: %s' % (running_threads, self.thread_names())
            )
            time.sleep(1)
            running_threads = threading.active_count()  # Re-check, otherwise we never leave the loop

    def thread_count(self, job=None):
        'Returns number of threads currently running'
        return str(threading.active_count()) + '\n'

    def thread_names(self, job=None):
        'Returns a comma-separated list of active thread names'
        return str(', '.join([thread.name for thread in threading.enumerate()])) + '\n'

    def average(self, job):
        """
        Calculates daily indicator average for group and overall

        @job.arg Id of the new Answer to include.
        """
        self.wait_for_thread()
        worker_thread = AverageWorker(job.arg)
        worker_thread.start()
def main():
    gw = GearmanWorker(['127.0.0.1:4730'])
    gw.register_task("weibo_spider", weibo)
    gw.register_task("bbs_spider", bbs)
    gw.register_task("news_spider", news)
    gw.register_task("blog_spider", blog)
    gw.register_task("media_spider", media)
    gw.register_task("class_spider", same_class)
    gw.work()
class ImageRecognitionWorker:
    worker = None
    db = None

    def __init__(self):
        self.worker = GearmanWorker(['gearman.emag-bot.com'])
        self.worker.register_task('imgrecon', self.ImageRecognition)
        self.db = DBInterface()

    def Work(self):
        self.worker.work()

    def ComputeHistogram(self, url):
        ima = ImageAnalyser()
        ima.LoadImage(url, PathLocation.ONLINE)
        contour = ima.FindContour()
        im_e = ima.ExtractContour(contour)
        #ima.ShowImage(im_e)
        hist = ima.GetHueHistogram(im_e)
        return hist

    def ImageRecognition(self, worker, job):
        print("Got job: " + job.data)
        data = json.loads(job.data)
        hist = self.ComputeHistogram(data['url'])
        db_entries = self.db.QueryForLabels(data['labels'])

        # Keep the best correlation score per label: row[3] holds the stored
        # histogram, row[4] the label.
        accepted_entries = [[], []]
        for row in db_entries:
            if row[3] is None or row[3] == '':
                continue
            row_hist = json.loads(row[3])
            row_hist = np.array([[d] for d in row_hist], dtype=np.float32)
            res = cv2.compareHist(hist, row_hist, cv2.HISTCMP_CORREL)
            if row[4] in accepted_entries[1]:
                idx = accepted_entries[1].index(row[4])
                if res > accepted_entries[0][idx]:
                    accepted_entries[0][idx] = res
            else:
                accepted_entries[0].append(res)
                accepted_entries[1].append(row[4])

        # Return labels ordered by descending histogram correlation.
        ret = [x for _, x in sorted(zip(accepted_entries[0], accepted_entries[1]), reverse=True)]
        print(ret)
        return json.dumps(ret)
def gearmanWorker(self):
    self.logging.info("Gearmand worker instance started")
    while self.loop():
        try:
            worker_instance = GearmanWorker(self.hostlist)
            worker_instance.register_task(self.queue, self.consume)
            worker_instance.work()
        except Exception as err:
            self.logging.warn('Connection to gearmand failed. Reason: %s. Retry in 1 second.' % err)
            sleep(1)
def startFetchDependencyInfoWorkers():
    import FetchDependencyInfoWorker

    stopFetchDependencyInfoWorker()
    for i in range(0, 1):
        time.sleep(1)
        result = os.fork()
        if result == 0:
            # Child process: record our pid, then block in the Gearman worker loop.
            workerPid = os.getpid()
            fp = open(FetchDependencyInfoWorkerPidFilePath, "a")
            fp.write(" %s" % workerPid)
            fp.close()
            worker = GearmanWorker([GearmanConfig.gearmanConnection])
            worker.register_task(JobList.Job_fetchDependencyInfo, FetchDependencyInfoWorker.doWork)
            worker.work()
def main(): print("Running Worker .....") print("options.logging = %s" % options.logging) from demisaucepy import cache_setup cache_setup.load_cache() #global app #app = AppBase() logging.info("site_root = %s" % options.site_root) logging.info("smtp servers = %s" % options.smtp_server) logging.info("cache servers = %s" % options.memcached_servers) logging.info("gearman servers 2 = %s" % (options.gearman_servers)) logging.error("where does this go in supervisord?") worker = GearmanWorker(options.gearman_servers) worker.register_function("email_send", emailer.email_send) worker.register_function("image_resize", assets.image_resize) worker.register_function("echo", echo) from demisauce.model import actions actions.register_workers(worker) worker.work()
class RequestWorker:
    worker = None

    def __init__(self):
        self.worker = GearmanWorker(['gearman.emag-bot.com'])
        self.worker.register_task('getrawdata', self.GetRawData)

    def Work(self):
        self.worker.work()

    def GetRawData(self, worker, job):
        print("Got job: " + job.data)
        ima = ImageAnalyser()
        ima.LoadImage(job.data, PathLocation.ONLINE)
        contour = ima.FindContour()
        im_e = ima.ExtractContour(contour)
        #ima.ShowImage(im_e, "mask")
        hist = ima.GetHueHistogram(im_e)
        return json.dumps(hist.flatten().tolist())
        myj = Jenkins(jenkins_data['url'])
        job = myj.get_job(jenkins_data['job_id'])
        #job.invoke(securitytoken=token, block=block)
        job.invoke(invoke_pre_check_delay=0)
    except:
        rev = "Not Happy!!!"
    return rev


# Establish a connection with the job server on localhost--like the client,
# multiple job servers can be used.
worker = GearmanWorker(['localhost'])
worker.set_client_id('your_worker_client_id_name')

# register_task will tell the job server which tasks this worker handles.
worker.register_task('echo', task_listener_echo)
worker.register_task('build:pep8', task_listener_build)
worker.register_task('stop:jenkins_master.hp.com', task_listener_stop)
worker.register_task('bravo', task_listener_echo)
worker.register_task('reverse', task_listener_reverse)
worker.register_task('jenkins_invoke_job', task_listener_jenkins_invoke_job)

# Once setup is complete, begin working by consuming any tasks available
# from the job server.
print 'working...'
worker.work()

# The worker will continue to run (waiting for new work) until exited by
# code or an external signal is caught.
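# A minimal client-side sketch to pair with the worker above: it submits a job
# to the 'echo' task and prints the worker's reply. The host list and the
# 'ping' payload are assumptions for illustration; python-gearman's
# GearmanClient blocks on submit_job by default and exposes the worker's
# return value as .result.
from gearman import GearmanClient

client = GearmanClient(['localhost'])
completed_request = client.submit_job('echo', 'ping')
print(completed_request.result)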
    #TODO: Log the error
    return str(e)


################################################
def task_listener_email(gearman_worker, gearman_job):
    #TODO: Log the call and params
    cleaned_words, reply, meta, infile, sample_text = process_words(gearman_job.data)
    #TODO: Log the call and params
    res = inverted_index(cleaned_words, reply, meta, infile, sample_text)
    print res
    #TODO: Log the result
    if res != 'Ok':
        # Log the stack trace and exception
        return 'NotOk'
    else:
        return res


if __name__ == "__main__":
    # TODO: Daemonize
    # TODO: Ensure there are not multiple monitors for the same location (check for lock file)
    # TODO: Make the strings config driven
    # TODO: Have a meaningful try-except block
    gm_worker.set_client_id('inv-index-worker')
    gm_worker.register_task('invindex', task_listener_email)
    gm_worker.work()
# coding: utf-8
import os
import gearman
import math
from gearman import GearmanWorker
from lbs_class import user_lbs
import json


def task_callback(gearman_worker, job):
    print job.data
    a = json.loads(job.data)
    store = a['store']
    dist = a['dist']
    center = [a['center']['lng'], a['center']['lat']]
    start = a['time_start']
    end = a['time_end']
    klx = user_lbs(store, dist, center, start, end)
    return 'successfully received'


new_worker = GearmanWorker(['192.168.5.41:4730'])
new_worker.register_task("user_lbs", task_callback)
new_worker.work()
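# A minimal client-side sketch for the worker above, assuming the same gearmand
# address; all field values are made up for illustration. The JSON keys mirror
# what task_callback reads: store, dist, center.lng/lat, time_start, time_end.
import json
from gearman import GearmanClient

client = GearmanClient(['192.168.5.41:4730'])
payload = json.dumps({
    'store': 'store_001',
    'dist': 500,
    'center': {'lng': 116.397, 'lat': 39.908},
    'time_start': '2015-01-01 00:00:00',
    'time_end': '2015-01-02 00:00:00',
})
request = client.submit_job('user_lbs', payload)
print(request.result)  # the worker replies 'successfully received'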
    return False


def task_listener(gearman_worker, gearman_job):
    task_name, video_id, segment_id = pickle.loads(gearman_job.data)
    result = False
    if task_name == 'transcode':
        result = transcode_segment(video_id, segment_id)
    elif task_name == 'thumbnail':
        result = generate_thumbnail(video_id, segment_id)
    return pickle.dumps(result)


if __name__ == "__main__":
    # worker run
    logger.info("Setting up the worker.")
    gm_worker = GearmanWorker([GEARMAND_HOST_PORT])
    gm_worker.register_task(SEGMENT_TASK_NAME, task_listener)
    try:
        logger.info("Worker was set up successfully. Waiting for work.")
        gm_worker.work()
    except KeyboardInterrupt:
        gm_worker.shutdown()
        logger.info("Worker has shut down successfully. Bye.")
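# A minimal client-side sketch for the worker above. The gearmand address, the
# literal 'segment_task' (standing in for SEGMENT_TASK_NAME) and the ids are
# assumptions for illustration; the payload mirrors what task_listener
# unpickles: (task_name, video_id, segment_id).
import pickle
from gearman import GearmanClient

client = GearmanClient(['localhost:4730'])
payload = pickle.dumps(('transcode', 42, 7))
request = client.submit_job('segment_task', payload)
print(pickle.loads(request.result))  # True/False from transcode_segment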
def handle(self, *args, **options):
    print "worker started"
    worker = GearmanWorker(["127.0.0.1"])
    worker.register_function("download", download)
    worker.work()
    '''find out information in the bundle'''
    data = bundle.split('||')
    result = {}
    if data:
        if data[0]:
            result['nid'] = data[0]
            result['content'] = data[1]
            return result
        else:
            return None
    return None


def task_add(gearman_worker, gearman_job):
    '''insert valid item to the database'''
    job_data = parse(gearman_job.data)
    mongo_col.save(job_data)
    return 'okay'


def task_edit(gearman_worker, gearman_job):
    '''update valid item with a new value'''
    # TODO add try-except
    job_data = parse(gearman_job.data)
    print str(job_data)
    mongo_col.update({'nid': job_data['nid']}, {'$set': {'content': job_data['content']}})
    return 'okay'


gworker.register_task('add', task_add)
gworker.register_task('edit', task_edit)
gworker.work()
class GearmanIn(Actor):

    '''**Consumes events/jobs from Gearmand.**

    Consumes jobs from a Gearmand server.
    When secret is None, no decryption is done.

    Parameters:

        - hostlist(list)(["localhost:4730"])
           |  A list of gearmand servers. Each entry should have
           |  format host:port.

        - secret(str)(None)
           |  The AES encryption key to decrypt Mod_gearman messages.

        - workers(int)(1)
           |  The number of gearman workers within 1 process.

        - queue(str)(wishbone)
           |  The queue to consume jobs from.

        - enable_keepalive(bool)(False)
           |  Attempt to monkey patch the gearmand module to enable socket
           |  keepalive.

    Queues:

        - outbox:  Outgoing events.
    '''

    def __init__(self, actor_config, hostlist=["localhost:4730"], secret=None, workers=1, queue="wishbone", enable_keepalive=False):
        Actor.__init__(self, actor_config)
        self.pool.createQueue("outbox")
        self.background_instances = []

        if self.kwargs.secret is None:
            self.decrypt = self.__plainTextJob
        else:
            key = self.kwargs.secret[0:32]
            self.cipher = AES.new(key + chr(0) * (32 - len(key)))
            self.decrypt = self.__encryptedJob

    def preHook(self):
        if self.kwargs.enable_keepalive:
            self.logging.info("Requested to monkey patch Gearmand")
            if gearman_version == "2.0.2":
                self.logging.info("Detected gearman version 2.0.2, patching sockets with SO_KEEPALIVE enabled.")
                self.gearmanWorker = self._gearmanWorkerPatched
            else:
                self.logging.warning("Did not detect gearman version 2.0.2. Not patching sockets with SO_KEEPALIVE enabled.")
                self.gearmanWorker = self._gearmanWorkerNotPatched
        else:
            self.gearmanWorker = self._gearmanWorkerNotPatched

        for _ in range(self.kwargs.workers):
            self.sendToBackground(self.gearmanWorker)

        self.sendToBackground(self.monitor)

    def consume(self, gearman_worker, gearman_job):
        decrypted = self.decrypt(gearman_job.data)
        event = Event(decrypted)
        self.submit(event, self.pool.queue.outbox)
        return gearman_job.data

    def __encryptedJob(self, data):
        return self.cipher.decrypt(base64.b64decode(data))

    def __plainTextJob(self, data):
        return data

    def _gearmanWorkerPatched(self):
        self.logging.info("Gearmand worker instance started")
        while self.loop():
            try:
                with mock.patch.object(GearmanConnection, '_create_client_socket', create_client_socket):
                    self.worker_instance = GearmanWorker(self.kwargs.hostlist)
                    self.worker_instance.register_task(self.kwargs.queue, self.consume)
                    self.worker_instance.work()
            except Exception as err:
                self.logging.warn("Connection to gearmand failed. Reason: '%s'. Retry in 1 second." % err)
                sleep(1)
            finally:
                self.worker_instance.shutdown()

    def _gearmanWorkerNotPatched(self):
        self.logging.info("Gearmand worker instance started")
        while self.loop():
            try:
                self.worker_instance = GearmanWorker(self.kwargs.hostlist)
                self.worker_instance.register_task(self.kwargs.queue, self.consume)
                self.worker_instance.work()
            except Exception as err:
                self.logging.warn("Connection to gearmand failed. Reason: '%s'. Retry in 1 second." % err)
                sleep(1)
            finally:
                self.worker_instance.shutdown()

    def monitor(self):
        self.logging.info("Connection monitor started.")
        while self.loop():
            sleep(5)
            for conn in self.worker_instance.connection_list:
                if not conn.connected:
                    self.logging.error("Connection to '%s' is dead. Trying to reconnect." % (conn.gearman_host))
                    try:
                        conn.connect()
                        self.logging.info("Connection to '%s' is restored." % (conn.gearman_host))
                    except Exception as err:
                        self.logging.error("Failed to reconnect to '%s'. Retry in 5 seconds. Reason: '%s'" % (conn.gearman_host, err))
                else:
                    self.logging.debug("Connection to '%s' is alive." % (conn.gearman_host))
    time.sleep(10)
    print('waiting 10s... ')
    status = i.update()
    if status == 'running':
        print('running adding tag... ')
        import hashlib
        conn.create_tags([i.id], {"name": "ScrambleDB" + random_md5like_hash()})
        # i.add_tag("Name","{{ScambleDB}}")
    else:
        print('Instance status: ' + status)
    # security_groups=[ config["cloud"]["security_groups"]])
    return json.dumps(reservation)


# Establish a connection with the job server on localhost--like the client,
# multiple job servers can be used.
worker = GearmanWorker(['127.0.0.1:4731'])

# register_task will tell the job server that this worker handles the
# "cloud_cmd" task.
worker.register_task('cloud_cmd', cloud_cmd)

# Once setup is complete, begin working by consuming any tasks available
# from the job server.
print('working...')
worker.work()