Example #1
    def _init(self):
        while True:
            while not self.is_available():
                logging.error(
                    "Gearman is not available right now. Daemon will sleep for {n} seconds"
                    .format(n=settings.GEARMAN_RECONNECT_TIMEOUT))
                gevent.sleep(settings.GEARMAN_RECONNECT_TIMEOUT)

            logging.debug("Gearman worker try to connect {hosts}".format(
                hosts=', '.join(settings.GEARMAN['hosts'])))

            try:
                gm_worker = GearmanWorker(settings.GEARMAN['hosts'])
                gm_worker.set_client_id("socket_io_gearman_" +
                                        str(OPTIONS.port))
                gm_worker.register_task("socket_io", GearmanListener.callback)
                logging.debug("Gearman worker was successfull created")

                return gm_worker
            except Exception as e:
                logging.error(
                    "Error while initiating gearman worker connection: {message}"
                    .format(message=e))
                logging.debug("Daemon will sleep for {n} seconds".format(
                    n=settings.GEARMAN_RECONNECT_TIMEOUT))
                gevent.sleep(settings.GEARMAN_RECONNECT_TIMEOUT)
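
    # is_available() is referenced above but not shown in this example. A
    # hedged sketch of what such a check could look like, assuming the gearman
    # package's GearmanAdminClient is acceptable as a health probe (this
    # implementation is an assumption, not the original helper):
    def is_available(self):
        from gearman import GearmanAdminClient
        try:
            # Ping the first configured gearmand server; any failure counts
            # as "not available".
            GearmanAdminClient(settings.GEARMAN['hosts'][:1]).ping_server()
            return True
        except Exception:
            return False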
Example #2
def main():
    gw = GearmanWorker(['127.0.0.1:4730'])
    gw.register_task("weibo_spider", weibo)
    gw.register_task("bbs_spider", bbs)
    gw.register_task("news_spider", news)
    gw.register_task("blog_spider", blog)
    gw.register_task("media_spider", media)
    gw.register_task("class_spider", same_class)
    gw.work()
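
# A worker like the one above is driven by a GearmanClient on the other side.
# A minimal sketch of a fire-and-forget submission to one of the spider tasks,
# assuming the same server address as the worker; the payload format expected
# by weibo() is not shown, so the JSON body below is an assumption.
import json
from gearman import GearmanClient

client = GearmanClient(['127.0.0.1:4730'])
# background=True queues the job and returns immediately, which suits
# long-running spider jobs; no result is collected.
client.submit_job('weibo_spider', json.dumps({'keyword': 'example'}),
                  background=True)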
Example #3
class ImageRecognitionWorker:
    worker = None
    db = None

    def __init__(self):
        self.worker = GearmanWorker(['gearman.emag-bot.com'])
        self.worker.register_task('imgrecon', self.ImageRecognition)

        self.db = DBInterface()

    def Work(self):
        self.worker.work()

    def ComputeHistogram(self, url):
        ima = ImageAnalyser()
        ima.LoadImage(url, PathLocation.ONLINE)
        contour = ima.FindContour()
        im_e = ima.ExtractContour(contour)
        #ima.ShowImage(im_e)
        hist = ima.GetHueHistogram(im_e)

        return hist

    def ImageRecognition(self, worker, job):
        print("Got job: " + job.data)
        data = json.loads(job.data)
        hist = self.ComputeHistogram(data['url'])

        db_entries = self.db.QueryForLabels(data['labels'])

        accepted_entries = [[], []]
        for row in db_entries:
            #print(row)
            if row[3] is None or row[3] == '':
                continue

            row_hist = json.loads(row[3])
            row_hist = np.array([[d] for d in row_hist], dtype=np.float32)
            res = cv2.compareHist(hist, row_hist, cv2.HISTCMP_CORREL)

            if row[4] in accepted_entries[1]:
                idx = accepted_entries[1].index(row[4])
                if res > accepted_entries[0][idx]:
                    accepted_entries[0][idx] = res
            else:
                accepted_entries[0].append(res)
                accepted_entries[1].append(row[4])

        ret = [
            x for _, x in sorted(zip(accepted_entries[0], accepted_entries[1]),
                                 reverse=True)
        ]
        print(ret)

        return json.dumps(ret)
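
# For reference, ImageRecognition() above expects a JSON object with 'url' and
# 'labels' keys and returns a JSON-encoded list. A minimal client sketch under
# that assumption (the server address is copied from the worker; the URL and
# label values are placeholders):
import json
from gearman import GearmanClient

client = GearmanClient(['gearman.emag-bot.com'])
request = client.submit_job('imgrecon', json.dumps({
    'url': 'http://example.com/product.jpg',
    'labels': ['shoes'],
}))
print(json.loads(request.result))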
Example #4
    def gearmanWorker(self):

        self.logging.info("Gearmand worker instance started")
        while self.loop():
            try:
                worker_instance = GearmanWorker(self.hostlist)
                worker_instance.register_task(self.queue, self.consume)
                worker_instance.work()
            except Exception as err:
                self.logging.warn('Connection to gearmand failed. Reason: %s. Retry in 1 second.' % err)
                sleep(1)
Example #5
    def gearmanWorker(self):

        self.logging.info("Gearmand worker instance started")
        while self.loop():
            try:
                worker_instance = GearmanWorker(self.hostlist)
                worker_instance.register_task(self.queue, self.consume)
                worker_instance.work()
            except Exception as err:
                self.logging.warn(
                    'Connection to gearmand failed. Reason: %s. Retry in 1 second.'
                    % err)
                sleep(1)
Example #6
def startFetchDependencyInfoWorkers():
    import FetchDependencyInfoWorker

    stopFetchDependencyInfoWorker()

    for i in range(0, 1):
        time.sleep(1)
        result = os.fork()

        if result == 0:
            workerPid = os.getpid()

            fp = open(FetchDependencyInfoWorkerPidFilePath, "a")
            fp.write(" %s" % workerPid)
            fp.close()

            worker = GearmanWorker([GearmanConfig.gearmanConnection])
            worker.register_task(JobList.Job_fetchDependencyInfo, FetchDependencyInfoWorker.doWork)
            worker.work()
    pass
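
# stopFetchDependencyInfoWorker() is referenced above but not shown. One
# plausible counterpart, offered purely as an assumption, reads the pid file
# written by the forked children and signals each recorded worker:
def stopFetchDependencyInfoWorker():
    import os
    import signal

    if not os.path.exists(FetchDependencyInfoWorkerPidFilePath):
        return
    with open(FetchDependencyInfoWorkerPidFilePath) as fp:
        pids = fp.read().split()
    for pid in pids:
        try:
            os.kill(int(pid), signal.SIGTERM)
        except OSError:
            # The worker already exited or the pid is stale; ignore it.
            pass
    # Truncate the pid file so the next start begins with a clean slate.
    open(FetchDependencyInfoWorkerPidFilePath, "w").close()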
Example #7
class RequestWorker:
    worker = None

    def __init__(self):
        self.worker = GearmanWorker(['gearman.emag-bot.com'])
        self.worker.register_task('getrawdata', self.GetRawData)

    def Work(self):
        self.worker.work()
    
    def GetRawData(self, worker, job):
        print("Got job: " + job.data)

        ima = ImageAnalyser()

        ima.LoadImage(job.data, PathLocation.ONLINE)
        contour = ima.FindContour()
        im_e = ima.ExtractContour(contour)
        #ima.ShowImage(im_e, "mask")

        hist = ima.GetHueHistogram(im_e)

        return json.dumps(hist.flatten().tolist())
Example #8
        myj = Jenkins(jenkins_data['url'])
        job = myj.get_job(jenkins_data['job_id'])
        #job.invoke(securitytoken=token, block=block)
        job.invoke(invoke_pre_check_delay=0)
    except:
        rev="Not Happy!!!" 
    
    return rev

	
# Establish a connection with the job server on localhost--like the client,
# multiple job servers can be used.
worker = GearmanWorker(['localhost'])

# register_task will tell the job server that this worker handles the "echo"
# task
worker.set_client_id('your_worker_client_id_name')
worker.register_task('echo', task_listener_echo)
worker.register_task('build:pep8', task_listener_build)
worker.register_task('stop:jenkins_master.hp.com', task_listener_stop)
worker.register_task('bravo', task_listener_echo)
worker.register_task('reverse', task_listener_reverse)
worker.register_task('jenkins_invoke_job', task_listener_jenkins_invoke_job)

# Once setup is complete, begin working by consuming any tasks available
# from the job server
print 'working...'
worker.work()

# The worker will continue to run (waiting for new work) until exited by
# code or an external signal is caught
Example #9
        return False


def task_listener(gearman_worker, gearman_job):
    task_name, video_id, segment_id = pickle.loads(gearman_job.data)
    result = False

    if task_name == 'transcode':
        result = transcode_segment(video_id, segment_id)
    elif task_name == 'thumbnail':
        result = generate_thumbnail(video_id, segment_id)

    return pickle.dumps(result)


if __name__ == "__main__":
    # worker run

    logger.info("Setting up the worker.")
    gm_worker = GearmanWorker([GEARMAND_HOST_PORT])
    gm_worker.register_task(SEGMENT_TASK_NAME, task_listener)

    try:
        logger.info("Worker was set up successfully. Waiting for work.")
        gm_worker.work()

    except KeyboardInterrupt:
        gm_worker.shutdown()
        logger.info("Worker has shut down successfully. Bye.")
Example #10
#        job = myj.get_job(jenkins_data['job_id'])
#        #job.invoke(securitytoken=token, block=block)
#        job.invoke(invoke_pre_check_delay=0)
#    except:
#        rev="Not Happy!!!" 
#    
#    return rev

	
# Establish a connection with the job server on localhost--like the client,
# multiple job servers can be used.
#worker = GearmanWorker(['localhost:4730'])
worker = GearmanWorker(['15.185.117.66:4730'])

# register_task will tell the job server that this worker handles the "echo"
# task
worker.set_client_id('your_worker_client_id_name')
worker.register_task('echo', task_listener_echo)
worker.register_task('build:pep8', task_listener_build)
#worker.register_task('stop:jenkins_master.hp.com', task_listener_stop)
worker.register_task('bravo', task_listener_echo)
worker.register_task('reverse', task_listener_reverse)
#worker.register_task('jenkins_invoke_job', task_listener_jenkins_invoke_job)

# Once setup is complete, begin working by consuming any tasks available
# from the job server
print 'working...'
worker.work()

# The worker will continue to run (waiting for new work) until exited by
# code or an external signal is caught
Example #11
#import pdb; pdb.set_trace()
from gearman import GearmanWorker


def message_received(gearman_worker, gearman_job):
    return "Message Received"


worker = GearmanWorker(["localhost:4730"])
worker.register_task('letsdosomething', message_received)
#print(dir(worker))
worker.work()
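
# The matching client side is symmetrical: submit_job blocks until the worker
# returns. A minimal sketch against the same local server (the payload content
# is irrelevant to this handler):
from gearman import GearmanClient

client = GearmanClient(["localhost:4730"])
request = client.submit_job('letsdosomething', 'hello')
print(request.result)  # the string returned by the worker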
Example #12
    if file_id is not None:
        newfile = MEDIA_DIRECTORY + file_id
    else:
        newfile = MEDIA_DIRECTORY + gearman_job.data

    print "TASK RECEIVED FOR %s" % newfile  # @TODO timestamp

    # CONVERT TO WEBM
    cmd = "avconv -threads auto -i %s.mp4 -c:v libvpx -crf 10 \
           -b:v 768K -c:a libvorbis -deadline realtime \
           -cpu-used -10 %s.webm" % (newfile, newfile)
    cmd = cmd.encode('utf-8')

    result = os.system(cmd)

    if result != 0:
        print "TASK FAILURE"  # @TODO timestamp
        return "ERROR"  # @TODO return something more specific to the client

    os.chmod(newfile + ".webm", 0775)

    print "TASK COMPLETE"  # @TODO timestamp
    return "COMPLETE"  # @TODO return something more specific to the client


if not app.config.get('TESTING'):
    from gearman import GearmanWorker
    worker = GearmanWorker(GEARMAN_SERVERS)
    worker.register_task("generate_webm", generate_webm)
    worker.work()
Example #13
        #TODO: Log the error
        return str(e)


################################################


def task_listener_email(gearman_worker, gearman_job):
    #TODO: Log the call and params
    cleaned_words, reply, meta, infile, sample_text = process_words(
        gearman_job.data)
    #TODO: Log the call and params
    res = inverted_index(cleaned_words, reply, meta, infile, sample_text)
    print res
    #TODO: Log the result
    if res != 'Ok':
        # Log the stack trace and exception
        return 'NotOk'
    else:
        return res


if __name__ == "__main__":
    # TODO:  Daemonize
    # TODO: Ensure there are not multiple monitors for the same location (check for lock file)
    # TODO: Make the strings config driven
    # TODO: Have a meaningful try-except block
    gm_worker.set_client_id('inv-index-worker')
    gm_worker.register_task('invindex', task_listener_email)
    gm_worker.work()
Example #14
        activity_info_config.update({'_id': elem['_id']},
                            {'$set': {'is_running': False,
                                      '__MODIFY_TIME__': datetime.datetime.now(utc)}})

    return 'successful received kanjia'

def task_city_tag(gearman_worker, job):
    print ('city_tag work received')
    print job.data
    activity_info_config = mapreduce['city_tag_config']
    print (activity_info_config.find_one())
    for elem in activity_info_config.find({"__REMOVED__": False,
                                           "is_running": False}):
        activity_info_config.update({'_id': elem['_id']},
                                    {'$set': {'is_running': True,
                                              '__MODIFY_TIME__': datetime.datetime.now(utc)}})

        begin = elem['begin']
        end = elem['end']
        detail = elem['collection_name']
        one = CityTag(begin_name=begin,end_name=end,collection_name=detail)
        result = one.get_city_info()
        print result
    return 'successful finished city_tag job'


new_worker = GearmanWorker(['192.168.5.41:4730'])
new_worker.register_task("task_city", task_city_tag)
new_worker.register_task("task_kanjia", task_kanjia)
new_worker.register_task("task_gender", task_gender)
new_worker.work()
Example #15
def task_backtest(gearman_worker, gearman_job):
    symbol = ['000001', '603993']
    bars = bindata.BackTestData(bindata.raw)
    # Apply our current strategy on the chosen stock pool
    rfs = CurrentStrategy(symbol, bars)
    # specify constraints, here is the default one
    cons = Constraint()
    # specify a naive optimizer
    opt = NaiveOptimizer(cons)

    data = json.loads(gearman_job.data)
    function_list = {}
    signal_generator = compile(data["code"], '', 'exec')
    exec signal_generator in function_list

    # Create a portfolio
    portfolio = MarketOnClosePortfolio(symbol, bars, rfs, \
                opt, initial_capital=1000000.0)
    portfolio.strategy.sig_generator = function_list["generate_signals"]
    # Backtest our portfolio and store result in book
    book = portfolio.backtest_portfolio(worker=gearman_worker, job=gearman_job)
    ret = book.nav_to_json()
    return json.dumps(ret)


if __name__ == "__main__":
    # gm_worker.set_client_id is optional
    gm_worker.set_client_id('python-worker')
    gm_worker.register_task('backtest', task_backtest)
    # Enter our work loop and call gm_worker.after_poll() after each time we timeout/see socket activity
    gm_worker.work()
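
# backtest_portfolio() receives the worker and job objects, which suggests the
# strategy reports progress while the backtest runs. A hedged sketch of such a
# helper using python-gearman's status API (this helper is an assumption, not
# part of the original strategy code):
def report_progress(gearman_worker, gearman_job, done, total):
    # Push an intermediate numerator/denominator pair; the submitting client
    # can observe it via GearmanClient.get_job_status().
    gearman_worker.send_job_status(gearman_job, done, total)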
Example #16
    def close_all(self):
        self.browser.close()
        self.browser.quit()
        self.log(u'Browser process has been terminated')
        self.log(u'')

    def wait_for(self, by, el):
        element = WebDriverWait(self.browser, 10).until(
            EC.presence_of_element_located((by, el)))
        return element

    def log(self, text):
        if self.debug:
            log_date = datetime.datetime.now()
            formatted_date = log_date.strftime("%d-%m-%Y %H:%M:%S")
            print("[{}] {}".format(formatted_date, text))


def parse_friends(worker, job):
    job_arr = json.loads(job.data)
    br = Browser(debug, head)
    br.auth(job_arr['auth'])
    job_result = br.get_users_friends(job_arr['users'])
    br.close_all()
    return json.dumps(job_result)


worker = GearmanWorker([args.gearman_host])
worker.register_task('parseFriends', parse_friends)
worker.work()
Example #17
from pyModbusTCP.client import ModbusClient
from gearman import GearmanWorker
import json

def reader(worker, job):
    c = ModbusClient(host="localhost", port=502)

    if not c.is_open() and not c.open():
        print("unable to connect to host")

    if c.is_open():

        holdingRegisters = c.read_holding_registers(1, 4)

        # Imagine we've "energy" value in position 1 with two words
        energy = (holdingRegisters[0] << 16) | holdingRegisters[1]

        # Imagine we've "power" value in position 3 with two words
        power = (holdingRegisters[2] << 16) | holdingRegisters[3]

        out = {"energy": energy, "power": power}
        return json.dumps(out)
    return None

worker = GearmanWorker(['127.0.0.1'])

worker.register_task('modbusReader', reader)

print 'working...'
worker.work()
Example #18
class GearmanIn(Actor):

    '''**Consumes events/jobs from Gearmand.**

    Consumes jobs from a Gearmand server.
    When secret is None, no decryption is done.


    Parameters:

        - hostlist(list)(["localhost:4730"])
           |  A list of gearmand servers.  Each entry should have
           |  format host:port.

        - secret(str)(None)
           |  The AES encryption key to decrypt Mod_gearman messages.

        - workers(int)(1)
           |  The number of gearman workers within 1 process.

        - queue(str)(wishbone)
           |  The queue to consume jobs from.

        - enable_keepalive(bool)(False)
           |  Attempt to monkey patch the gearmand module to enable socket
           |  keepalive.


    Queues:

        - outbox:   Outgoing events.

    '''

    def __init__(self, actor_config, hostlist=["localhost:4730"], secret=None, workers=1, queue="wishbone", enable_keepalive=False):
        Actor.__init__(self, actor_config)

        self.pool.createQueue("outbox")
        self.background_instances = []

        if self.kwargs.secret is None:
            self.decrypt = self.__plainTextJob
        else:
            key = self.kwargs.secret[0:32]
            self.cipher = AES.new(key + chr(0) * (32 - len(key)))
            self.decrypt = self.__encryptedJob

    def preHook(self):

        if self.kwargs.enable_keepalive:
            self.logging.info("Requested to monkey patch Gearmand")
            if gearman_version == "2.0.2":
                self.logging.info("Detected gearman version 2.0.2, patching sockets with SO_KEEPALIVE enabled.")
                self.gearmanWorker = self._gearmanWorkerPatched
            else:
                self.logging.warning("Did not detect gearman version 2.0.2. Not patching , patching sockets with keepalive enabled.")
                self.gearmanWorker = self._gearmanWorkerNotPatched
        else:
            self.gearmanWorker = self._gearmanWorkerNotPatched

        for _ in range(self.kwargs.workers):
            self.sendToBackground(self.gearmanWorker)

        self.sendToBackground(self.monitor)

    def consume(self, gearman_worker, gearman_job):

        decrypted = self.decrypt(gearman_job.data)
        event = Event(decrypted)
        self.submit(event, self.pool.queue.outbox)
        return gearman_job.data

    def __encryptedJob(self, data):
        return self.cipher.decrypt(base64.b64decode(data))

    def __plainTextJob(self, data):
        return data

    def _gearmanWorkerPatched(self):

        self.logging.info("Gearmand worker instance started")
        while self.loop():
            try:
                with mock.patch.object(GearmanConnection, '_create_client_socket', create_client_socket):
                    self.worker_instance = GearmanWorker(self.kwargs.hostlist)
                    self.worker_instance.register_task(self.kwargs.queue, self.consume)
                    self.worker_instance.work()
            except Exception as err:
                self.logging.warn("Connection to gearmand failed. Reason: '%s'. Retry in 1 second." % err)
                sleep(1)
            finally:
                self.worker_instance.shutdown()

    def _gearmanWorkerNotPatched(self):

        self.logging.info("Gearmand worker instance started")
        while self.loop():
            try:
                self.worker_instance = GearmanWorker(self.kwargs.hostlist)
                self.worker_instance.register_task(self.kwargs.queue, self.consume)
                self.worker_instance.work()
            except Exception as err:
                self.logging.warn("Connection to gearmand failed. Reason: '%s'. Retry in 1 second." % err)
                sleep(1)
            finally:
                self.worker_instance.shutdown()

    def monitor(self):

        self.logging.info("Connection monitor started.")
        while self.loop():
            sleep(5)
            for conn in self.worker_instance.connection_list:
                if not conn.connected:
                    self.logging.error("Connection to '%s' is dead.  Trying to reconnect." % (conn.gearman_host))
                    try:
                        conn.connect()
                        self.logging.info("Connection to '%s' is restored." % (conn.gearman_host))
                    except Exception as err:
                        self.logging.error("Failed to reconnect to '%s'. Retry in 5 seconds. Reason: '%s'" % (conn.gearman_host, err))
                else:
                    self.logging.debug("Connection to '%s' is alive." % (conn.gearman_host))
Example #19
        time.sleep(10)
        print('waiting 10s... ')
        status = i.update()
    if status == 'running':
        print('running adding tag... ')
        import hashlib
        conn.create_tags([i.id],
                         {"name": "ScrambleDB" + random_md5like_hash()})
        # i.add_tag("Name","{{ScambleDB}}")

    else:
        print('Instance status: ' + status)

    #     security_groups=[ config["cloud"]["security_groups"]])

    return json.dumps(reservation)


# Establish a connection with the job server on localhost--like the client,
# multiple job servers can be used.
worker = GearmanWorker(['127.0.0.1:4731'])

# register_task will tell the job server that this worker handles the "echo"
# task
worker.register_task('cloud_cmd', cloud_cmd)

# Once setup is complete, begin working by consuming any tasks available
# from the job server
print 'working...'
worker.work()
Example #20
    topN_list = [{item['word_doc_hash'].split('_')[1]:item} for item in topN]
    
    # NOTE: Persist topN into redis
    redis_server.set(current_topn_pattern, str(topN_list))

    # NOTE: remove the tbp_<word> from the to be processed list
    redis_server.delete(word) 
    return 'Ok'   

########################################################################


def task_listener_topncompute(gearman_worker, gearman_job):
    #TODO: Log the call and params
    res = topncompute(gearman_job.data)
    print res
    #TODO: Log the result
    if res != 'Ok':
        # Log the stack trace and exception
        return 'NotOk'
    else:
        return res

if __name__ == "__main__":
    # TODO:  Daemonize 
    # TODO: Ensure there are not multiple monitors for the same location (check for lock file)
    # TODO: Make the strings config driven
    # TODO: Have a meaningful try-except block
    gm_worker.set_client_id('topn-worker')
    gm_worker.register_task('topncompute', task_listener_topncompute)
    gm_worker.work()
Example #21
class GearmanIn(Actor):
    '''**Consumes events/jobs from Gearmand.**

    Consumes jobs from a Gearmand server.
    When secret is None, no decryption is done.


    Parameters:

        - hostlist(list)(["localhost:4730"])
           |  A list of gearmand servers.  Each entry should have
           |  format host:port.

        - secret(str)(None)
           |  The AES encryption key to decrypt Mod_gearman messages.

        - workers(int)(1)
           |  The number of gearman workers within 1 process.

        - queue(str)(wishbone)
           |  The queue to consume jobs from.

        - enable_keepalive(bool)(False)
           |  Attempt to monkey patch the gearmand module to enable socket
           |  keepalive.


    Queues:

        - outbox:   Outgoing events.

    '''
    def __init__(self,
                 actor_config,
                 hostlist=["localhost:4730"],
                 secret=None,
                 workers=1,
                 queue="wishbone",
                 enable_keepalive=False):
        Actor.__init__(self, actor_config)

        self.pool.createQueue("outbox")
        self.background_instances = []

        if self.kwargs.secret is None:
            self.decrypt = self.__plainTextJob
        else:
            key = self.kwargs.secret[0:32]
            self.cipher = AES.new(key + chr(0) * (32 - len(key)))
            self.decrypt = self.__encryptedJob

    def preHook(self):

        if self.kwargs.enable_keepalive:
            self.logging.info("Requested to monkey patch Gearmand")
            if gearman_version == "2.0.2":
                self.logging.info(
                    "Detected gearman version 2.0.2, patching sockets with SO_KEEPALIVE enabled."
                )
                self.gearmanWorker = self._gearmanWorkerPatched
            else:
                self.logging.warning(
                    "Did not detect gearman version 2.0.2. Not patching sockets with SO_KEEPALIVE enabled."
                )
                self.gearmanWorker = self._gearmanWorkerNotPatched
        else:
            self.gearmanWorker = self._gearmanWorkerNotPatched

        for _ in range(self.kwargs.workers):
            self.sendToBackground(self.gearmanWorker)

        self.sendToBackground(self.monitor)

    def consume(self, gearman_worker, gearman_job):

        decrypted = self.decrypt(gearman_job.data)
        event = Event(decrypted)
        self.submit(event, self.pool.queue.outbox)
        return gearman_job.data

    def __encryptedJob(self, data):
        return self.cipher.decrypt(base64.b64decode(data))

    def __plainTextJob(self, data):
        return data

    def _gearmanWorkerPatched(self):

        self.logging.info("Gearmand worker instance started")
        while self.loop():
            try:
                with mock.patch.object(GearmanConnection,
                                       '_create_client_socket',
                                       create_client_socket):
                    self.worker_instance = GearmanWorker(self.kwargs.hostlist)
                    self.worker_instance.register_task(self.kwargs.queue,
                                                       self.consume)
                    self.worker_instance.work()
            except Exception as err:
                self.logging.warn(
                    "Connection to gearmand failed. Reason: '%s'. Retry in 1 second."
                    % err)
                sleep(1)
            finally:
                self.worker_instance.shutdown()

    def _gearmanWorkerNotPatched(self):

        self.logging.info("Gearmand worker instance started")
        while self.loop():
            try:
                self.worker_instance = GearmanWorker(self.kwargs.hostlist)
                self.worker_instance.register_task(self.kwargs.queue,
                                                   self.consume)
                self.worker_instance.work()
            except Exception as err:
                self.logging.warn(
                    "Connection to gearmand failed. Reason: '%s'. Retry in 1 second."
                    % err)
                sleep(1)
            finally:
                self.worker_instance.shutdown()

    def monitor(self):

        self.logging.info("Connection monitor started.")
        while self.loop():
            sleep(5)
            for conn in self.worker_instance.connection_list:
                if not conn.connected:
                    self.logging.error(
                        "Connection to '%s' is dead.  Trying to reconnect." %
                        (conn.gearman_host))
                    try:
                        conn.connect()
                        self.logging.info("Connection to '%s' is restored." %
                                          (conn.gearman_host))
                    except Exception as err:
                        self.logging.error(
                            "Failed to reconnect to '%s'. Retry in 5 seconds. Reason: '%s'"
                            % (conn.gearman_host, err))
                else:
                    self.logging.debug("Connection to '%s' is alive." %
                                       (conn.gearman_host))
Example #22
    '''find out information in the bundle'''
    data = bundle.split('||')
    result = {}
    if data:
        if data[0]:
            result['nid'] = data[0]
            result['content'] = data[1]
            return result
        else:
            return None
    return None

def task_add(gearman_worker, gearman_job):
    '''insert valid item to the database'''
    job_data = parse(gearman_job.data)
    mongo_col.save(job_data)
    return 'okay'

def task_edit(gearman_worker, gearman_job):
    '''update valid item with a new value'''
    # TODO add try-except
    job_data = parse(gearman_job.data)
    print str(job_data)
    mongo_col.update({'nid':job_data['nid']}, {'$set':{'content':job_data['content']}})
    return 'okay'

gworker.register_task('add', task_add)
gworker.register_task('edit', task_edit)

gworker.work()
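
# parse() above splits the job payload on '||' into a node id and its content.
# A minimal client sketch that follows that bundle format (the server address
# is an assumption, since gworker's construction is not shown):
from gearman import GearmanClient

client = GearmanClient(['localhost:4730'])
print(client.submit_job('add', '42||first revision').result)    # 'okay'
print(client.submit_job('edit', '42||updated content').result)  # 'okay'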
Example #23
        newfile = MEDIA_DIRECTORY + file_id
    else:
        newfile = MEDIA_DIRECTORY + gearman_job.data

    print "TASK RECEIVED FOR %s" % newfile # @TODO timestamp

    # CONVERT TO WEBM
    cmd = "avconv -threads auto -i %s.mp4 -c:v libvpx -crf 10 \
           -b:v 768K -c:a libvorbis -deadline realtime \
           -cpu-used -10 %s.webm" % (newfile, newfile)
    cmd = cmd.encode('utf-8')

    result = os.system(cmd)
    
    if result != 0:
        print "TASK FAILURE" # @TODO timestamp
        return "ERROR" # @TODO return something more specific to the client
                
    os.chmod(newfile + ".webm", 0775)

    print "TASK COMPLETE" # @TODO timestamp
    return "COMPLETE" # @TODO return something more specific to the client



if not app.config.get('TESTING'):
  from gearman import GearmanWorker
  worker = GearmanWorker(GEARMAN_SERVERS)
  worker.register_task("generate_webm", generate_webm)
  worker.work()
Example #24
        return False


def task_listener(gearman_worker, gearman_job):
    task_name, video_id, segment_id = pickle.loads(gearman_job.data)
    result = False

    if task_name == 'transcode':
        result = transcode_segment(video_id, segment_id)
    elif task_name == 'thumbnail':
        result = generate_thumbnail(video_id, segment_id)

    return pickle.dumps(result)


if __name__ == "__main__":
    # worker run

    logger.info("Setting up the worker.")
    gm_worker = GearmanWorker([GEARMAND_HOST_PORT])
    gm_worker.register_task(SEGMENT_TASK_NAME, task_listener)

    try:
        logger.info("Worker was set up successfully. Waiting for work.")
        gm_worker.work()

    except KeyboardInterrupt:
        gm_worker.shutdown()
        logger.info("Worker has shut down successfully. Bye.")
Example #25
# coding=utf-8

from gearman import GearmanWorker
from FromMongo import data_cluster
import json

def task_callback(gearman_worker,job):
    a = json.loads(job.data)
    from_col = a['from_col']
    fill_type = a['fill_type']
    attr_weight = a['attr_weight']
    needtransfer = a['needtransfer']
    Parameters = a['Parameters']
    demo = data_cluster(from_col,fill_type,attr_weight,needtransfer,Parameters)
    return 'successful received'

new_worker = GearmanWorker(['192.168.5.41:4730'])
new_worker.register_task("data_cluster", task_callback)
new_worker.work()
Example #26
# coding = utf-8
import os
import gearman
import math
from gearman import GearmanWorker
from lbs_class import user_lbs
import json

def task_callback(gearman_worker, job):    
    print job.data
    a = json.loads(job.data)
    store = a['store']
    dist = a['dist']
    center = [a['center']['lng'],a['center']['lat']]
    start = a['time_start']
    end = a['time_end']
    klx = user_lbs(store,dist,center,start,end)
    return 'successful received'

new_worker = GearmanWorker(['192.168.5.41:4730'])
new_worker.register_task("user_lbs", task_callback)
new_worker.work()
Example #27
        return self.on_job_exception(current_job, sys.exc_info())
    self.task_count += 1
    if self.task_count >= 50:
        print "max tasks reached. exiting"
        sys.exit()

    return self.on_job_complete(current_job, job_result)


GearmanWorker.task_count = 0
GearmanWorker.on_job_exception = on_job_exception
GearmanWorker.on_job_execute = on_job_execute

worker = GearmanWorker(JOBSERVER_LIST)

worker.set_client_id("working_on_the_djangoroad")

worker.register_task("add", add)
worker.register_task("pre_schedule", pre_schedule)
worker.register_task("row_calculator", row_calculator)
worker.register_task("row_aggregator", row_aggregator)

worker.register_task("echo", echo)


print "working"
#print dir(worker)
#print worker.worker_abilities
worker.work()
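
# This example starts inside a custom on_job_execute() whose tail is shown at
# the top; the full override, modeled on python-gearman's stock hook, could
# look like the sketch below (only the task-counting tail is taken from the
# example, the rest is an assumption):
import sys


def on_job_execute(self, current_job):
    try:
        # Run the registered callback for this task, as the stock hook does.
        function_callback = self.worker_abilities[current_job.task]
        job_result = function_callback(self, current_job)
    except Exception:
        return self.on_job_exception(current_job, sys.exc_info())
    # Count completed jobs and exit the process after 50 of them.
    self.task_count += 1
    if self.task_count >= 50:
        print "max tasks reached. exiting"
        sys.exit()
    return self.on_job_complete(current_job, job_result)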

Example #28
        myj = Jenkins(jenkins_data['url'])
        job = myj.get_job(jenkins_data['job_id'])
        #job.invoke(securitytoken=token, block=block)
        job.invoke(invoke_pre_check_delay=0)
    except:
        rev = "Not Happy!!!"

    return rev


# Establish a connection with the job server on localhost--like the client,
# multiple job servers can be used.
worker = GearmanWorker(['localhost'])

# register_task will tell the job server that this worker handles the "echo"
# task
worker.set_client_id('your_worker_client_id_name')
worker.register_task('echo', task_listener_echo)
worker.register_task('build:pep8', task_listener_build)
worker.register_task('stop:jenkins_master.hp.com', task_listener_stop)
worker.register_task('bravo', task_listener_echo)
worker.register_task('reverse', task_listener_reverse)
worker.register_task('jenkins_invoke_job', task_listener_jenkins_invoke_job)

# Once setup is complete, begin working by consuming any tasks available
# from the job server
print 'working...'
worker.work()

# The worker will continue to run (waiting for new work) until exited by
# code or an external signal is caught
Example #29
class Alligator(object):
    def __init__(self):
        config = json.load(open('config.json','r'))
        self.gm_worker = GearmanWorker([  config['gearmanip']+':'+str(config['gearmanport']) ])
        self.gm_worker.register_task(str(config["gearmanworker_apiai"]),self.run)
        self.wolframkey = config["wolfram_key"]
    def run(self,gearman_worker,gearman_job):
        result,lastresult = "",[]
        try:
            request = json.loads(gearman_job.data)
            session = request['uId']
            channel = request['channel']
            query = request['query']
            print "QUERY : ",query
            language = 'en'
            apirequest = "http://api.wolframalpha.com/v2/query?input="+query+"&appid="+self.wolframkey+"&format=image"
            value = requests.get(apirequest)
            tags = ET.fromstring(value._content)
            imagelistings = []
            for each in tags:
                if 'title' in each.attrib:
                    #print 'title : ',each.attrib['title']
                    if each.attrib['title'].lower().strip() == "result".lower():
                        text = each.find('subpod')
                        for all in text:
                            #print "all : ",all.attrib
                            if 'title' in all.attrib:
                                if channel == 'ui':
                                    result = result + all.attrib['title'] + "<br>"
                                else:
                                    result = result + all.attrib['title'] + "\n"
                                imagelistings.append(all.attrib['src'])
                        break
                    elif "input" not in each.attrib['title'].lower().strip():
                        text = each.find('subpod')
                        for all in text:
                            #print "all : ",all.attrib
                            if 'title' in all.attrib:
                                if all.attrib['title'].strip():
                                    print all.attrib
                                    lastresult.append((each.attrib['title'],all.attrib['title']))
                                if 'src' in all.attrib:
                                    print "title :",each.attrib['title']
                                    imagelistings.append(all.attrib['src'])
                else:
                    print "attribute without title : ",each.attrib
            imageflag = False
            print "RESULT : ",result
            if not result.strip():
                if channel == 'ui':
                    if lastresult:
                        print "in lastresult"
                        currenttitle = ""
                        for each,all in lastresult:
                            if each != currenttitle:
                                currenttitle = each
                                result = result + "<br><b><u> "+each+": </u></b>"
                            result = result + "<br> "+all
                    elif imagelistings:
                        imageflag = True
                        print "in imagelistings",imagelistings
                        for each in imagelistings:
                            result = result + '<img src = "'+each+'" /> <br>'
                    else:
                        result = self.randomResponses()
                else:
                    if lastresult:
                        print "in lastresult"
                        currenttitle = ""
                        for each,all in lastresult:
                            if each != currenttitle and each.lower().strip() != "response":
                                currenttitle = each
                                result = result + "\n"+each+": "
                            result = result + "\n \t "+all
                    else:
                        result = self.randomResponses()
            print "RESULT : ",result
            if "data not available" in result:
                result = self.randomResponses()
            if "wolfram" in result.lower() and not imageflag:
                if "stephen" not in result.lower():
                    resultlist = result.split()
                    for each,value in enumerate(resultlist):
                        print each,value
                        if 'wolfram' in resultlist[each].lower():
                            resultlist[each] = "Alligator"
                            result = " ".join(resultlist)
                else:
                    result = result.replace("Stephen Wolfram","Kannan Piedy")
            #result = '<img src = "http://localhost:7000/image/logo.png" />'
            return json.dumps({'result':result,'sessionId':channel+'_'+session})
        except Exception as e:
            print "Exception in Run : ",e
Example #30
      time.sleep(10)
      print('waiting 10s... ')
      status = i.update()
   if status == 'running':
      print('running adding tag... ')
      import hashlib
      conn.create_tags([i.id], {"name": "ScrambleDB" +random_md5like_hash()})
      # i.add_tag("Name","{{ScambleDB}}")
      
   else:
      print('Instance status: ' + status)
    
   #     security_groups=[ config["cloud"]["security_groups"]])
   
   return json.dumps(reservation)    

# Establish a connection with the job server on localhost--like the client,
# multiple job servers can be used.
worker = GearmanWorker(['127.0.0.1:4731'])

# register_task will tell the job server that this worker handles the "echo"
# task
worker.register_task('cloud_cmd', cloud_cmd)

# Once setup is complete, begin working by consuming any tasks available
# from the job server
print 'working...'
worker.work()


Example #31
def task_backtest(gearman_worker, gearman_job):
    symbol = ['000001', '603993']
    bars = bindata.BackTestData(bindata.raw)
    # Apply our current strategy on the chosen stock pool
    rfs = CurrentStrategy(symbol, bars)
    # specify constraints, here is the default one
    cons = Constraint()
    # specify a naive optimizer
    opt = NaiveOptimizer(cons)

    data = json.loads(gearman_job.data)
    function_list = {}
    signal_generator = compile(data["code"], '', 'exec')
    exec signal_generator in function_list

    # Create a portfolio
    portfolio = MarketOnClosePortfolio(symbol, bars, rfs, \
                opt, initial_capital=1000000.0)
    portfolio.strategy.sig_generator = function_list["generate_signals"]
    # Backtest our portfolio and store result in book
    book = portfolio.backtest_portfolio(worker=gearman_worker, job=gearman_job)
    ret = book.nav_to_json()
    return json.dumps(ret)


if __name__ == "__main__":
    # gm_worker.set_client_id is optional
    gm_worker.set_client_id('python-worker')
    gm_worker.register_task('backtest', task_backtest)
    # Enter our work loop and call gm_worker.after_poll() after each time we timeout/see socket activity
    gm_worker.work()