Example #1
def startRPCServer():
    gm_worker = gearman.GearmanWorker([django_settings.GEARMAN_SERVER])
    hostID = gethostname() + "_MCPServer"
    gm_worker.set_client_id(hostID)

    # The tasks registered in this worker should not block.
    gm_worker.register_task(
        "approveJob", job_approve_handler)
    gm_worker.register_task(
        "getJobsAwaitingApproval", job_awaiting_approval_handler)
    gm_worker.register_task(
        "packageCreate", package_create_handler)
    gm_worker.register_task(
        "approveTransferByPath", approve_transfer_by_path_handler)
    gm_worker.register_task(
        "approvePartialReingest", approve_partial_reingest_handler)
    gm_worker.register_task(
        "getProcessingConfigFields", get_processing_config_fields_handler)

    failMaxSleep = 30
    failSleep = 1
    failSleepIncrementor = 2
    while True:
        try:
            gm_worker.work()
        except gearman.errors.ServerUnavailable:
            time.sleep(failSleep)
            if failSleep < failMaxSleep:
                failSleep += failSleepIncrementor
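The comment in Example #1 says the registered handlers should not block the work loop. A minimal sketch of one way to do that for fire-and-forget jobs (not part of the example; work_queue, background_runner and slow_task are hypothetical names): enqueue the payload and let a background thread do the slow part, so the handler returns immediately.

import threading
try:
    import queue           # Python 3
except ImportError:
    import Queue as queue  # Python 2

work_queue = queue.Queue()

def background_runner():
    # Drain the queue and run the slow work outside the gearman poll loop.
    while True:
        payload = work_queue.get()
        slow_task(payload)  # hypothetical long-running function
        work_queue.task_done()

def non_blocking_handler(gearman_worker, gearman_job):
    # Hand the job data off and return right away so gm_worker.work() keeps polling.
    work_queue.put(gearman_job.data)
    return 'queued'

threading.Thread(target=background_runner).start()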
Example #2
    def __init__(self):
        # self.worker = CustomGearmanWorker(['sandbox:4730'], max_jobs=1)
        self.worker = gearman.GearmanWorker(['sandbox:4730'])
        self.perfdata = GearmanPerfData(
            secret='Aloh9uibshojeF8oAhyo3eefGu5ohr3iDeek4ehamaM9eisoas6OoveiareQuo0i')
        self.worker.register_task('perfdata', self.perfdata.getData)
Example #3
def start_gearman_worker(supported_modules):
    """Setup a gearman client, for the thread."""
    gm_worker = gearman.GearmanWorker([django_settings.GEARMAN_SERVER])
    host_id = '{}_{}'.format(gethostname(), os.getpid())
    gm_worker.set_client_id(host_id)
    task_handler = partial(execute_command, supported_modules)
    for client_script in supported_modules:
        logger.info('Registering: %s', client_script)
        gm_worker.register_task(client_script, task_handler)
    fail_max_sleep = 30
    fail_sleep = 1
    fail_sleep_incrementor = 2
    while True:
        try:
            gm_worker.work()
        except gearman.errors.ServerUnavailable as inst:
            logger.error(
                'Gearman server is unavailable: %s. Retrying in %d'
                ' seconds.', inst.args, fail_sleep)
            time.sleep(fail_sleep)
            if fail_sleep < fail_max_sleep:
                fail_sleep += fail_sleep_incrementor
        except Exception as e:
            # Generally execute_command should have caught and dealt with any
            # errors gracefully, but we should never let an exception take down
            # the whole process, so one last catch-all.
            logger.exception(
                'Unexpected error while handling gearman job: %s.'
                ' Retrying in %d seconds.', e, fail_sleep)
            time.sleep(fail_sleep)
            if fail_sleep < fail_max_sleep:
                fail_sleep += fail_sleep_incrementor
Example #4
    def __init__(self, config):
        self.config = config
        self.worker = gearman.GearmanWorker([
            '%s:%s' % (self.config.get_gearman_host(),
                       self.config.get_gearman_port())
        ])
        self.worker.set_client_id(self.config.get_worker_name())
Example #5
def dowork():
    import os
    gw = gearman.GearmanWorker(Servers)
    gw.set_client_id('work_%s' % os.getpid())  #optional
    gw.register_task(workfn, task_fans)
    # Enter our work loop
    gw.work()
Example #6
def startRPCServer():
    logger = logging.getLogger("archivematica")
    logger.addHandler(
        GroupWriteRotatingFileHandler(
            "/var/log/archivematica/MCPServer/MCPServer.log",
            maxBytes=4194304))

    gm_worker = gearman.GearmanWorker(
        [archivematicaMCP.config.get('MCPServer', 'GearmanServerWorker')])
    hostID = gethostname() + "_MCPServer"
    gm_worker.set_client_id(hostID)
    gm_worker.register_task("approveJob", gearmanApproveJob)
    gm_worker.register_task("getJobsAwaitingApproval",
                            gearmanGetJobsAwaitingApproval)
    failMaxSleep = 30
    failSleep = 1
    failSleepIncrementor = 2
    while True:
        try:
            gm_worker.work()
        except gearman.errors.ServerUnavailable as inst:
            #print >>sys.stderr, inst.args
            #print >>sys.stderr, "Retrying in %d seconds." % (failSleep)
            time.sleep(failSleep)
            if failSleep < failMaxSleep:
                failSleep += failSleepIncrementor
Example #7
    def __init__(self, redisClient, gearmanHost):

        BasicGeodisWorker.__init__(self, redisClient)

        self.worker = gearman.GearmanWorker([gearmanHost])

        self.worker.register_task('geodis_lookup_city', self.lookup)
        print "Registered worker"
Example #8
	def start_gearman(cls, gearman_servers):
		"""
		Connect to gearmand and fire up a worker
		"""
		gearman_worker = gearman.GearmanWorker(gearman_servers or ['localhost:4730'])
		gearman_worker.register_task('ponyexpress', cls.from_gearman)
		gearman_worker.work()
		return True
Example #9
    def handle(self, *args, **options):
        task_name = 'task_%s' % options['queue']
        if task_name not in dir(self):
            sys.exit(1)

        task_func = lambda x, y: getattr(Command, task_name)(self, x, y)
        gm_worker = gearman.GearmanWorker(settings.GEARMAN_JOB_SERVERS)
        gm_worker.register_task(options['queue'], task_func)
        workers_logger.info('Started worker with tasks: %s' % task_name)
        gm_worker.work()
Example #10
    def run(self):
        threading.Thread(target=self._shutdown_thread).start()
        self.worker = gearman.GearmanWorker(self.rpc_server.host_list)
        for service_name, (_, initializer) in six.iteritems(
                self.rpc_server.service_mappings):
            if initializer is not None:
                initializer()
            self.worker.register_task(service_name, self.gm_task_callback)
        try:
            self.worker.work(5.0)
        except gearman.errors.ServerUnavailable:
            pass
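Example #10 passes a poll timeout to work() and relies on a shutdown thread. In python-gearman, work() keeps looping until after_poll() returns False, so a clean stop usually means overriding that hook. A minimal sketch, assuming a hypothetical stop_event and a placeholder 'noop' task:

import threading
import gearman

stop_event = threading.Event()

class StoppableWorker(gearman.GearmanWorker):
    def after_poll(self, any_activity):
        # Called after every poll cycle; returning False makes work() exit.
        return not stop_event.is_set()

worker = StoppableWorker(['localhost:4730'])
worker.register_task('noop', lambda gm_worker, gm_job: '')
# The poll timeout bounds how long an idle worker waits before after_poll() runs again.
worker.work(poll_timeout=5.0)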
Example #11
    def handle(self, *args, **options):
        gm_worker = gearman.GearmanWorker(settings.GEARMAN_JOB_SERVERS)
        registered_tasks = []
        for task_name in dir(self):
            if task_name.startswith('task_'):
                task_name = str(task_name)
                t_name = task_name.replace('task_', '')
                task_func = lambda i: (lambda x, y: getattr(Command, i)
                                       (self, x, y))
                gm_worker.register_task(t_name, task_func(task_name))
                registered_tasks.append(t_name)

        workers_logger.info('Started worker with tasks: %s' %
                            ', '.join(registered_tasks))
        gm_worker.work()
Example #12
    def __init__(self, task_limit=10):
        with open('config.yml', 'r') as cnfg:
            self.config = yaml.load(cnfg, Loader=yaml.FullLoader)
        self._task_limit = task_limit
        self._count_current_tasks = 0
        self._current_tasks = []
        self._timer = 0
        self.gearman_client = gearman.GearmanClient(
            [self.config.get('GJS_1').get('job_server_url')])
        self.gearman_worker = gearman.GearmanWorker(
            [self.config.get('GJS_1').get('job_server_url')])
        self.gearman_worker.register_task(
            self.config.get('worker_ids').get('async_governer'), self.governer)
        ready_message = pyfiglet.figlet_format("Worker Ready!!")
        print(ready_message)
Example #13
    def handle(self, *args, **options):
        logger.info('Starting worker')
        task_name = 'task_%s' % options['queue']
        logger.info('Task: %s' % task_name)
        if task_name not in dir(self):
            logger.info("Wow.. That's crazy! Maybe try an existing queue?")
            sys.exit(1)

        task_func = lambda x, y: getattr(Command, task_name)(self, x, y)
        logger.info('Initializing gm_worker')
        gm_worker = gearman.GearmanWorker(settings.GEARMAN_JOB_SERVERS)
        logger.info('Registering task %s, function %s' % (task_name, task_func))
        gm_worker.register_task(options['queue'], task_func)
        logger.info('Starting work')
        gm_worker.work()
        logger.info('Ended work')
Example #14
def startRPCServer():
    gm_worker = gearman.GearmanWorker([django_settings.GEARMAN_SERVER])
    hostID = gethostname() + "_MCPServer"
    gm_worker.set_client_id(hostID)
    gm_worker.register_task("approveJob", gearmanApproveJob)
    gm_worker.register_task("getJobsAwaitingApproval",
                            gearmanGetJobsAwaitingApproval)
    failMaxSleep = 30
    failSleep = 1
    failSleepIncrementor = 2
    while True:
        try:
            gm_worker.work()
        except gearman.errors.ServerUnavailable:
            time.sleep(failSleep)
            if failSleep < failMaxSleep:
                failSleep += failSleepIncrementor
Example #15
    def handle(self, *args, **options):
        # N.B. don't take out the print statements as they're
        # very very very very very very very very very very
        # helpful in debugging supervisor+worker+gearman
        self.write_stdout('Starting worker\n')
        task_name = 'task_%s' % options['queue']
        self.write_stdout('Task: %s\n' % task_name)
        if task_name not in dir(self):
            self.write_stdout("Wow.. That's crazy! Maybe try an existing queue?\n")
            sys.exit(1)
        task_func = lambda x, y: getattr(Command, task_name)(self, x, y)
        self.write_stdout('Initializing gm_worker\n')
        gm_worker = gearman.GearmanWorker(settings.GEARMAN_JOB_SERVERS)
        self.write_stdout('Registering task %s, function %s\n' % (task_name, task_func))
        gm_worker.register_task(options['queue'], task_func)
        self.write_stdout('Starting work\n')
        gm_worker.work()
        self.write_stdout('Ended work\n')
Example #16
def startRPCServer():
    gm_worker = gearman.GearmanWorker(
        [archivematicaMCP.config.get('MCPServer', 'GearmanServerWorker')])
    hostID = gethostname() + "_MCPServer"
    gm_worker.set_client_id(hostID)
    gm_worker.register_task("approveJob", gearmanApproveJob)
    gm_worker.register_task("getJobsAwaitingApproval",
                            gearmanGetJobsAwaitingApproval)
    failMaxSleep = 30
    failSleep = 1
    failSleepIncrementor = 2
    while True:
        try:
            gm_worker.work()
        except gearman.errors.ServerUnavailable:
            time.sleep(failSleep)
            if failSleep < failMaxSleep:
                failSleep += failSleepIncrementor
Example #17
def startThread(threadNumber):
    """Setup a gearman client, for the thread."""
    gm_worker = gearman.GearmanWorker(
        [config.get('MCPClient', "MCPArchivematicaServer")])
    hostID = gethostname() + "_" + threadNumber.__str__()
    gm_worker.set_client_id(hostID)
    for key in supportedModules.iterkeys():
        printOutputLock.acquire()
        print "registering:", '"' + key + '"'
        printOutputLock.release()
        gm_worker.register_task(key, executeCommand)

    # Load transcoder jobs
    sql = """SELECT CommandRelationships.pk 
                FROM CommandRelationships 
                JOIN Commands ON CommandRelationships.command = Commands.pk
                JOIN CommandsSupportedBy ON Commands.supportedBy = CommandsSupportedBy.pk 
                WHERE CommandsSupportedBy.description = 'supported by default archivematica client';"""
    rows = databaseInterface.queryAllSQL(sql)
    if rows:
        for row in rows:
            CommandRelationshipsPK = row[0]
            key = "transcoder_cr%s" % (CommandRelationshipsPK.__str__())
            printOutputLock.acquire()
            print "registering:", '"' + key + '"'
            printOutputLock.release()
            gm_worker.register_task(
                key, transcoderNormalizer.executeCommandReleationship)

    failMaxSleep = 30
    failSleep = 1
    failSleepIncrementor = 2
    while True:
        try:
            gm_worker.work()
        except gearman.errors.ServerUnavailable as inst:
            print >> sys.stderr, inst.args
            print >> sys.stderr, "Retrying in %d seconds." % (failSleep)
            time.sleep(failSleep)
            if failSleep < failMaxSleep:
                failSleep += failSleepIncrementor
Example #18
    def handle(self, *args, **options):
        # N.B. don't take out the print statements as they're
        # very very very very very very very very very very
        # helpful in debugging supervisor+worker+gearman
        self.write_stdout('Initializing gm_worker\n')
        gm_worker = gearman.GearmanWorker(settings.GEARMAN_JOB_SERVERS)

        # Read all methods of the class and if it starts with 'task_' then
        # register as a task on gearman
        for task_name in dir(self):
            if task_name.startswith('task_'):
                task_name = str(task_name)
                t_name = task_name.replace('task_', '')
                self.write_stdout('Task: %s\n' % t_name)
                task_func = lambda i: (lambda x, y: getattr(Command, i)
                                       (self, x, y))
                gm_worker.register_task(t_name, task_func(task_name))

        self.write_stdout('Starting work\n')
        gm_worker.work()
        self.write_stdout('Ended work\n')
Example #19
def startThread(threadNumber):
    """Setup a gearman client, for the thread."""
    gm_worker = gearman.GearmanWorker(
        [config.get('MCPClient', "MCPArchivematicaServer")])
    hostID = gethostname() + "_" + threadNumber.__str__()
    gm_worker.set_client_id(hostID)
    for key in supportedModules.keys():
        logger.info('Registering: %s', key)
        gm_worker.register_task(key, executeCommand)

    failMaxSleep = 30
    failSleep = 1
    failSleepIncrementor = 2
    while True:
        try:
            gm_worker.work()
        except gearman.errors.ServerUnavailable as inst:
            logger.error(
                'Gearman server is unavailable: %s. Retrying in %d seconds.',
                inst.args, failSleep)
            time.sleep(failSleep)
            if failSleep < failMaxSleep:
                failSleep += failSleepIncrementor
Example #20
def startThread(threadNumber):
    """Setup a gearman client, for the thread."""
    gm_worker = gearman.GearmanWorker([config.get('MCPClient', "MCPArchivematicaServer")])
    hostID = gethostname() + "_" + threadNumber.__str__()
    gm_worker.set_client_id(hostID)
    for key in supportedModules.iterkeys():
        printOutputLock.acquire()
        print 'registering:"{}"'.format(key)
        printOutputLock.release()
        gm_worker.register_task(key, executeCommand)
            
    failMaxSleep = 30
    failSleep = 1
    failSleepIncrementor = 2
    while True:
        try:
            gm_worker.work()
        except gearman.errors.ServerUnavailable as inst:
            print >>sys.stderr, inst.args
            print >>sys.stderr, "Retrying in %d seconds." % (failSleep)
            time.sleep(failSleep)
            if failSleep < failMaxSleep:
                failSleep += failSleepIncrementor
Example #21
import sys
import traceback
import torch
import numpy as np
import gearman
from multiprocessing import Process, Queue, Pool, Manager
import os

from PIL import Image

from tool.utils import transform_extension_path

sys.path.append('/home/user/workspace/priv-0220/Pet-engine')
from core.semseg_priv_config import cfg_priv
sys.path.append(cfg_priv.PET_ROOT)
from modules import pet_engine

gm_worker = gearman.GearmanWorker(['localhost:4730'])
semseg_inference = None

q_file = Queue()
q_name = Queue()


def read_q(q_file, q_name):
    while True:
        if q_file.empty() and q_name.empty():
            file = q_file.get(True)
            name = q_name.get(True)
            print('name -- {}'.format(name))
            np.save(name, file)

Example #22
import sys
import urllib2
#import urllib.request as urlreq
from bs4 import BeautifulSoup as bs
import requests
from multiprocessing import Pool
import os
import getopt
import threading
import wikipedia
import codecs
import gearman

from StringIO import StringIO

gm_worker = gearman.GearmanWorker(['10.0.2.13:4730'])


def linklisting(search):
    linklist = []
    url = "http://www.google.com/search?"
    payload = {'q': search}
    try:
        r = requests.get(url, payload, timeout=10)
        #print(r.url)
        print "--------------------------------".encode('utf-8')
        print "\t", search, "\t".encode('utf-8')
        print "--------------------------------".encode('utf-8')
        #		print "1".encode('utf-8')
        if r.status_code == 200:
            html = r.text
Example #23
import gearman

gm_worker = gearman.GearmanWorker(['172.26.183.16:4735', '172.26.183.15:4735'])


def task_listener_reverse(gearman_worker, gearman_job):
    print 'Reversing string: ' + gearman_job.data
    return gearman_job.data[::-1]


# gm_worker.set_client_id is optional
# gm_worker.set_client_id('python-worker')
gm_worker.register_task('reverse', task_listener_reverse)

# Enter our work loop and call gm_worker.after_poll() after each time we timeout/see socket activity
gm_worker.work()
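All of these examples are the worker side. For reference, a minimal sketch of a matching client (assuming the same job servers as Example #23) that submits a 'reverse' job and reads the result:

import gearman

# Connect to the same job servers the worker above registered with.
gm_client = gearman.GearmanClient(['172.26.183.16:4735', '172.26.183.15:4735'])

# submit_job blocks until the worker returns; the reversed string is in .result.
completed_request = gm_client.submit_job('reverse', 'Hello gearman')
print(completed_request.result)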
Example #24
from collections import defaultdict
from elasticsearch import Elasticsearch
import gearman


# Directory where the CSV files are stored
csv_path = '../data/csvs/'

# Gearman job server addresses
GEARMAN_SERVERS = ['127.0.0.1:4730']
ES_HOST = '127.0.0.1'
ES_PORT = 9200


# Create the worker connection
gearman_worker = gearman.GearmanWorker(GEARMAN_SERVERS)


# Analyzer helper
def es_analyze_name(name):
    # Connect to Elasticsearch
    es = Elasticsearch([{'host': ES_HOST, 'port': ES_PORT}])

    # Run the ik_smart analyzer on the name
    res = es.indices.analyze('customers', body={'analyzer': 'ik_smart', 'text': name})
    words = res['tokens']

    # Collect the tokens from the analysis result
    word_list = []
    for word in words:
        word_list.append(word['token'])
    # Write a log entry
    print_log('create_live_activity',
              '%s ==========================> Finished' % (data['event_id']))
    return ''


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-env',
                        action='store',
                        dest='wxenv',
                        required=True,
                        help='Test|Stage|Production')
    args = parser.parse_args(sys.argv[1:])
    wxenv = args.wxenv
    if wxenv not in ['Local', 'Test', 'Stage', 'Production', 'UnitTest']:
        raise EnvironmentError('The environment variable (WXENV) is invalid ')
    os.environ['WXENV'] = wxenv
    sys.path.append(dirname(dirname(dirname(abspath(__file__)))))
    from wanx import app
    servers = app.config.get("GEARMAN_SERVERS")
    gm_worker = gearman.GearmanWorker(servers)
    gm_worker.set_client_id('create_live_activity')
    gm_worker.register_task('create_live_activity', do_task)
    try:
        gm_worker.work()
    except:
        pass

    gm_worker.unregister_task('create_live_activity')
Example #26
import time
import json
import gearman

from addtask import addtask
from lib.cmdLib import *


def gearmanwork(gearman_worker, gearman_job):
    params = {"RET": "INTERNAL ERROR"}
    try:
        params = json.loads(gearman_job.data)
        '''addtask(params['JenkinsURL'], params['JobName'], params['BuildParams'])'''
        cmdstr = "python addtask.py " + params['JenkinsURL'] + " " + params[
            'JobName'] + " '" + json.dumps(params['BuildParams']) + "'"
        print cmdstr
        status, output = cmd_execute(cmdstr)
    except Exception as e:
        print(e)
    finally:
        print json.dumps(params)
        return json.dumps(params)


if __name__ == '__main__':
    try:
        worker = gearman.GearmanWorker(["127.0.0.1:8899"])
        worker.register_task('gearmanwork', gearmanwork)
        worker.work()
    except Exception as e:
        print(e)
Example #27
def worker():
    gm_worker = gearman.GearmanWorker(['localhost:4730'])
    gm_worker.register_task('index', work)
    gm_worker.work()
Example #28
# coding=utf-8
from Base import *
from scrapy import Selector
import requests
import gearman
import pymongo
import uniout
import hashlib
import time
import re

JobWorker = gearman.GearmanWorker([app_config['job']['gearman']])
Mongo = pymongo.MongoClient(host=app_config['db']['mongo'],
                            socketTimeoutMS=None,
                            socketKeepAlive=True)
Collection = Mongo['wulo']
Database = Collection['data']
RAWDB = Collection['raw']

_hash = hashlib.sha224()

# res = requests.get('https://www.ptt.cc/bbs/StupidClown/M.1457268374.A.827.html')
# res = requests.get('https://www.ptt.cc/bbs/StupidClown/M.1457268828.A.363.html')
# res = requests.get('https://www.ptt.cc/bbs/StupidClown/M.1457273711.A.902.html')
res = requests.get('https://www.ptt.cc/bbs/LGBT_SEX/M.1456717141.A.C4D.html',
                   headers={'cookie': ';over18=1;'})

if res.status_code == 200:
    _hash.update(res.url)
    hexhash = _hash.hexdigest()
    RAWDB.update_one(
Example #29
import requests
import json
import time
import sys
import traceback
import cv2
from urllib.parse import quote
import gearman
import base64
import numpy as np

gm_worker = gearman.GearmanWorker(['127.0.0.1:4730'])


def task_listener_reverse(gearman_worker, gearman_job):
    print("receive data")
    s = gearman_job.data

    img_s = json.loads(s)

    d64 = base64.b64decode(img_s['imgs'])
    nparr = np.fromstring(d64, np.uint8)
    image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    cv2.imwrite('./imgs/woker_py.jpg', image)

    # 2-D array (bounding boxes)
    bbox = img_s['bbox']
    print(bbox)
    print("write img done")
Example #30
#!/usr/bin/python
import gearman, json, hashlib, argparse

parser = argparse.ArgumentParser()
parser.add_argument('server', help='Address of server')
parser.add_argument('-p', '--port', help='Port at the server', default='4730')
args = parser.parse_args()


def crack(gearman_worker, gearman_job):
    var = gearman_job.data
    var = json.loads(var)
    hash = var['hash']
    words = var['words']
    for i in words:
        word = i.rstrip().lower()
        hash_object = hashlib.sha256(word)
        hex_dig = hash_object.hexdigest()
        print 'Word:' + i + 'Hash:' + hex_dig + '\n'
        if hex_dig == hash:
            return str(word)
    return ""


gm_worker = gearman.GearmanWorker([args.server + ':' + args.port])
gm_worker.register_task('crack', crack)
gm_worker.work()