Example #1
def worker_main(queue):
    dcap = dict(DesiredCapabilities.PHANTOMJS)
    browser = webdriver.PhantomJS(
        '/usr/local/nvm/versions/node/v9.2.0/lib/node_modules/phantomjs-prebuilt/bin/phantomjs',
        desired_capabilities=dcap)
    while True:
        LINK = queue.get(True)
        worker(browser, LINK)
        queue.task_done()
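A minimal sketch of how worker_main might be driven; the JoinableQueue (implied by queue.task_done()), the pool size, and the sample links are assumptions, not part of the original snippet:

import multiprocessing

if __name__ == '__main__':
    queue = multiprocessing.JoinableQueue()  # task_done()/join() require a JoinableQueue
    for _ in range(4):  # hypothetical pool size
        proc = multiprocessing.Process(target=worker_main, args=(queue,))
        proc.daemon = True
        proc.start()
    for link in ['https://example.com/a', 'https://example.com/b']:
        queue.put(link)
    queue.join()  # blocks until every queued LINK has been processed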
Example #2
 def addToQueue(unpacker, packetType, jsonData):
     unpacker.queueDict[packetType].append(jsonData)
     if (len(unpacker.queueDict[packetType]) >= QueueLimits[packetType]):
         finalData = {
             "packets": unpacker.queueDict[packetType]
         }
         print("About to send the following type of data", packetType)
         # requests.post(add_data_url, json=finalData)
         worker.worker(add_data_url, finalData)
         print("unblocked")
         unpacker.queueDict[packetType] = []
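The function above buffers packets per type and flushes a batch once a threshold is hit. A hypothetical, self-contained wiring of it; QueueLimits, add_data_url, the worker module stub, and the Unpacker shape are all invented for the illustration:

class _FakeWorkerModule:
    @staticmethod
    def worker(url, data):
        print('would POST', len(data['packets']), 'packets to', url)

worker = _FakeWorkerModule()          # stand-in for the real worker module
QueueLimits = {'telemetry': 3}        # flush after three queued packets
add_data_url = 'https://example.invalid/add_data'

class Unpacker:
    def __init__(self):
        self.queueDict = {'telemetry': []}

unpacker = Unpacker()
for seq in range(3):
    addToQueue(unpacker, 'telemetry', {'seq': seq})  # the third call flushes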
Example #3
    def initContentFetchWorkers(self):
        """ Initialize all worker threads
        """
        logger.debug("Initializing the content fetching worker threads.")

        workerNumber = 0

        for keyitem in self.mods.keys():

            if self.mods[keyitem].pluginType in [
                    Types.MODULE_NEWS_CONTENT, Types.MODULE_NEWS_API,
                    Types.MODULE_DATA_CONTENT
            ]:
                workerNumber = workerNumber + 1

                self.contentFetchWorkers[workerNumber] = worker(
                    self.mods[keyitem],
                    Types.TASK_GET_DATA,
                    self.workCompletedURLs,
                    # make unique worker names
                    name=str(workerNumber + len(self.urlSrcWorkers)),
                    daemon=False)

                self.contentFetchWorkers[workerNumber].setRunDate(self.runDate)
                # after this, the self.contentFetchWorkers dict has the structure: workers[1] = <instantiated worker object>

        logger.info("%s worker threads available to fetch content.",
                    len(self.contentFetchWorkers))
        if len(self.contentFetchWorkers) != self.workerThreads:
            logger.error(
                "Could not initialize required no of content fetching worker threads."
            )
Example #4
    def initURLSourcingWorkers(self):
        """ Initialize all worker threads to identify URLs
        """
        logger.debug("Initializing the worker threads to identify URLs.")

        workerNumber = 0
        for keyitem in self.mods.keys():

            if self.mods[keyitem].pluginType in [
                    Types.MODULE_NEWS_CONTENT, Types.MODULE_NEWS_API,
                    Types.MODULE_DATA_CONTENT, Types.MODULE_NEWS_AGGREGATOR
            ]:

                workerNumber = workerNumber + 1

                self.urlSrcWorkers[workerNumber] = worker(
                    self.mods[keyitem],
                    Types.TASK_GET_URL_LIST,
                    self.workCompletedURLs,
                    name=workerNumber,
                    daemon=False)

                self.urlSrcWorkers[workerNumber].setRunDate(self.runDate)
                # after this, the self.urlSrcWorkers dict has the structure: workers[1] = <instantiated worker object>

        logger.info("%s worker threads available to identify URLs to source.",
                    len(self.urlSrcWorkers))
        if len(self.urlSrcWorkers) != self.workerThreads:
            logger.error(
                "Could not initialize required no of identify URL worker threads."
            )
Example #5
def submit_job(job_type: str):
    """
    API for triggering jobs
    """
    if request.method == 'POST':
        job_type = job_type.replace('~', '/')
        # special-case NLPQL test and registration jobs before validating the route
        if (job_type == 'validate_nlpql' or job_type == 'nlpql_tester') and request.data:
            _, res = submit_test(request.data)
            return json.dumps(res, indent=4, sort_keys=True)
        elif job_type == 'register_nlpql' and request.data:
            res = request.data.decode("utf-8")
            return json.dumps(add_custom_nlpql(res), indent=4, sort_keys=True)
        if not valid_job(job_type):
            return Response(json.dumps({'message': 'Invalid API route. Valid Routes: ' + get_api_routes()}, indent=4,
                                       sort_keys=True), status=400,
                            mimetype='application/json')
        else:
            data = request.get_json()
            job_file_path = "nlpql/" + job_type + ".nlpql"
            return worker(job_file_path, data)
    else:
        return Response(json.dumps({'message': 'API supports only POST requests'}, indent=4, sort_keys=True),
                        status=400,
                        mimetype='application/json')
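A hypothetical client call against this endpoint; the host, port, and route prefix are placeholders, chosen only to show how the handler maps '~' to '/':

import requests

# '/job/' is an assumed route prefix; the handler turns 'my_category~my_job'
# into the job type 'my_category/my_job'
resp = requests.post('http://localhost:5000/job/my_category~my_job',
                     json={'patient_id': 12345})
print(resp.status_code, resp.text)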
Example #6
async def test_worker_retry(
    process_resource, sample_resource, input_queue, output_queue
):
    """Worker should retry after a single error."""

    process_resource.side_effect = [
        ProcessedResource(
            processed=False,
            processing_date=datetime.datetime.utcnow(),
            resource=sample_resource,
        ),
        ProcessedResource(
            processed=True,
            processing_date=datetime.datetime.utcnow(),
            resource=sample_resource,
        ),
    ]
    future = asyncio.ensure_future(worker("test", input_queue, output_queue))
    await input_queue.put(sample_resource)
    await input_queue.join()
    second = sample_resource.copy()
    second["retries"] = 1
    process_resource.assert_has_calls(
        [call(sample_resource), call(second),]
    )
    assert input_queue.empty()
    # Only one output
    assert output_queue.qsize() == 1
    result = await output_queue.get()
    # Result will have creation_date as a datetime
    result["creation_date"] = result["creation_date"].isoformat()
    assert result == sample_resource
    future.cancel()
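The tests in this family never show the coroutine itself. Below is a sketch of the kind of worker they imply, with the retry bookkeeping reconstructed from the assertions; config.MAX_ATTEMPTS, the requeue shape, and the error handling are assumptions, not the project's actual implementation:

async def worker(name, input_queue, output_queue):
    while True:
        resource = await input_queue.get()
        try:
            # process_resource is the (mocked) processing function from the tests
            result = await process_resource(resource)
            if result.processed:
                await output_queue.put(result.resource)
            elif resource.get('retries', 0) + 1 < config.MAX_ATTEMPTS:
                retry = resource.copy()
                retry['retries'] = retry.get('retries', 0) + 1
                await input_queue.put(retry)  # requeue with an incremented counter
        except Exception:
            pass  # test_worker_process_error expects errors to be swallowed
        finally:
            input_queue.task_done()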
Example #7
 def __init__(self, debug, plot, dict, wave):
     self.debug = debug
     self.plot = plot
     self.first = True
     self.queue = multiprocessing.Queue()
     self.characteristic = characteristics.characteristic(debug)
     self.worker = worker.worker(self.queue, debug, plot, dict, wave)
Example #8
def submit_job_with_category(job_category: str, job_name: str):
    """
    API for triggering jobs
    """
    h = get_host(request)
    # print(h)

    try:
        async_arg = request.args.get('async').lower()
        async_job = async_arg in ('true', 't', '1')
    except AttributeError:  # 'async' query param absent
        async_job = False
    synchronous = not async_job
    job_type = "{}/{}".format(job_category, job_name)
    job_file_path = "./nlpql/" + job_type + ".nlpql"
    if not valid_job(job_type):
        return Response(json.dumps(
            {
                'message':
                'Invalid API route. Valid Routes: ' + get_api_routes()
            },
            indent=4,
            sort_keys=True),
                        status=400,
                        mimetype='application/json')
    if request.method == 'POST':
        # Checking if the selected job is valid
        data = request.get_json()
        return worker(job_file_path, data, synchronous=synchronous)
    else:
        return Response(get_nlpql(job_file_path),
                        status=200,
                        mimetype='text/plain')
Example #9
 def create_one(self, id, isn):
     one = worker(id, isn, self.results, self.share_dic, self.locks,
                  self.barriers, self.s2c_io, self.c2s_io)
     one.daemon = True
     one.start()
     self.workers.append(one)
     self.workers_number += 1
Example #10
def jump():
    global name
    reg = Gui()
    # dispatch on the role prefix of the login name, then close the login window
    if re.match("S", name):
        student(reg, mysql, name)
    elif re.match("T", name):
        teacher(reg, mysql, name)
    elif re.match("W", name):
        worker(reg, mysql, name)
    elif re.match("M", name):
        manager(reg, mysql, name)
    gui.close()
Example #11
def submit_job_with_nlpql(j):
    the_json = request.get_json()
    nlpql = the_json.get('nlpql', '')

    # request.args.get() returns None when a parameter is absent and never
    # raises, so these flags can be read directly
    async_job = request.args.get('async') == 'true'
    return_null_results = request.args.get('return_null_results') == 'true'
    synchronous = not async_job

    if nlpql == '':
        return Response(json.dumps(
            {
                'message':
                'Invalid body for this endpoint. Please make sure NLPQL is passed in.'
            },
            indent=4,
            sort_keys=True),
                        status=400,
                        mimetype='application/json')

    return worker('',
                  the_json,
                  synchronous=synchronous,
                  return_null_results=return_null_results,
                  nlpql=nlpql)
Example #12
async def test_worker_max_retries(
    process_resource, sample_resource, input_queue, output_queue
):
    """Worker will give up after a sufficient number of retries."""

    fails = []
    for i in range(config.MAX_ATTEMPTS):
        resource = sample_resource.copy()
        resource["retries"] = i
        result = ProcessedResource(
            processed=False,
            processing_date=datetime.datetime.utcnow(),
            resource=resource,
        )
        fails.append(result)
    process_resource.side_effect = fails + [
        ProcessedResource(
            processed=True,
            processing_date=datetime.datetime.utcnow(),
            resource=sample_resource,
        ),
    ]
    future = asyncio.ensure_future(worker("test", input_queue, output_queue))
    await input_queue.put(sample_resource)
    await input_queue.join()
    assert process_resource.call_count == config.MAX_ATTEMPTS
    assert input_queue.empty()
    # No output expected
    assert output_queue.empty()
    future.cancel()
Example #13
    def run(self):
        self.Wgetopt()
        #print("-->request: \n%s") % self.req
        #print("-->host: \n%s") % self.header_host
        #print("-->port: \n%s") % self.port

        rest_concur = self.concur_count % self.thread_count
        rest_req = self.req_count % self.thread_count

        for i in range(self.thread_count):
            reqs = self.req_count // self.thread_count
            concur = self.concur_count // self.thread_count

            if rest_concur:
                concur += 1
                rest_concur -= 1

            if rest_req:
                reqs += 1
                rest_req -= 1

            worker_t = worker(self, i, concur, reqs)

            # pass the bound method itself (not its call result) and start the thread
            t = threading.Thread(target=worker_t.work_thread,
                                 name="thread-" + str(worker_t.id))
            t.start()
Example #14
 def viewInfo(self, index):
     ''' docstring: double-click an entry to display the file contents '''
     filepath = self.fd.getData(index.row(), 1)
     filename = self.fd.getData(index.row(), 0)
     filepid = self.fd.getData(index.row(), 2)
     viewjsonworker = worker(0, self.viewJson, filename, filepath, filepid)
     # viewjsonworker.signals.result.connect(self.logText.append)
     self.threadpool.start(viewjsonworker)
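Examples #14, #22, #24, #26, #32 and #34 all hand a mode flag, a callable, and its arguments to worker and start it on a QThreadPool. A sketch of the QRunnable-style class they appear to assume; the signal set is inferred from usage and the meaning of the first argument is a guess:

from PyQt5.QtCore import QObject, QRunnable, pyqtSignal, pyqtSlot

class WorkerSignals(QObject):
    finished = pyqtSignal()
    message = pyqtSignal(str)
    progress = pyqtSignal(int)
    result = pyqtSignal(object)

class worker(QRunnable):
    def __init__(self, mode, fn, *args, **kwargs):
        super().__init__()
        self.mode = mode            # 0/1 in the snippets; exact meaning unknown
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
        self.signals = WorkerSignals()

    @pyqtSlot()
    def run(self):
        try:
            result = self.fn(*self.args, **self.kwargs)
            self.signals.result.emit(result)
        finally:
            self.signals.finished.emit()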
Example #15
async def test_worker_empty_queue(process_resource, input_queue, output_queue):
    """Starting a new worker with no work to process."""

    future = asyncio.ensure_future(worker("test", input_queue, output_queue))
    # No work to do
    assert not process_resource.called
    assert output_queue.empty()
    future.cancel()
Example #16
    def addThread(self):
        self._cleanArray()
        self._threadArrayLen += 1
        tname = "Thread-{} worker".format(self._threadArrayLen)
#        tname= "Thread-{} ".format(self._threadArrayLen),self.getName()
        updater = worker.worker(queue=self._queue, db=self._db, paths=self._paths,
                                http_pool=self._http_pool, intervall=0.3,
                                name=tname)  # instantiate a worker thread with a unique name
        self._threadArray.append(updater)
        updater.start()
Example #17
 def __init_workers__(self):
     """Create the workers!"""
     if self.workers == []:
         for i in range(self.thread_size):
             _worker = worker.worker(name = "V-%d" % i, master = self)
             _worker.start()
             self.workers.append(_worker)
         print "[*] Create %d workers!" % self.thread_size
     else:
         print "[!] Init the workers Failed !!!"  
Example #18
def lamp(files):
    """
    This function starts to solve the versioning files
    This function verifies if each file is supported, that is, has a package manager specified in languagesConfiguration
    Then, verifies if each file exists and has a 'date' key
    :param files: the versioning files path
    :return: None
    """
    for file in files:
        path, pck_mng = search_language(file)

        try:
            file_obj, date = verify_file(path, pck_mng)
        except Exception:
            logging.error(Fore.RED +
                          'The file \'{}\' will not be executed'.format(file))
            continue  # doesn't execute this file

        # resolve all range versions
        wk.worker(file_obj, date, path, pck_mng)
Example #19
    def execute(self):
        try:
            # launch worker threads to execute tasks
            for i in range(1, int(NUM_WORKERS)+1, 1):
                t = worker(self.taskQueue, self.processs, i)
                t.daemon = True
                t.start()

            self.taskQueue.join()

        except IOError, e:
            print "executing have an exception occurred"
Example #20
 def __init__(self, ip, port, pythonPath):
    self.connection = connection.connection()
    if not self.connection.connect(ip, port):
        return
    cur_version = '{0}.{1}.{2};'.format(sys.version_info[0], sys.version_info[1], sys.version_info[2]) + getModulesList()
    self.connection.sendMessage('Join', cur_version)
    self.status = 'disconnected'
    self.codePath = ''
    self.result = ''
    self.name = 'defaultNodeName'
    self.worker = worker.worker(pythonPath)
    self.run()
Example #21
 def init(self, ip, port):
     self._server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
     self._server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR , 1)
     if not ip:
         ip = '127.0.0.1'
     if not port:
         port = 8888
     self._server.bind((ip, port))
     self._server.listen(10)
     
     for _ in range(self._num_workers):
         w = worker.worker()
         self._workers.append(w)
Example #22
 def delItem(self):
     ''' docstring: delete the selected entries (handled in an extra thread) '''
     if len(self.selectItems) == 0:
         self.setStatus('No entries selected')
     else:
         nowSelectItems = self.selectItems.copy()
         delitemworker = worker(1, self.delItem_multi, nowSelectItems)
         delitemworker.signals.finished.connect(
             lambda: self.modelViewUpdate())
         delitemworker.signals.finished.connect(
             lambda: self.setStatus('Entries deleted'))
         delitemworker.signals.message.connect(self.logWidget.addLog)
         self.threadpool.start(delitemworker)
Example #23
def main():
    # alternative environments kept for reference:
    #   'CartPole-v0', 'MountainCar-v0', 'Acrobot-v1', 'Pong-v4',
    #   'PongDeterministic-v4', 'Breakout-v0'
    #env = retro.make(game='/Users/mainuser/devbin/retro_contest/sonic/',
    #        state='GreenHillZone.Act1', record='./logs')
    #https://github.com/openai/retro/blob/master/retro/retro_env.py#L114
    #env = retro.make(game='Airstriker-Genesis',
    #        use_restricted_actions=retro.ACTIONS_DISCRETE)

    env_name = 'SpaceInvaders-v0'
    env = gym.make(env_name)
    env.seed(0)
    print('[+] environment %s initialized' % env_name)

    #since we are preprocessing state
    state = env.reset()
    n_actions = env.action_space.n #discrete env
    state_shape = worker(None).process_state(state).shape
    print('[*] state shape: %s --> %s\n[*] actions: %s' % (
            state.shape, state_shape, n_actions))
    agent = worker(model(state_shape, n_actions))
    print('[+] worker initialized')

    agent.train(env, episodes=10, print_interval=1)
    agent.test(env, episodes=1, print_interval=1, records=4)
Example #24
 def dowItem(self):
     ''' docstring: download data from the remote end (handled in an extra thread) '''
     if len(self.selectItems) == 0:
         self.setStatus('No entries selected')
     else:
         nowSelectItems = self.selectItems.copy()
         dowitemworker = worker(1, self.dowItem_multi, nowSelectItems)
         for item in nowSelectItems:
             dowitemworker.signals.progress.connect(
                 self.updateProgress(item, 4))
         dowitemworker.signals.finished.connect(
             lambda: self.setStatus('Entries downloaded or the service has started fetching'))
         dowitemworker.signals.message.connect(self.logWidget.addLog)
         self.threadpool.start(dowitemworker)
Example #25
async def test_worker_process_error(
    process_resource, sample_resource, input_queue, output_queue
):
    """Worker should handle errors from the processor."""

    process_resource.side_effect = ValueError
    future = asyncio.ensure_future(worker("test", input_queue, output_queue))
    await input_queue.put(sample_resource)
    await input_queue.join()
    process_resource.assert_called_once_with(sample_resource)
    assert input_queue.empty()
    # No output expected
    assert output_queue.empty()
    future.cancel()
Example #26
 def undoRegItem(self):
     ''' docstring: cancel the announcement (handled in an extra thread) '''
     if len(self.selectItems) == 0:
         self.setStatus('No entries selected')
     else:
         nowSelectItem = self.selectItems.copy()
         undoregworker = worker(1, self.undoRegItem_multi, nowSelectItem)
         for item in nowSelectItem:
             undoregworker.signals.progress.connect(
                 self.updateProgress(item, 3))
         undoregworker.signals.finished.connect(
             lambda: self.setStatus('Entry announcements cancelled'))
         undoregworker.signals.message.connect(self.logWidget.addLog)
         self.threadpool.start(undoregworker)
Example #27
def aggregate_rollouts(master, A, params, n_samples):

    all_rollouts = np.zeros([n_samples, 2])

    timesteps = 0

    for i in range(n_samples):
        w = worker(params, master, A, i)
        all_rollouts[i] = np.reshape(w.do_rollouts(), 2)
        timesteps += w.timesteps

    all_rollouts = (all_rollouts -
                    np.mean(all_rollouts)) / (np.std(all_rollouts) + 1e-8)

    m = np.array(all_rollouts[:, 0] - all_rollouts[:, 1])
    return (m, timesteps)
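Each do_rollouts() call returns a pair of returns, which looks like an antithetic (positive/negative perturbation) pair, so m holds the standardized differences. A tiny numeric check of the normalization, illustrative only:

import numpy as np

all_rollouts = np.array([[2.0, 1.0], [4.0, 3.0]])
normed = (all_rollouts - np.mean(all_rollouts)) / (np.std(all_rollouts) + 1e-8)
m = normed[:, 0] - normed[:, 1]
# each rollout pair contributes its (positive - negative) return difference
print(m)  # both entries equal 1/std(all_rollouts) ~= 0.894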
Example #28
async def test_worker_result(
    process_resource, sample_resource, input_queue, output_queue
):
    """Worker should pull from input and put to the output."""

    future = asyncio.ensure_future(worker("test", input_queue, output_queue))
    await input_queue.put(sample_resource)
    await input_queue.join()
    process_resource.assert_called_once_with(sample_resource)
    assert input_queue.empty()
    assert output_queue.qsize() == 1
    result = await output_queue.get()
    # Result will have creation_date as a datetime
    result["creation_date"] = result["creation_date"].isoformat()
    assert result == sample_resource
    future.cancel()
Example #29
 def __init__(self, conf):
     with open(conf, 'r') as fd:
         config = yaml.safe_load(fd)  # yaml.load without a Loader is unsafe/deprecated
     self.deps = config['Dependences']
     self.config = config['Config']
     self.dataDir = self.config['dataDir']
     self.webMntPlg = self.config['enable_web_management']
     self.user = self.config['user']
     self.userTag = self.config['tag']
     self.vhost = self.config['vhost']
     self.vhost_enable = self.vhost['enable']
     self.vhost_path = self.vhost['path']
     self.userPermission = self.config['permission']
     self.worker = worker()
     self.color = colors()
     self.clusterNodes = self.config['clusterNodes']
     self.master = self.clusterNodes['mq01']
Example #30
    def run(self):
        """
        Do the actual training.
        """

        ## initialize workers
        workers = []
        for wId in xrange(self.cfg['nWorkers']):
            thisWorker = worker(self, id=wId)
            workers.append(thisWorker)
            thisWorker.start()

        for x in workers:
            x.join()
        self.save(os.path.join(self.outputDir, "final"),
                  savePlots=True,
                  overwrite=True)
Example #31
def submit_job_with_category(job_category: str, job_name: str):
    """
    API for triggering jobs
    """
    if request.method == 'POST':
        job_type = "{}/{}".format(job_category, job_name)
        # Checking if the selected job is valid
        if not valid_job(job_type):
            return Response(json.dumps({'message': 'Invalid API route. Valid Routes: ' + get_api_routes()}, indent=4,
                                       sort_keys=True), status=400,
                            mimetype='application/json')
        else:
            data = request.get_json()
            job_file_path = "nlpql/" + job_type + ".nlpql"
            return worker(job_file_path, data)
    else:
        return Response(json.dumps({'message': 'API supports only POST requests'}, indent=4, sort_keys=True),
                        status=400,
                        mimetype='application/json')
Example #32
 def advancedRegItem(self, nowSelectItem):
     ''' docstring: advanced announcement (handled in an extra thread, single entry) '''
     if nowSelectItem < 0 or nowSelectItem >= self.fd.rowCount():
         return
     filepath = self.fd.getData(nowSelectItem, 1)
     level = self.fd.getData(nowSelectItem, 5)
     whitelist = self.fd.getData(nowSelectItem, 6)
     kwargs = {}
     if level:
         kwargs['level'] = int(level)
     if whitelist:
         kwargs['WhiteList'] = list(map(int, whitelist.split(',')))
     regitemworker = worker(0, AddCacheSidUnit, filepath, 1, 1, 1, 1,
                            **kwargs)
     regitemworker.signals.finished.connect(
         lambda: self.updateProgress(nowSelectItem, 3)(100))
      regitemworker.signals.finished.connect(lambda: self.setStatus('Entry announced'))
     regitemworker.signals.finished.connect(SidAnn)
     self.threadpool.start(regitemworker)
Example #33
def create_workers(config_dict):
    '''Function: create_workers

    Description: This function creates worker class objects, using the configuration read in from the config file to populate the components.

    Input Variables:
    config_dict (dict)          - The dictionary of configuration that has been read in from the config file.

    Returns:            
    construction (array)        - An array containing all of the worker objects.
    '''
    construction = []
    for condition in config_dict:
        try:
            json.loads(config_dict[condition]['parameters'])
            job = worker.worker(worker_name=condition, config=config_dict[condition])
            construction.append(job)
        except ValueError:
            logwork.log_work('ERROR: Rule {} does not contain valid JSON. Please review and amend'.format(condition))
    return construction
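A hypothetical config_dict showing the structure create_workers expects (rule names and parameters are invented; this assumes the worker and logwork modules from the snippet are importable). The 'parameters' value must be a JSON string, otherwise the rule is logged and skipped:

config_dict = {
    'disk_usage_rule': {'parameters': '{"threshold": 90}'},   # valid JSON -> worker created
    'broken_rule':     {'parameters': 'threshold: 90'},       # not JSON -> ValueError, logged
}
workers = create_workers(config_dict)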
Example #34
 def openFolder(self):
     ''' docstring: open the folder containing the selected file (handled in an extra thread) '''
     nowSelectItem = self.selectItems.copy()
     if len(nowSelectItem) == 0:
         self.setStatus('No file selected')
         return
     elif len(nowSelectItem) != 1:
         self.setStatus('Too many files selected')
         return
     # print(nowSelectItem[0])
     tmp = self.fd.getData(nowSelectItem[0], 1)
     # print(tmp)
     if not tmp or not os.path.exists(tmp):
         self.setStatus('File does not exist')
         return
     filepath = tmp[:tmp.rfind('/')]
     openfolderworker = worker(0, os.startfile, filepath)
     openfolderworker.signals.finished.connect(
         lambda: self.setStatus('File opened'))
     self.threadpool.start(openfolderworker)
Example #35
    def __init__(self,config):
        configure = ConfigParser.RawConfigParser()
        configure.read(config)
        self.ip = configure.get('master', 'ip')
        self.port = configure.get('master', 'port')

        # initialize the thrift connection for reporting task status
        self.conn = Connection()
        self.conn.connect(self.ip, self.port)

        self.net_monitor_ = net_monitor()
        ip = get_local_ip("eth0")
        port = 9091
        info = None
        task_num = 0
        name = "crawler_%s" %(ip)
        uuid = md5.new('%s:%s' % (ip, datetime.datetime.now())).hexdigest()
        self.worker = worker(uuid, name, ip, info, task_num, port)
        self.threads = []
        # register the worker
        self.register()
        self.tasks = {}
        self.ports = []
        for i in range(10011,10111):
            self.ports.append(str(i))
        self.end_threads = []
        self.run_tasks = {}
        # initialize the thrift connection thread
        self.thrift = threading.Thread(target=crawler_worker.init_thrift, args=())
        self.thrift.start()
        # initialize the resource recycling thread
        self.recycle = threading.Thread(target=crawler_worker.recycle_res, args=(1,self.end_threads))
        self.recycle.start()
        # initialize the ADSL dial-up thread
        self.dail = threading.Thread(target=crawler_worker.adsl_dail, args=(self.run_tasks, self.worker))
        self.dail.start()
        # initialize the worker-info sending thread
        self.worker_thread = threading.Thread(target=crawler_worker.worker_info_send, args=(self.run_tasks, self.worker, self.conn))
        self.worker_thread.start()
Example #36
def fetcher(request):

    global id

    if request.method == "GET":
        urls = []
        for uid in data_by_id:  # avoid clobbering the global id
            _, interval = data_by_url[data_by_id[uid]]
            urls.append({
                "id": uid,
                "url": data_by_id[uid],
                "interval": interval
            })

        return json(body=urls)

    try:
        new_data = request.json

    except exceptions.InvalidUsage:
        return response.html(body="", status=400)

    try:
        url_id, _ = data_by_url[new_data['url']]
        data_by_url[new_data['url']] = (url_id, new_data['interval'])

    except KeyError:
        id += 1
        data_by_url[new_data['url']] = (id, new_data['interval'])
        data_by_id[id] = new_data['url']
        url_id = id

    new_worker = worker(url_id, new_data['url'], new_data['interval'])
    workers_by_id[url_id] = new_worker
    new_worker.work()

    return json({'id': url_id})
Example #37
def run_worker():
    instance = worker.worker()
    p = multiprocessing.current_process()
    print("Starting eventloop in %s %d" % (p.name, p.pid))
    instance.run()
Example #38
import logging
import worker

format='%(asctime)s %(message)s'

logging.basicConfig(filename='basic.log', format=format, level=logging.DEBUG)


logging.debug("debug level message")
logging.warning("warning level message")

worker.worker()

logging.info("test complete")
Example #39
 def __init__(self, debug, plot, dict, wave):
     self.debug = debug
     self.plot = plot
     self.queue = multiprocessing.Queue()
     self.worker = worker.worker(self.queue, debug, plot, dict, wave)
Example #40
import sys
import time
import thread
from devicedriver import devicedriver
from TCPAdapter import TCPAdapter
from TCPCameraAdapter import TCPCameraAdapter
from worker import worker

#main code

#TCP_IP = '10.172.42.160'
print "app start!!!!"
time.sleep(1)

d = devicedriver()
d.checkDevice()

w = worker()

adapter = TCPAdapter(5005)
adapter.ConnStart()

reporter = TCPAdapter(5000)
reporter.ConnStart()

Cadapter = TCPCameraAdapter(5555)
Cadapter.ConnStart()

#thread.start_new_thread( Cadapter.DoListen,())
thread.start_new_thread( Cadapter.GetFrameCMD,(w,))
print 'app started!'

Start=True
Example #41
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed, unresolved = 0, 0, 0, 0
    ms = mergestate(repo)
    ms.reset(wctx.p1().node(), mctx.node())
    moves = []
    for m, l in actions.items():
        l.sort()

    # prescan for merges
    for f, args, msg in actions['m']:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
        fcl = wctx[f1]
        fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    audit = repo.wvfs.audit
    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored
    for f in moves:
        if os.path.lexists(repo.wjoin(f)):
            repo.ui.debug("removing %s\n" % f)
            audit(f)
            util.unlinkpath(repo.wjoin(f))

    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')

    def dirtysubstate():
        # mark '.hgsubstate' as possibly dirty forcibly, because
        # modified '.hgsubstate' is misunderstood as clean,
        # when both st_size/st_mtime of '.hgsubstate' aren't changed,
        # even if "submerge" fails and '.hgsubstate' is inconsistent
        repo.dirstate.normallookup('.hgsubstate')

    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        dirtysubstate()
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    # remove in parallel (must come first)
    z = 0
    prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # get in parallel
    prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        dirtysubstate()
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # merge
    for f, args, msg in actions['m']:
        repo.ui.debug(" %s: %s -> m\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        if f == '.hgsubstate': # subrepo states need updating
            dirtysubstate()
            subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                             overwrite)
            continue
        audit(f)
        r = ms.resolve(f, wctx, labels=labels)
        if r is not None and r > 0:
            unresolved += 1
        else:
            if r is None:
                updated += 1
            else:
                merged += 1

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        audit(f)
        repo.wwrite(f, wctx.filectx(f0).data(), flags)
        util.unlinkpath(repo.wjoin(f0))
        updated += 1

    # local directory rename, get
    for f, args, msg in actions['dg']:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        repo.wwrite(f, mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions['e']:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        audit(f)
        util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
        updated += 1

    ms.commit()
    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved
Example #42
def applyupdates(repo, actions, wctx, mctx, actx, overwrite):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy
    actx is the context of the common ancestor

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed, unresolved = 0, 0, 0, 0
    ms = mergestate(repo)
    ms.reset(wctx.p1().node())
    moves = []
    actions.sort(key=actionkey)

    # prescan for merges
    for a in actions:
        f, m, args, msg = a
        repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
        if m == "m": # merge
            f2, fd, move = args
            if fd == '.hgsubstate': # merged internally
                continue
            repo.ui.debug("  preserving %s for resolve of %s\n" % (f, fd))
            fcl = wctx[f]
            fco = mctx[f2]
            if mctx == actx: # backwards, use working dir parent as ancestor
                if fcl.parents():
                    fca = fcl.p1()
                else:
                    fca = repo.filectx(f, fileid=nullrev)
            else:
                fca = fcl.ancestor(fco, actx)
            if not fca:
                fca = repo.filectx(f, fileid=nullrev)
            ms.add(fcl, fco, fca, fd)
            if f != fd and move:
                moves.append(f)

    audit = repo.wopener.audit

    # remove renamed files after safely stored
    for f in moves:
        if os.path.lexists(repo.wjoin(f)):
            repo.ui.debug("removing %s\n" % f)
            audit(f)
            util.unlinkpath(repo.wjoin(f))

    numupdates = len(actions)
    workeractions = [a for a in actions if a[1] in 'gr']
    updateactions = [a for a in workeractions if a[1] == 'g']
    updated = len(updateactions)
    removeactions = [a for a in workeractions if a[1] == 'r']
    removed = len(removeactions)
    actions = [a for a in actions if a[1] not in 'gr']

    hgsub = [a[1] for a in workeractions if a[0] == '.hgsubstate']
    if hgsub and hgsub[0] == 'r':
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    z = 0
    prog = worker.worker(repo.ui, 0.001, getremove, (repo, mctx, overwrite),
                         removeactions)
    for i, item in prog:
        z += i
        repo.ui.progress(_('updating'), z, item=item, total=numupdates,
                         unit=_('files'))
    prog = worker.worker(repo.ui, 0.001, getremove, (repo, mctx, overwrite),
                         updateactions)
    for i, item in prog:
        z += i
        repo.ui.progress(_('updating'), z, item=item, total=numupdates,
                         unit=_('files'))

    if hgsub and hgsub[0] == 'g':
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    for i, a in enumerate(actions):
        f, m, args, msg = a
        progress(_updating, z + i + 1, item=f, total=numupdates, unit=_files)
        if m == "m": # merge
            f2, fd, move = args
            if fd == '.hgsubstate': # subrepo states need updating
                subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                                 overwrite)
                continue
            audit(fd)
            r = ms.resolve(fd, wctx, mctx)
            if r is not None and r > 0:
                unresolved += 1
            else:
                if r is None:
                    updated += 1
                else:
                    merged += 1
        elif m == "d": # directory rename
            f2, fd, flags = args
            if f:
                repo.ui.note(_("moving %s to %s\n") % (f, fd))
                audit(f)
                repo.wwrite(fd, wctx.filectx(f).data(), flags)
                util.unlinkpath(repo.wjoin(f))
            if f2:
                repo.ui.note(_("getting %s to %s\n") % (f2, fd))
                repo.wwrite(fd, mctx.filectx(f2).data(), flags)
            updated += 1
        elif m == "dr": # divergent renames
            fl, = args
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)
        elif m == "rd": # rename and delete
            fl, = args
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)
        elif m == "e": # exec
            flags, = args
            audit(f)
            util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
            updated += 1
    ms.commit()
    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved
Example #43
	raise SystemExit

myproxy=sventypes.Proxy()
if os.path.exists("proxy"):
	for line in open("proxy"):
		if len(line.strip())>0:
			myproxy.add(line.strip())

que=sventypes.TaskQueue()
output=outer.Outer(que)
output.start()
r=redis.StrictRedis(host=config.supervison,port=config.redis_port,db=0)
# init worker threads
ws=[]
for i in range(0,thd_num):
	w=worker.worker(i,que,num_conn,myproxy)
	ws.append(w)
	w.start()

count=0
# init the task queue from inter; when there are more than 1000000 tasks, sleep 1s

# 1. get a file from inter
# 2. if the file has a line, read it, init a task from it, and add the task to the task queue; if not, go to 1
# 3. go to 2
while True:
	try:
		#url=pull_url(r)
		url=r.rpop(config.localhost)

		if url is None:
Example #44
import worker

while 1:
    program = worker.worker()
    program.start()

Example #45
#!/usr/bin/env python

import subprocess
from worker import worker

exp = worker()

for arg1 in [0,1]:
    for arg2 in [2,3]:
        cmd = 'python test_repeater.py %d %d'%(arg1,arg2)
        outfile = "results/%d-%d.txt"%(arg1,arg2)
        exp.add(cmd, outfile)
        exp.run()