Esempio n. 1
0
    def lookup(self, buildingID):
        from building.dummy import dummy
        '''
        @buildingID: int
        '''
        building = Building.objects.get(bid=buildingID)
        print building

        if building is None:
            return None

        if self.binstances.has_key(buildingID) is False:
            ins = None
            if building.bri == BUILDING_REPRESENTATION_DEBUG:
                ins = dummy.DummyBif(buildingID)
            elif building.bri == BUILDING_REPRESENTATION_IFC:
                raise NotImplementedError, 'Missing implementation of IFC plugin'
            elif building.bri == BUILDING_REPRESENTATION_REVIT:
                raise NotImplementedError, 'Missing implementation of Revit plugin'

            # Fire up the simulator for the building if necessary.
            self.comm_queues[buildingID] = Queue()
            p = Process(target=_Manager__init_simulator, args=(ins, self.comm_queues[buildingID]))
            p.start()

            self.binstances[buildingID] = ins

        return self.binstances[buildingID]
Esempio n. 2
0
    def test_litmus_with_authentication(self):
        """Run litmus test suite on HTTP with authentication.

        This test passes
        """
        try:
            # Start the WebDAV server in a child process; the args are
            # presumably (withAuthentication, withSSL) -- confirm against
            # run_wsgidav_server's signature.
            proc = Process(target=run_wsgidav_server, args=(True, False))
            proc.daemon = True
            proc.start()
            time.sleep(1)  # give the server a moment to start listening

            try:
                # Exit status 0 means the whole litmus suite passed.
                self.assertEqual(
                    subprocess.call([
                        "litmus", "http://127.0.0.1:8080/", "tester", "secret"
                    ]), 0, "litmus suite failed: check the log")
            except OSError:
                # The "litmus" binary is not installed; explain, then re-raise.
                print "*" * 70
                print "This test requires the litmus test suite."
                print "See http://www.webdav.org/neon/litmus/"
                print "*" * 70
                raise

        finally:
            # Always tear the server down, even when the suite fails.
            proc.terminate()
            proc.join()
def main(gcm="", rcm="", out_folder=""):
    pc = None
    pf = None

    kwargs = {
        "start_year": 1970,
        "end_year": 1999,
        "rcm": rcm, "gcm": gcm,
        "out_folder": out_folder
    }
    in_folder = "data/narccap/{0}-{1}/current".format(gcm, rcm)

    if os.path.isdir(in_folder):
        pc = Process(target=interpolate_to_amno, args=(in_folder, ), kwargs=kwargs)
        pc.start()
    else:
        print "{0} does not exist, ignoring the period ...".format(in_folder)

    kwargs = {
        "start_year": 2041,
        "end_year": 2070,
        "rcm": rcm, "gcm": gcm,
        "out_folder": out_folder
    }
    in_folder = "data/narccap/{0}-{1}/future".format(gcm, rcm)
    if os.path.isdir(in_folder):
        pf = Process(target=interpolate_to_amno, args=(in_folder, ), kwargs=kwargs)
        pf.start()
    else:
        print "{0} does not exist, ignoring the period ...".format(in_folder)

    #do current and future climates in parallel
    if pc is not None: pc.join()
    if pf is not None: pf.join()
Esempio n. 4
0
    def service_background_jobs(self):
        """Poll the reports API for PENDING jobs and service each one in a
        separate child process.
        """
        logger.info('service_background_jobs')

        # NOTE: paths must begin with a "/", indicating that the first part of
        # the URI is a script name (which each app, i.e "reports" serves as).
        # see django.core.handlers.wsgi.__init__
        uri = '/' + '/'.join([SCHEMA.REPORTS_API_URI, JOB.resource_name])
        data = {JOB.STATE: SCHEMA.VOCAB.job.state.PENDING}
        kwargs = {}

        logger.info('get jobs: %r', uri)
        resp = self.api_client.get(uri, data=data, **kwargs)

        # Responses may be wrapped in an API_RESULT_DATA envelope.
        job_listing = self.api_client.deserialize(resp)
        if API_RESULT_DATA in job_listing:
            job_listing = job_listing[API_RESULT_DATA]

        for job in job_listing:
            logger.info('found job: %r', job)
            job_id = job[JOB.ID]
            logger.info('Process the job: %r', job_id)
            p = Process(target=self.client_processor.service, args=(job_id, ))
            # make the parent process wait:
            # p.daemon = True
            # if set to true, then the parent process won't wait.
            logger.info('start')
            p.start()
            logger.info('started...')

        logger.debug('servicing completed')
Esempio n. 5
0
    def test_litmus_with_authentication(self):
        """Run litmus test suite on HTTP with authentication.

        This test passes
        """
        try:
            # Start the WebDAV server in a child process; the args are
            # presumably (withAuthentication, withSSL) -- confirm against
            # run_wsgidav_server's signature.
            proc = Process(target=run_wsgidav_server, args=(True, False))
            proc.daemon = True
            proc.start()
            time.sleep(1)  # give the server a moment to start listening

            try:
                # Exit status 0 means the whole litmus suite passed.
                self.assertEqual(subprocess.call(["litmus", "http://127.0.0.1:8080/", "tester", "secret"]),
                                 0,
                                 "litmus suite failed: check the log")
            except OSError:
                # The "litmus" binary is not installed; explain, then re-raise.
                print "*" * 70
                print "This test requires the litmus test suite."
                print "See http://www.webdav.org/neon/litmus/"
                print "*" * 70
                raise

        finally:
            # Always tear the server down, even when the suite fails.
            proc.terminate()
            proc.join()
Esempio n. 6
0
def process_input_dir(args, input_path, output_path):
    patt = input_path + os.sep + "*" + args.extension
    files = glob.glob(patt)
    docs_num = len(files)
    if docs_num > args.threads:
        slice_size = docs_num / args.threads
    else:
        slice_size = 1
    print "Threads:", args.threads
    print "Documents number:", docs_num
    print "Documents per thread:", slice_size

    start = 0
    jobs = []
    for job_num in range(args.threads):
        print "Initializing process", job_num
        end = start + slice_size
        p = Process(target=lemmatize_files, args=(files[start:end], output_path, args))
        print files[start:end]
        jobs.append(p)
        p.start()
    	start += slice_size

    for p in jobs:
        p.join()
    
    if (docs_num % 2) == 1:
        lemmatize_files(files, output_path, args)
Esempio n. 7
0
def process(host, port):
    """Spawn a daemon child process executing run(host, port) and return it."""
    from multiprocessing.process import Process

    worker = Process(target=run, args=(host, port))
    worker.daemon = True  # die with the parent
    worker.start()
    return worker
Esempio n. 8
0
    def service_background_jobs(self):
        """Poll the reports API for PENDING jobs and service each one in a
        separate child process."""
        logger.info('service_background_jobs')

        # NOTE: paths must begin with a "/", indicating that the first part of
        # the URI is a script name (which each app, i.e "reports" serves as).
        # see django.core.handlers.wsgi.__init__
        uri = '/' + '/'.join([SCHEMA.REPORTS_API_URI, JOB.resource_name])
        data = {JOB.STATE: SCHEMA.VOCAB.job.state.PENDING}
        kwargs = {}

        logger.info('get jobs: %r', uri)
        resp = self.api_client.get(uri, data=data, **kwargs)

        # Responses may be wrapped in an API_RESULT_DATA envelope.
        job_listing = self.api_client.deserialize(resp)
        if API_RESULT_DATA in job_listing:
            job_listing = job_listing[API_RESULT_DATA]

        for job in job_listing:
            logger.info('found job: %r', job)
            job_id = job[JOB.ID]
            logger.info('Process the job: %r', job_id)
            p = Process(target=self.client_processor.service, args=(job_id, ))
            # make the parent process wait:
            # p.daemon = True
            # if set to true, then the parent process won't wait.
            logger.info('start')
            p.start()
            logger.info('started...')

        logger.debug('servicing completed')
    def __init__(self, accessLevel, eightApi):
        """Store the access level and API handle, initialise pygame, run the
        startup check, and launch the input handler in a child process.

        @accessLevel: access level granted to this controller
        @eightApi: API handle kept for later use
        """
        self.accessLevel = accessLevel
        self._eightApi = eightApi

        pygame.init()
        self.check()
        # This is a multiprocessing.Process, not a thread -- name it so.
        input_process = Process(target=self.inputHandler)
        input_process.start()
Esempio n. 10
0
def async_file_reading(fd, callback):
    """Helper which instantiates and runs an AsynchronousFileReader.

    Starts a reader on *fd* plus a consumer process feeding each queued
    item to *callback*; returns the (reader, consumer) pair so the caller
    can join/terminate them.
    """
    queue = SimpleQueue()
    reader = AsynchronousFileReader(fd, queue)
    reader.start()
    # Fixed misspelled local name ("consummer").
    consumer = Process(target=consume_queue, args=(queue, callback))
    consumer.start()
    return (reader, consumer)
Esempio n. 11
0
 def add_server(self, app_name, service_name, host, port, processor, use_simple_server=True, wait=1,
                use_ssl=False, ca_certs=None, cert=None, key=None):
     """Register the endpoint with service discovery, start a Thrift server
     for it in a child process, and track that process for later shutdown.

     @wait: seconds to sleep so the server can come up before returning.
     @use_ssl/ca_certs/cert/key: TLS configuration forwarded to the server.
     """
     self.sd_client.register_endpoint(app_name, service_name, host, port)
     server_process = Process(target=self.__thrift_server,
                              args=(processor, host, port, use_simple_server, use_ssl, ca_certs, cert, key))
     server_process.start()
     time.sleep(wait)
     self.server_processes.append(server_process)
Esempio n. 12
0
def async_file_reading(fd, callback):
    """Helper which instantiates and runs an AsynchronousFileReader.

    Starts a reader on *fd* plus a consumer process feeding each queued
    item to *callback*; returns the (reader, consumer) pair so the caller
    can join/terminate them.
    """
    queue = SimpleQueue()
    reader = AsynchronousFileReader(fd, queue)
    reader.start()
    # Fixed misspelled local name ("consummer").
    consumer = Process(target=consume_queue, args=(queue, callback))
    consumer.start()
    return (reader, consumer)
Esempio n. 13
0
def main():
    """Launch the auth server as a child process and install a SIGINT
    handler that shuts it down cleanly."""
    server = Process(target=run_auth_server)
    server.start()

    def sigint_handler(signal, frame):
        # Ctrl-C: stop the child before the parent exits.
        print("Terminating servers...")
        server.terminate()
        server.join()

    signal.signal(signal.SIGINT, sigint_handler)
Esempio n. 14
0
def test_mcdpweb_server(dirname):
    """Smoke-test the mcdpweb server: start it on a random port, verify 404
    handling, mirror the whole site with wget, and check the exceptions the
    server recorded while being spidered.
    """
    port = random.randint(11000, 15000)
    base = 'http://127.0.0.1:%s' % port

    p = Process(target=start_server, args=(dirname, port,))
    p.start()

    print('sleeping')
    time.sleep(5)  # no readiness probe; just give the server time to come up

    # A bogus URL must yield an HTTP 404.
    try:
        url_wrong = base + '/not-existing'
        urllib2.urlopen(url_wrong).read()
    except HTTPError:
        pass
    else:
        raise Exception('Expected 404')

    # now run the spider
    tmpdir = tempfile.mkdtemp(prefix='wget-output')
    cwd = '.'
    cmd = ['wget', '-nv', '-P', tmpdir, '-m', base]
#     res = system_cmd_result(
#             cwd, cmd,
#             display_stdout=True,
#             display_stderr=True,
#             raise_on_error=True)
    sub = subprocess.Popen(
                cmd,
                bufsize=0,
                cwd=cwd)
    sub.wait()

    # The server keeps a list of exceptions; the first must be the 404
    # we provoked above.
    exc = get_exceptions(port)

    if len(exc) == 0:
        msg = 'Expected at least a not-found error'
        raise Exception(msg)

    if not 'not-existing' in exc[0]:
        raise Exception('Could not find 404 error')

    exc = exc[1:]

    # Any remaining exceptions mean the spidered pages triggered real errors.
    if exc:
        msg = 'Execution raised errors:\n\n'
        msg += str("\n---\n".join(exc))
        raise_desc(Exception, msg)

    # Ask the server to shut itself down, then wait for the child process.
    url_exit = base + '/exit'
    urllib2.urlopen(url_exit).read()

    print('waiting for start_server() process to exit...')
    p.join()
    print('...clean exit')
Esempio n. 15
0
def test_mcdpweb_server(dirname):
    """Smoke-test the mcdpweb server: start it on a random port, verify 404
    handling, mirror the whole site with wget, and check the exceptions the
    server recorded while being spidered.
    """
    port = random.randint(11000, 15000)
    base = 'http://127.0.0.1:%s' % port

    p = Process(target=start_server, args=(
        dirname,
        port,
    ))
    p.start()

    print('sleeping')
    time.sleep(5)  # no readiness probe; just give the server time to come up

    # A bogus URL must yield an HTTP 404.
    try:
        url_wrong = base + '/not-existing'
        urllib2.urlopen(url_wrong).read()
    except HTTPError:
        pass
    else:
        raise Exception('Expected 404')

    # now run the spider
    tmpdir = tempfile.mkdtemp(prefix='wget-output')
    cwd = '.'
    cmd = ['wget', '-nv', '-P', tmpdir, '-m', base]
    #     res = system_cmd_result(
    #             cwd, cmd,
    #             display_stdout=True,
    #             display_stderr=True,
    #             raise_on_error=True)
    sub = subprocess.Popen(cmd, bufsize=0, cwd=cwd)
    sub.wait()

    # The server keeps a list of exceptions; the first must be the 404
    # we provoked above.
    exc = get_exceptions(port)

    if len(exc) == 0:
        msg = 'Expected at least a not-found error'
        raise Exception(msg)

    if not 'not-existing' in exc[0]:
        raise Exception('Could not find 404 error')

    exc = exc[1:]

    # Any remaining exceptions mean the spidered pages triggered real errors.
    if exc:
        msg = 'Execution raised errors:\n\n'
        msg += str("\n---\n".join(exc))
        raise_desc(Exception, msg)

    # Ask the server to shut itself down, then wait for the child process.
    url_exit = base + '/exit'
    urllib2.urlopen(url_exit).read()

    print('waiting for start_server() process to exit...')
    p.join()
    print('...clean exit')
Esempio n. 16
0
def main():
    """Launch the auth server as a child process and install a SIGINT
    handler that shuts it down cleanly."""
    auth_server = Process(target=run_auth_server)
    auth_server.start()
    print "Access http://10.10.112.59:8081/app in your browser"

    def sigint_handler(signal, frame):
        # Ctrl-C: stop the child before the parent exits.
        print "Terminating servers..."
        auth_server.terminate()
        auth_server.join()

    signal.signal(signal.SIGINT, sigint_handler)
Esempio n. 17
0
def main():
    """Launch the app server as a child process and install a SIGINT
    handler that shuts it down cleanly."""
    server = Process(target=run_app_server)
    server.start()
    print("Access http://localhost:8081/app in your browser")

    def sigint_handler(signal, frame):
        # Ctrl-C: stop the child before the parent exits.
        print("Terminating servers...")
        server.terminate()
        server.join()

    signal.signal(signal.SIGINT, sigint_handler)
Esempio n. 18
0
def _launch(type_name, config, is_important):
    """Instantiate the plugin registered under *type_name* and start it in
    a child process.

    @is_important: important plugins run as non-daemon processes so the
    parent waits for them on exit.
    """
    plugin_clazz = _get_type(type_name)
    if not plugin_clazz:
        # Bail out: without the early return the code below crashed with
        # "'NoneType' object is not callable".  (warning: warn() is a
        # deprecated alias of warning().)
        logging.warning('could not find %s plugin' % type_name)
        return

    d = plugin_clazz(config)

    p = Process(target=d.start)
    p.daemon = not is_important
    p.name = 'plugin: %s' % d.name
    p.start()
Esempio n. 19
0
 def __init__(self):
     """Set up shared speed/direction values, initialise the motors, and
     start the steering loop in a child process."""
     # Shared 'f' (float) Values so the steering process sees updates
     # made by the parent process.
     self.__actualSpeed = Value('f', 0.0)
     self._targetSpeed = Value('f', 0.0)
     
     self.__actualDirection = Value('f', 0.0)
     self._targetDirection = Value('f', 0.0)
 
     self.__setupMotors()
     
     # Deliberately non-daemon (see commented line) -- TODO confirm intent.
     steering = Process(target=self.steer)
     #steering.daemon = True
     steering.start()
Esempio n. 20
0
def import_library_in_another_process(path, args):
    """Import a library in a child process and return the initialized result.

    The child (library_initializer) puts either the result or an Exception
    instance on the queue.  Raises ImportError when the child reports an
    error or dies without producing anything.
    """
    q = Queue(maxsize=1)
    p = Process(target=library_initializer, args=(q, path, args))
    p.start()
    while True:
        try:
            result = q.get(timeout=0.1)
        except Empty:
            if p.is_alive():
                continue
            # The child may have put its result and exited between the
            # timed get and the liveness check -- drain once before failing.
            try:
                result = q.get_nowait()
            except Empty:
                raise ImportError()
        if isinstance(result, Exception):
            raise ImportError(result)
        return result
Esempio n. 21
0
def main():
    
    ''' parse the command line - new up the appl and listen on port '''
    # Optional config file overrides defaults; command line wins last.
    if os.path.isfile("../kew_pe.conf"):
        print ("Loading config file ../kew_pe.conf")
        options.parse_config_file("../kew_pe.conf")
    options.parse_command_line()
        
    logging.basicConfig(level=logging.DEBUG)
    #report_control.process('localhost',8081)
    
    # Run the report-control server as a daemon child process so it dies
    # together with the parent.
    process = Process(target=report_control.run, name="report_control", kwargs={'host':'localhost', 'port':8081})
    process.daemon = True
    process.start()           
Esempio n. 22
0
def start_sched():
    """Start every configured job in its own child process and install the
    scheduler's signal handlers."""
    j_logger.info(u'starting job scheduler ...')
    jobs = get_all_jobs()
    for job in jobs:
        j_logger.info(u'starting job %s ' % job.name)
        job.run_status = Value('i', 0) # job status value (shared with the child)
        try:
            p = Process(target=_inner_job, name=job.name,args=(job,))
            p.start()
            job.process_id = p.pid
            j_logger.info(u'job %s started !' % job.name)
        except Exception as e:
            # One job failing to start must not stop the others.
            j_logger.error(u'job %s fail to start,due to [%s]!' % (job.name,e))
    register_signal_notify()
    j_logger.info(u'job scheduler started !')
Esempio n. 23
0
def main():
    """Start the auth and app servers as child processes and install a
    SIGINT handler that tears both down in order."""
    auth = Process(target=run_auth_server)
    auth.start()
    app = Process(target=run_app_server)
    app.start()
    print("Access http://localhost:8081/app in your browser")

    def sigint_handler(signal, frame):
        # Ctrl-C: stop auth first, then app, waiting on each.
        print("Terminating servers...")
        for proc in (auth, app):
            proc.terminate()
            proc.join()

    signal.signal(signal.SIGINT, sigint_handler)
Esempio n. 24
0
class Downloader(object):
    """Fetches pages in a watchdog child process, retrying failed fetches."""

    def __init__(self, timeout=30, retries=100, wait=1):
        # timeout: seconds to wait for one fetch before killing the child
        # retries: max attempts made by retry_fetch_data()
        # wait: seconds to pause after each fetch (throttling)
        self.timeout = timeout
        self.retries = retries
        self.wait = wait
        
        # Manager provides the shared dict used to pass data back from
        # the child process.
        self.manager = SyncManager()
        self.manager.start()
        
    def retry_fetch_data(self, url):
        """Fetch *url*, retrying up to self.retries times; returns the data
        or None when every attempt failed."""
        market_data = self.fetch_data(url)
        
        retries = 1
        while not market_data and retries < self.retries:
            print "Retry #%s..." % str(retries)
            market_data = self.fetch_data(url)
            if market_data:
                print "Fetched: " + str(len(market_data))
            else:
                print "Fetched nothing!"
            retries += 1
        
        return market_data
    
    def fetch_data(self, url):
        """Fetch *url* in a child process killed after self.timeout seconds;
        returns the page data, or None on timeout/failure."""
        # Truncate long URLs in the progress message.
        limit = 60
        msg = "Downloading " + url[0: min(limit, len(url))] 
        if len(url) > limit:
            msg += "(+" + str(len(url) - limit) + ")"
        print msg
            
        return_dict = self.manager.dict()
        self.job = Process(target=get_page_data, args=(url, return_dict))
        self.job.start()
        
        # Watchdog: wait at most self.timeout seconds, then kill the child.
        self.job.join(self.timeout)
        if self.job.is_alive():
            self.job.terminate()
        self.job = None
        
        market_data = None
        if 'page' in return_dict:
            market_data = return_dict['page']
        
        if self.wait > 0:
            time.sleep(self.wait)
        
        return market_data
Esempio n. 25
0
def start_sched():
    """Start every configured job in its own child process and install the
    scheduler's signal handlers."""
    j_logger.info(u'starting job scheduler ...')
    jobs = get_all_jobs()
    for job in jobs:
        j_logger.info(u'starting job %s ' % job.name)
        job.run_status = Value('i', 0)  # job status value (shared with the child)
        try:
            p = Process(target=_inner_job, name=job.name, args=(job, ))
            p.start()
            job.process_id = p.pid
            j_logger.info(u'job %s started !' % job.name)
        except Exception as e:
            # One job failing to start must not stop the others.
            j_logger.error(u'job %s fail to start,due to [%s]!' %
                           (job.name, e))
    register_signal_notify()
    j_logger.info(u'job scheduler started !')
Esempio n. 26
0
def main():
    """Run the auth server in a child process, print a sample curl command,
    and install a SIGINT handler for a clean shutdown."""
    server = Process(target=run_auth_server)
    server.start()
    print("To test getting an auth token, execute the following curl command:")
    print(
        "curl --ipv4 -v -X POST"
        " -d 'grant_type=client_credentials&client_id=abc&client_secret=xyz' "
        "http://localhost:8080/token"
    )

    def sigint_handler(signal, frame):
        # Ctrl-C: stop the child before the parent exits.
        print("Terminating server...")
        server.terminate()
        server.join()

    signal.signal(signal.SIGINT, sigint_handler)
Esempio n. 27
0
 def set_from_file(self, varfile_path, args):
     """Evaluate a variable file in a child process and import the resulting
     variables into this store.

     Raises DataError when the child reports an error or yields nothing.
     """
     q = Queue()
     p = Process(target=set_from_file, args=(q, varfile_path, args))
     p.start()
     # NOTE(review): joining before draining the queue can deadlock if the
     # child puts more data than the queue's pipe buffer holds -- confirm.
     p.join()
     there_are_results = False
     while True:
         try:
             results = q.get_nowait()
             there_are_results  = True
             # A single-element result is an error message from the child.
             if len(results) == 1:
                 raise DataError(results[0])
             self.set(*results)
         except Empty:
             if not there_are_results:
                 raise DataError('No variables')
             return
Esempio n. 28
0
 def _start(self,name,cpu, module_name, class_name, params):
     """Spawn *cpu* worker processes running the mapper or reducer loop.

     @name: "mapper" or "reducer" -- selects the queue-runner function
     @cpu: number of worker processes to start
     @module_name/@class_name/@params: forwarded to each worker
     """
     fn = None
     
     # Fresh queues for this run: input tasks, output results, log messages.
     self._processes = []
     self._in_queue = Queue()
     self._out_queue = Queue()
     self._log_queue = Queue()
     
     if name == "mapper":
         fn = q_run_mapper
     elif name == "reducer":
         fn = q_run_reducer
     
     for i in range(cpu):
         process = Process(target=fn,args=(module_name, class_name ,params, self._in_queue, self._out_queue, self._log_queue))
         self._processes.append(process)
         process.start()
Esempio n. 29
0
class FileResultsLogger(ResultsLogger):
    """ResultsLogger that writes every request record to a log file from a
    dedicated writer process fed through a queue."""

    process = None      # writer Process, created in start()
    queue = None        # Queue of formatted lines; "STOP" sentinel ends it
    logFileName = None  # destination path derived from the run parameters

    def __init__(self, args):
        self.queue = Queue()
        # Encode the simulation configuration into the file name.
        if args.no_p2p:
            fileName = '_'.join([args.mem_algo, str(args.mem_size), 'no_p2p'])
        elif args.no_mem:
            fileName = '_'.join(
                [args.p2p_net, 'no_mem', args.p2p_algo,
                 str(args.p2p_size)])
        else:
            fileName = '_'.join([
                args.p2p_net, args.mem_algo,
                str(args.mem_size), args.p2p_algo,
                str(args.p2p_size)
            ])
        self.logFileName = 'logs/' + fileName + '.logs'

    def logRequest(self, nodeId, address, latency, cacheLevel=-1):
        """Record one request and queue a tab-separated line for the writer."""
        ResultsLogger.logRequest(self,
                                 nodeId,
                                 address,
                                 latency,
                                 cacheLevel=cacheLevel)
        self.queue.put("{}\t{}\t{}\t{}\n".format(nodeId, address, latency,
                                                 cacheLevel))

    def finish(self):
        """Signal the writer to stop, then wait for it to flush and exit."""
        self.queue.put("STOP")
        self.process.join()

    def writerProcess(self):
        """Writer loop: drain the queue into the log file until STOP."""
        fileObject = open(self.logFileName, 'w+')
        for logEntry in iter(self.queue.get, "STOP"):
            fileObject.write(logEntry)
        fileObject.close()

    def start(self):
        """Launch the writer process; call before any logRequest()."""
        self.process = Process(target=self.writerProcess)
        self.process.start()
Esempio n. 30
0
 def test_cleanup(self):
     """
     Test the cleanup thread
     """
     # We want the cleanup thread to run in this context
     # but it is an infinite loop, so let's delay a shutdown.
     sd2 = "/tmp/shut2"
     # A route record that looks in-use; cleanup should release it.
     rec = {
         'address': '1.2.3.4',
         'ip': '10.128.0.1',
         'router': 'router',
         'last_associated': time(),
         'end_time': 0,
         'uid': 501,
         'user': '******',
         'jobid': '1233',
         'status': 'used'
     }
     self.db.routes2.insert(rec)
     # We want to shutdown the thread that is started on init
     with open(sd2, 'w') as f:
         f.write('1')
     settings = self.settings.copy()
     settings['COLLECTION'] = 'routes2'
     settings['SHUTFILE'] = sd2
     rt = router.Router(settings)
     # Wait for the init thread to shutdown
     sleep(0.2)
     # Now let's start our own
     if os.path.exists(sd2):
         os.remove(sd2)
     shut = Process(target=self._shutdown)
     shut.start()
     rt.cleanup()
     shut.terminate()
     # The record must have been released back to the pool.
     r = self.db.routes2.find_one({'address': '1.2.3.4'})
     self.assertEquals(r['status'], 'available')
     self.db.routes2.remove({})
     # With no records at all, cleanup reports failure with -1.
     rv = rt.cleanup()
     self.assertEquals(-1, rv)
Esempio n. 31
0
class ChordNode:
    """Runs a BlockingChordNode in a child process and exposes a proxy for
    talking to it."""

    process = None  # child Process hosting the node
    node = None     # ServerProxy connected to the child

    def createNode(self, nodeId, port, cacheStorage, knownHosts):
        # Child-process entry point: constructing the node blocks and serves.
        BlockingChordNode(nodeId, knownHosts, port, cacheStorage)

    def __init__(self, knownHosts, port=4000, cacheStorage=None):
        # TODO id?
        nodeId = random.randrange(NETWORK_SIZE)  # random id within the ring

        self.process = Process(target=self.createNode,
                               args=(nodeId, port, cacheStorage, knownHosts))
        self.process.start()
        self.node = ServerProxy(('', port), nodeId, ('localhost', port))

    def terminate(self):
        """
        Kill node process
        """
        self.process.terminate()
Esempio n. 32
0
def prepare_proxies(configdata):
    """Fetch and validate free proxies when the config selects proxy source
    type 2; otherwise do nothing."""
    
    if configdata[const.PROXY_CONFIG].get(const.PROXY_CONFIG_SOURCE_TYPE, u'1') != u'2':
        return 
    
    # Fetch the raw proxy list in a child process and wait for it to finish.
    p = Process(group=None, target=fetch_proxy,)
    p.start()
    p.join()
    
    print u'%s get %d free proxy' % (datetime.datetime.now(),
                                   len(open(u'proxy.txt', u'r').readlines()))
    
    # Validate the proxies concurrently for a bounded amount of time.
    c = Process(group=None, target=valid_proxy,)
    c.start()
    
    valid_time = int(configdata[const.PROXY_CONFIG].get(const.PROXY_VALID_TIME))
    print u'%s following %d seconds will valid the proxy' % (datetime.datetime.now(), valid_time)
    time.sleep(valid_time)
    c.terminate()  # validation is best-effort; kill it after the window
    
    print u'%s get %d effective proxy' % (datetime.datetime.now(),
                                len(open(u'enable_proxies.txt', u'r').readlines()))
Esempio n. 33
0
def start_actor_server(
        actor,
        host_port_tuple=None,
        start_in_background_thread=False,
        log_requests=False,
        server_starter=start_bjoern_server
        ):
    """Serve *actor* over WSGI.

    With start_in_background_thread the server runs in a daemon child
    (actually a multiprocessing.Process, despite the parameter name) and
    (host_uri, process) is returned; otherwise this call blocks serving.
    """
    if server_starter != start_bjoern_server:
        print("Using builtin server (slow)")

    if not host_port_tuple:
        # if no specific port is given,
        # run on free port
        host_port_tuple = ('', 0)

    port = host_port_tuple[1]
    if host_port_tuple[1] == 0:
        port = get_free_port(host_port_tuple)

    host_name = get_host_name(host_port_tuple[0])

    host_uri = 'http://{0}:{1}'.format(host_name, port)
    print("Running server on {0}".format(host_uri))

    wsgi_application = get_wsgi_application(
        actor, host_uri,
        log_requests=log_requests
    )

    if start_in_background_thread:
        # NOTE(review): this is a separate *process*, not a thread -- the
        # parameter name is misleading.
        process = Process(
            target=server_starter,
            args=(wsgi_application, host_name, port)
        )
        process.daemon = True
        process.start()
        return host_uri, process

    server_starter(wsgi_application, host_name, port)
Esempio n. 34
0
 def run(self):
     """Distribute the input files over the map worker processes and start
     the task-feeding and result-collecting threads."""
     items = get_file_list(self.path_list)     # full file list; reading the files is delegated to the map workers
     self.file_list = items
     self.total_count = len(items)
     item_step_count = self.total_count / self.map_worker_count  # tasks per worker (NOTE(review): unused below)
     print u'文件总数:%d' % self.total_count
     
     # Create (but do not yet start) one worker process per slot.
     for i in range(self.map_worker_count):
         name = 'pre_worker@%d' % (i + 1)
         p = Process(target=add_worker,args=(name,self.task_queue,self.result_queue))
         self.worker_process_list.append(p)
     
     self.start_time = time.clock()
     self.last_refresh_time = time.clock()
     print u'开始时间:',datetime.now()
     for p in self.worker_process_list:
         p.start()
     
     # Feeder/collector run as threads in the parent process.
     t_reader = threading.Thread(target=self.put_task_thread)
     t_writer = threading.Thread(target=self.get_result_thread)
     t_reader.start()
     t_writer.start()
     print '------------------------'
Esempio n. 35
0
def process_mongo(args, output_path):
    """Lemmatize the documents of a MongoDB collection using args.threads
    worker processes."""
    # connects to the MongoDB server
    if args.port:
        connection = Connection(args.address, args.port)
    else:
        connection = Connection(args.address)
    
    # gets the DB
    db = connection[args.db_name]
    
    # gets the collection
    collec = db[args.collection]
    
    # sets the number of documents to be processed by each thread
    docs_num = collec.count()
    slice_size = docs_num / args.threads
    print "Threads:", args.threads
    print "Documents number:", docs_num
    print "Documents per thread:", slice_size

    # initiates a thread for each slice of documents
    # the slices are controlled using the base and offset variables
    base = 0
    offset = slice_size
    jobs = []
    for thread_num in range(args.threads):
        print "Initializing process", thread_num
        p = Process(target=lemmatize_slice, args=(collec, base, offset, args, output_path))
        jobs.append(p)
        p.start()
        base += offset
    
    for p in jobs:
        p.join()
    
    # NOTE(review): the odd-count check below does not cover every remainder
    # left by the integer division above -- documents past
    # threads * slice_size may be skipped when docs_num % threads > 1.
    if (docs_num % 2) == 1:
        lemmatize_slice(collec, base, offset, args, output_path)
Esempio n. 36
0
def get_stream(hdf5_file, which_set, batch_size=None):
    dataset = TrajectoryDataset(which_sets=(which_set, ))
    if batch_size == None:
        batch_size = dataset.num_examples
    data_stream = DataStream(dataset=dataset,
                             iteration_scheme=ShuffledScheme(
                                 examples=dataset.num_examples,
                                 batch_size=batch_size))

    load_in_memory = os.path.getsize(
        hdf5_file) < 14 * 10**9 or which_set == 'test'
    if not load_in_memory:
        port = 5557 if which_set == 'train' else 5558
        print port
        server_process = Process(target=start_server,
                                 args=(data_stream, port, 10))
        server_process.start()
        data_stream = ServerDataStream(dataset.sources,
                                       False,
                                       host='localhost',
                                       port=port,
                                       hwm=10)

    return data_stream
Esempio n. 37
0
class RemoteTest(unittest.TestCase):
    """
    Test the Component, DataFlow, and VAs when shared remotely.
    The test cases are run as "clients" and at start a server is started.
    """
    # Name of the Pyro container/daemon the server side registers under.
    container_name = "test"

    def setUp(self):
        # Start the container server (thread in debug mode, child process
        # otherwise) and connect a Pyro proxy plus the "mycomp" component.
        # Use Thread for debug:
        if USE_THREADS:
            self.server = threading.Thread(target=ServerLoop,
                                           args=(self.container_name, ))
        else:
            self.server = Process(target=ServerLoop,
                                  args=(self.container_name, ))
        self.server.start()

        self.count = 0
        self.data_arrays_sent = 0
        time.sleep(0.1)  # give it some time to start
        self.rdaemon = Pyro4.Proxy("PYRO:Pyro.Daemon@./u:" +
                                   self.container_name)
        self.comp = self.rdaemon.getObject("mycomp")

    def tearDown(self):
        # Ask the server to stop itself; force-terminate only if it is a
        # process and still alive after the grace period.
        self.comp.stopServer()
        time.sleep(0.1)  # give it some time to terminate

        if self.server.is_alive():
            if not USE_THREADS:
                print "Warning: killing server still alive"
                self.server.terminate()

#    @unittest.skip("simple")

    def test_simple(self):
        """
        start a component, ping, and stop it
        """

        ret = self.comp.ping()
        self.assertEqual(ret, "pong", "Ping failed")

#    @unittest.skip("simple")

    def test_exception(self):
        """Remote calls must propagate the correct exception types."""

        # test it raises
        self.assertRaises(MyError, self.comp.bad_call)

        # test it raises when wrong argument
        self.assertRaises(TypeError, self.comp.ping, ("non needed arg", ))

        # non existing method
        self.assertRaises(AttributeError, self.comp.non_existing_method)

#    @unittest.skip("simple")

    def test_roattributes(self):
        """
        check roattributes
        """
        val = self.comp.my_value
        self.assertEqual(val, "ro", "Reading attribute failed")

#    @unittest.skip("simple")

    def test_async(self):
        """
        test futures
        MyComponent queues the future in order of request
        """
        self.comp.set_number_futures(0)

        ft1 = self.comp.do_long(2)  # long enough we can cancel ft2
        ft2 = self.comp.do_long(1)  # shorter than ft1
        self.assertFalse(ft1.done(), "Future finished too early")
        self.assertFalse(ft2.done(), "Future finished too early")
        self.assertFalse(ft2.cancelled(),
                         "future doesn't claim being cancelled")
        # NOTE(review): this assertion is a duplicate of the previous one;
        # the first was probably meant to check ft1 — confirm.
        self.assertFalse(ft2.cancelled(),
                         "future doesn't claim being cancelled")
        self.assertGreater(ft2.result(), 1)  # wait for ft2
        self.assertFalse(ft2.cancel(), "could cancel the finished future")

        self.assertTrue(ft1.done(), "Future not finished")
        self.assertGreater(ft1.result(), 2)

        self.assertEqual(self.comp.get_number_futures(), 2)

#    @unittest.skip("simple")

    def test_unref_futures(self):
        """
        test many futures which don't even get referenced
        It should behave as if the function does not return anything
        """
        self.comp.set_number_futures(0)

        expected = 100  # there was a bug with expected > threadpool size (=24)
        start = time.time()
        for i in range(expected):
            self.comp.do_long(0.1)

        ft_last = self.comp.do_long(0.1)
        ft_last.result()
        duration = time.time() - start
        # Tasks are serialized, so total time is at least the sum of delays.
        self.assertGreaterEqual(duration, expected * 0.1)

        self.assertEqual(self.comp.get_number_futures(), expected + 1)

#    @unittest.skip("simple")

    def test_ref_futures(self):
        """
        test many futures which get referenced and accumulated
        It should behave as if the function does not return anything
        """
        self.comp.set_number_futures(0)
        small_futures = []

        expected = 100  # there was a bug with expected > threadpool size (=24)
        start = time.time()
        for i in range(expected):
            small_futures.append(self.comp.do_long(0.1))

        ft_last = self.comp.do_long(0.1)
        ft_last.result()
        duration = time.time() - start
        self.assertGreaterEqual(duration, expected * 0.1)

        # Once the last future is done, all earlier ones must be done too.
        for f in small_futures:
            self.assertTrue(f.done())

        self.assertEqual(self.comp.get_number_futures(), expected + 1)

#    @unittest.skip("simple")

    def test_async_cancel(self):
        """
        test futures
        """
        self.comp.set_number_futures(0)

        ft1 = self.comp.do_long(2)  # long enough we can cancel ft2
        ft2 = self.comp.do_long(1)  # shorter than ft1
        self.assertTrue(ft2.cancel(), "couldn't cancel the future")
        self.assertTrue(ft2.cancelled(),
                        "future doesn't claim being cancelled")
        self.assertRaises(futures.CancelledError, ft2.result)

        # wait for ft1
        res1a = ft1.result()
        self.assertGreater(res1a, 2)
        self.assertTrue(ft1.done(), "Future not finished")
        # result() must be idempotent once the future is done
        res1b = ft1.result()
        self.assertEqual(res1a, res1b)
        self.assertGreater(res1b, 2)

        self.assertEqual(self.comp.get_number_futures(), 2)

    def test_prog_future(self):
        """
        Test ProgressiveFuture (remotely)
        """
        self._done_calls = 0
        self._progess_calls = 0
        self._start = None
        self._end = None
        ft = self.comp.do_progressive_long(5)
        ft.add_update_callback(self._on_future_proress)
        ft.add_done_callback(self._on_future_done)

        # we should have received at least one progress update already
        self.assertGreaterEqual(self._progess_calls, 1)

        ft.result()
        self.assertGreaterEqual(self._progess_calls, 5)
        self.assertEqual(self._done_calls, 1)

    def _on_future_done(self, f):
        # done-callback: just count the invocations
        self._done_calls += 1

    def _on_future_proress(self, f, start, end):
        # progress-callback: count invocations and record the latest window
        self._progess_calls += 1
        logging.info("Received future update for %f -> %f", start, end)

        self._start = start
        self._end = end

#    @unittest.skip("simple")

    def test_subcomponents(self):
        # via method and via roattributes
        # need to test cycles

        p = self.rdaemon.getObject("parent")
        self.assertEqual(len(p.children.value), 1,
                         "parent doesn't have one child")
        c = list(p.children.value)[0]
        #        self.assertEqual(c.parent, p, "Component and parent of child is different")
        self.assertEqual(p.value, 42)
        self.assertEqual(c.value, 43)
        self.assertEqual(len(c.children.value), 0,
                         "child shouldn't have children")

#    @unittest.skip("simple")

    def test_dataflow_subscribe(self):
        """Subscribe to the dataflow, then check no data arrives after
        unsubscribing."""
        self.count = 0
        self.expected_shape = (2048, 2048)
        self.data_arrays_sent = 0
        self.comp.data.reset()

        self.comp.data.subscribe(self.receive_data)
        time.sleep(0.5)
        self.comp.data.unsubscribe(self.receive_data)
        count_end = self.count
        print "received %d arrays over %d" % (self.count,
                                              self.data_arrays_sent)

        time.sleep(0.1)
        # the count must not grow any more once unsubscribed
        self.assertEqual(count_end, self.count)
        self.assertGreaterEqual(count_end, 1)

    def test_synchronized_df(self):
        """
        Tests 2 dataflows, one synchronized on the event of acquisition started
        on the other dataflow.
        """
        number = 20
        self.count = 0
        self.left = number
        self.expected_shape = (2, 2)
        self.expected_shape_au = (2048, 2048)
        self.data_arrays_sent = 0
        dfe = self.comp.data
        dfs = self.comp.datas
        dfe.reset()

        dfs.synchronizedOn(self.comp.startAcquire)
        dfs.subscribe(self.receive_data)

        time.sleep(
            0.2)  # long enough to be after the first data if it generates data
        # ensure that datas hasn't generated anything yet
        self.assertEqual(self.count, 0)

        dfe.subscribe(self.receive_data_auto_unsub)
        for i in range(number):
            # end early if it's already finished
            if self.left == 0:
                break
            time.sleep(0.2)  # 0.2s should be more than enough in any case

        self.assertEqual(0, self.left)
        self.assertEqual(number, self.count)
        print "received %d arrays over %d" % (self.count,
                                              self.data_arrays_sent)
        max_lat = dfs.get_max_lat()
        if max_lat:
            print "latency: %r, max= %f, avg= %f" % (
                max_lat, max(max_lat), sum(max_lat) / len(max_lat))

        time.sleep(0.1)
        # no more data must arrive after unsubscribing/desynchronizing
        self.assertEqual(number, self.count)
        dfs.unsubscribe(self.receive_data)
        dfs.synchronizedOn(None)
        time.sleep(0.1)
        self.assertEqual(number, self.count)

#    @unittest.skip("simple")

    def test_dataflow_stridden(self):
        # test that stridden array can be passed (even if less efficient)
        self.count = 0
        self.data_arrays_sent = 0
        self.expected_shape = (2048, 2045)
        self.comp.cut.value = 3
        self.comp.data.reset()

        self.comp.data.subscribe(self.receive_data)
        time.sleep(0.5)
        self.comp.data.unsubscribe(self.receive_data)
        self.comp.cut.value = 0  # put it back
        count_end = self.count
        print "received %d stridden arrays over %d" % (self.count,
                                                       self.data_arrays_sent)

        time.sleep(0.1)
        self.assertEqual(count_end, self.count)
        self.assertGreaterEqual(count_end, 1)

    def test_dataflow_empty(self):
        """
        test passing empty DataArray
        """
        self.count = 0
        self.data_arrays_sent = 0
        self.comp.data.setShape((0, ), 16)
        self.expected_shape = (0, )

        self.comp.data.subscribe(self.receive_data)
        time.sleep(0.5)
        self.comp.data.unsubscribe(self.receive_data)
        count_end = self.count
        print "received %d stridden arrays over %d" % (self.count,
                                                       self.data_arrays_sent)

        time.sleep(0.1)
        self.assertEqual(count_end, self.count)
        self.assertGreaterEqual(count_end, 1)

    def receive_data(self, dataflow, data):
        """Dataflow callback: count arrays and check the expected shape."""
        self.count += 1
        self.assertEqual(data.shape, self.expected_shape)
        if data.ndim >= 2:
            # first element encodes how many arrays the sender produced
            self.data_arrays_sent = data[0][0]
            self.assertGreaterEqual(self.data_arrays_sent, self.count)

    def receive_data_auto_unsub(self, dataflow, data):
        """
        callback for df
        """
        self.assertEqual(data.shape, self.expected_shape_au)
        self.left -= 1
        if self.left <= 0:
            dataflow.unsubscribe(self.receive_data_auto_unsub)

    def receive_data_and_unsubscribe(self, dataflow, data):
        # callback that unsubscribes itself after the first array
        self.count += 1
        self.assertEqual(data.shape, (2048, 2048))
        self.data_arrays_sent = data[0][0]
        self.assertGreaterEqual(self.data_arrays_sent, self.count)
        dataflow.unsubscribe(self.receive_data_and_unsubscribe)

#    @unittest.skip("simple")

    def test_dataflow_unsubscribe_from_callback(self):
        """A callback must be able to unsubscribe itself safely."""
        self.count = 0
        self.data_arrays_sent = 0
        self.comp.data.reset()

        self.comp.data.subscribe(self.receive_data_and_unsubscribe)
        time.sleep(0.3)
        self.assertEqual(self.count, 1)
        # It should be 1, or if the generation went very fast, it might be bigger
        self.assertGreaterEqual(self.data_arrays_sent, 1)
#        print "received %d arrays over %d" % (self.count, self.data_arrays_sent)

#        data = comp.data
#        del comp
#        print gc.get_referrers(data)
#        gc.collect()
#        print gc.get_referrers(data)

#    @unittest.skip("simple")

    def test_dataflow_get(self):
        """Synchronous get() must return a fresh array each call."""
        self.comp.data.reset()
        array = self.comp.data.get()
        self.assertEqual(array.shape, (2048, 2048))
        self.assertEqual(array[0][0], 0)

        array = self.comp.data.get()
        self.assertEqual(array.shape, (2048, 2048))
        self.assertEqual(array[0][0], 0)

#    @unittest.skip("simple")

    def test_va_update(self):
        """Check VA reads/writes, subscription notifications and type
        enforcement over the remote proxy."""
        prop = self.comp.prop
        self.assertIsInstance(prop, VigilantAttributeBase)
        self.assertEqual(prop.value, 42)
        prop.value += 1
        self.assertEqual(prop.value, 43)

        self.called = 0
        self.last_value = None
        prop.subscribe(self.receive_va_update)
        # now count
        prop.value = 3  # +1
        prop.value = 0  # +1
        prop.value = 0  # nothing because same value
        time.sleep(0.1)  # give time to receive notifications
        prop.unsubscribe(self.receive_va_update)

        self.assertEqual(prop.value, 0)
        self.assertEqual(self.last_value, 0)
        # called once or twice depending if the brief 3 was seen
        self.assertTrue(1 <= self.called and self.called <= 2)
        called_before = self.called

        # check we are not called anymore
        prop.value = 3  # +1
        self.assertEqual(self.called, called_before)

        # re-subscribe
        prop.subscribe(self.receive_va_update)
        # change remotely
        self.comp.change_prop(45)
        time.sleep(0.1)  # give time to receive notifications
        self.assertEqual(prop.value, 45)
        prop.unsubscribe(self.receive_va_update)
        self.assertEqual(self.called, called_before + 1)

        try:
            prop.value = 7.5
            self.fail("Assigning float to a int should not be allowed.")
        except TypeError:
            pass  # as it should be

    def receive_va_update(self, value):
        # VA subscriber: count notifications and remember the last value
        self.called += 1
        self.last_value = value
        self.assertIsInstance(value, (int, float))

#    @unittest.skip("simple")

    def test_enumerated_va(self):
        # enumerated
        self.assertEqual(self.comp.enum.value, "a")
        self.assertEqual(self.comp.enum.choices, set(["a", "c", "bfds"]))
        self.comp.enum.value = "c"
        self.assertEqual(self.comp.enum.value, "c")

        try:
            self.comp.enum.value = "wfds"
            self.fail("Assigning out of bound should not be allowed.")
        except IndexError:
            pass  # as it should be

    def test_continuous_va(self):
        # continuous
        self.assertEqual(self.comp.cont.value, 2)
        self.assertEqual(self.comp.cont.range, (-1, 3.4))

        self.comp.cont.value = 3.0
        self.assertEqual(self.comp.cont.value, 3)

        try:
            self.comp.cont.value = 4.0
            self.fail("Assigning out of bound should not be allowed.")
        except IndexError:
            pass  # as it should be

    def test_list_va(self):
        # List
        l = self.comp.listval
        self.assertEqual(len(l.value), 2)
        self.called = 0
        l.subscribe(self.receive_listva_update)
        # each of the three in-place mutations below must notify once
        l.value += [3]
        self.assertEqual(len(l.value), 3)
        time.sleep(0.01)
        l.value[-1] = 4
        self.assertEqual(l.value[-1], 4)
        time.sleep(0.01)
        l.value.reverse()
        self.assertEqual(l.value[0], 4)
        time.sleep(0.1)
        self.assertEqual(self.called, 3)
        l.unsubscribe(self.receive_listva_update)

    def receive_listva_update(self, value):
        # list-VA subscriber: count notifications and keep the last list
        self.called += 1
        self.last_value = value
        self.assertIsInstance(value, list)
Esempio n. 38
0
class AuthorizationCodeTestCase(unittest.TestCase):
    """End-to-end OAuth2 authorization-code flow tests.

    Each test starts the provider (and for the token tests also a client
    application) in child processes, then talks to them over HTTP on the
    fixed ports 15486 (provider) / 15487 (client app).
    """
    def setUp(self):
        # Process handles, so tearDown can clean up whatever was started.
        self.client = None
        self.server = None

    def test_tornado(self):
        """Run the provider under Tornado and exercise the token flow."""
        def run_provider(queue):
            # Child-process entry point: reports readiness (result 0) or
            # the startup error through *queue*.
            try:
                provider = create_provider()

                app = TornadoApplication([
                    url(r"/authorize", OAuth2Handler, dict(provider=provider)),
                    url(r"/token", OAuth2Handler, dict(provider=provider))
                ], debug=True)
                app.listen(15486)

                queue.put({"result": 0})

                IOLoop.current().start()
            except Exception as e:
                queue.put({"result": 1, "error_message": str(e)})

        ready_queue = Queue()

        self.server = Process(target=run_provider, args=(ready_queue,))
        self.server.start()

        # Block until the provider reports readiness (or failure).
        provider_started = ready_queue.get()

        if provider_started["result"] != 0:
            # NOTE(review): implicit string concatenation drops the space
            # between "message" and the quoted text — confirm intended.
            raise Exception("Error starting Provider process with message"
                            "'{0}'".format(provider_started["error_message"]))

        self.client = Process(target=run_client, args=(ready_queue,))
        self.client.start()

        client_started = ready_queue.get()

        if client_started["result"] != 0:
            raise Exception("Error starting Client Application process with "
                            "message '{0}'"
                            .format(client_started["error_message"]))

        self.access_token()

    def test_wsgi(self):
        """Run the provider as a WSGI app and exercise the token flow."""
        def run_provider(queue):
            # Child-process entry point: reports readiness (result 0) or
            # the startup error through *queue*.
            try:
                provider = create_provider()

                app = Application(provider=provider)

                httpd = make_server('', 15486, app,
                                    handler_class=NoLoggingHandler)

                queue.put({"result": 0})

                httpd.serve_forever()
            except Exception as e:
                queue.put({"result": 1, "error_message": str(e)})

        ready_queue = Queue()

        self.server = Process(target=run_provider, args=(ready_queue,))
        self.server.start()

        provider_started = ready_queue.get()

        if provider_started["result"] != 0:
            # NOTE(review): same missing space as in test_tornado.
            raise Exception("Error starting Provider process with message"
                            "'{0}'".format(provider_started["error_message"]))

        self.client = Process(target=run_client, args=(ready_queue,))
        self.client.start()

        client_started = ready_queue.get()

        if client_started["result"] != 0:
            raise Exception("Error starting Client Application process with "
                            "message '{0}'"
                            .format(client_started["error_message"]))

        self.access_token()

    def test_wsgi_404(self):
        """Unknown paths on the WSGI provider must answer 404."""
        def run_provider(queue):
            # Child-process entry point, same protocol as in test_wsgi.
            try:
                provider = create_provider()

                app = Application(provider=provider)

                httpd = make_server('', 15486, app,
                                    handler_class=NoLoggingHandler)

                queue.put({"result": 0})

                httpd.serve_forever()
            except Exception as e:
                queue.put({"result": 1, "error_message": str(e)})

        ready_queue = Queue()

        self.server = Process(target=run_provider, args=(ready_queue,))
        self.server.start()

        provider_started = ready_queue.get()

        if provider_started["result"] != 0:
            raise Exception("Error starting Provider process with message"
                            "'{0}'".format(provider_started["error_message"]))

        try:
            urlopen("http://127.0.0.1:15486/invalid-path").read()
        except HTTPError as e:
            self.assertEqual(404, e.code)

    def access_token(self):
        """Fetch an access token via the client app, then refresh it
        directly against the provider, validating both responses."""
        # NOTE(review): "\-" is an unneeded escape (DeprecationWarning on
        # Python 3.6+); a raw string would be cleaner — confirm.
        uuid_regex = "^[a-z0-9]{8}\-[a-z0-9]{4}\-[a-z0-9]{4}\-[a-z0-9]{4}-[a-z0-9]{12}$"

        try:
            access_token_result = urlopen("http://127.0.0.1:15487/app").read()
        except HTTPError as e:
            # NOTE(review): exit(1) aborts the whole test run instead of
            # failing this test — confirm intended.
            print(e.read())
            exit(1)

        access_token_data = json.loads(access_token_result.decode('utf-8'))

        self.assertEqual(access_token_data["token_type"], "Bearer")
        self.assertEqual(access_token_data["expires_in"], 120)
        self.assertRegexpMatches(access_token_data["access_token"],
                                 uuid_regex)
        self.assertRegexpMatches(access_token_data["refresh_token"],
                                 uuid_regex)

        # Exchange the refresh token for a new access token.
        request_data = {"grant_type": "refresh_token",
                        "refresh_token": access_token_data["refresh_token"],
                        "client_id": "abc",
                        "client_secret": "xyz"}

        refresh_token_result = urlopen(
            "http://127.0.0.1:15486/token",
            urlencode(request_data).encode('utf-8')
        )

        refresh_token_data = json.loads(refresh_token_result.read().decode('utf-8'))

        self.assertEqual(refresh_token_data["token_type"], "Bearer")
        self.assertEqual(refresh_token_data["expires_in"], 120)
        self.assertRegexpMatches(refresh_token_data["access_token"],
                                 uuid_regex)

    def tearDown(self):
        # Terminate whichever child processes were actually started.
        if self.client is not None:
            self.client.terminate()
            self.client.join()

        if self.server is not None:
            self.server.terminate()
            self.server.join()
Esempio n. 39
0
                toBeAdded.append(server)
            if len(toBeAdded) == how_many:
                break

        for server in toBeAdded:
            rest.add_node('Administrator', 'password', server.ip)
            #check if its added ?
        nodes = rest.node_statuses()
        otpNodes = [node.id for node in nodes]
        started = rest.rebalance(otpNodes, [])
        msg = "rebalance operation started ? {0}"
        self.log.info(msg.format(started))
        if started:
            result = rest.monitorRebalance()
            msg = "successfully rebalanced out selected nodes from the cluster ? {0}"
            self.log.info(msg.format(result))
            return result
        return False


if __name__ == "__main__":
    # Run the load, combo and backup drivers concurrently, each in its
    # own process, then wait for all of them to finish.
    workers = []
    for entry_point in (start_load, start_combo, start_backup):
        worker = Process(target=entry_point, args=(sys.argv,))
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
Esempio n. 40
0
 def evaluate(self, session, starting_set):
     """Evaluate the wildcard rule over self.names in parallel.

     @session: database session, used only to fetch all variant ids when
         the full starting set must be materialized
     @starting_set: "*" for all variants, or an iterable of variant ids
     @return: set of variant ids satisfying the rule

     Work is fanned out to self.nr_cores worker processes; each worker
     runs one of the *_query functions over a slice of self.names and
     sends its partial result back over a Pipe.
     """
     # NOTE(review): `/` — Python 2 integer division assumed here.
     step = len(self.names) / self.nr_cores
 
     procs = []
     conns = []
     results = []
     
     # A leading '!' inverts the rule: rewrite it as an equivalent
     # non-inverted target rule (De Morgan) plus an inversion flag.
     invert = False
     invert_count = False
     if self.wildcard_rule.startswith('!'):
         corrected_rule = self.wildcard_rule[1:]
         if self.rule_enforcement == 'all':
             target_rule = 'any'
             invert = True
         elif self.rule_enforcement == 'any':
             target_rule = 'all'
             invert = True
         elif self.rule_enforcement == 'none':
             target_rule = 'all'
         elif self.rule_enforcement.startswith('count'):
             target_rule = 'count'
             invert_count = True
     else:
         target_rule = self.rule_enforcement
         corrected_rule = self.wildcard_rule
         
     # Inversion and 'none'/'count' need the concrete universe of ids;
     # otherwise "*" can be passed through symbolically.
     if starting_set == "*" and (invert or target_rule == 'none' or target_rule == 'count'):
         correct_starting_set = array.array('i', rows_as_set(session.execute("SELECT variant_id FROM variants")))
     elif starting_set != "*":
         correct_starting_set = array.array('i', starting_set)
     else:
         correct_starting_set = starting_set
     
     # NOTE: eval() resolves e.g. 'any_query' by name; this is only safe
     # because target_rule is restricted to the fixed set chosen above.
     for i in range(self.nr_cores):
         parent_conn, child_conn = Pipe()
         conns.append(parent_conn)
         p = Process(target=eval(target_rule +'_query'),\
             args=(child_conn, self.column, corrected_rule,\
                 correct_starting_set, self.db_contact_points, self.keyspace))
         procs.append(p)
         p.start()
     
     #Split names in chunks and communicate to procs
     # (the first n % nr_cores slices get one extra name each)
     for i in range(self.nr_cores):
         n = len(self.names)
         begin = i*step + min(i, n % self.nr_cores)
         end = begin + step
         if i < n % self.nr_cores:
             end += 1  
         conns[i].send(self.names[begin:end])                
     
     #Collect results
     for i in range(self.nr_cores):
         results.append(conns[i].recv())
         conns[i].close()
     
     for i in range(self.nr_cores):
         procs[i].join()
     
     res = set()    
     
     # Merge partial results: union for 'any', intersection for
     # 'all'/'none' (results are sets in those modes).
     if target_rule == 'any':
         for r in results:
             res = res | r
     elif target_rule in ['all', 'none']:
         res = results[0]
         for r in results[1:]:
             res = res & r
                             
     if invert:
         res = set(correct_starting_set) - res
     
     # In 'count' mode the partial results are {variant_id: count} dicts;
     # sum them, optionally invert the counts, then apply the comparison.
     if target_rule == 'count':
         res_dict = {x: 0 for x in correct_starting_set}
         for sub_result_dict in results:
             for var, count in sub_result_dict.iteritems():
                 res_dict[var] += count     
         if invert_count:
             total = len(self.names)
             for variant, count in res_dict.iteritems():
                 res_dict[variant] = total - count
         # NOTE(review): self.count_comp is eval'd; it must be a trusted
         # comparison string such as '>=2' — confirm it is never
         # user-controlled.
         res = set([variant for variant, count in res_dict.iteritems() \
                    if eval(str(count) + self.count_comp)])
     
     return res
Esempio n. 41
0
 def start(self):
     """Launch self.test in a separate process (fire-and-forget)."""
     print("starting test")
     runner = Process(target=self.test)
     # runner.daemon = True
     runner.start()
Esempio n. 42
0
class Worker(_th.Thread):
    """Named singleton worker: a daemon thread that forwards queued tasks
    to a companion worker process over a pair of one-way pipes.

    Constructing Worker(name) returns the already-registered instance for
    that name if one exists (see __new__/__init__); results are delivered
    by mutating the dict returned from put().
    """
    def __new__(cls, name=None):
        # Singleton per name: reuse the registered instance if present.
        if name is None: name = 'default'
        if name in _workers.keys():
            return _workers[name]
        return super(Worker, cls).__new__(cls)

    def __init__(self, name=None):
        # Early-return when __new__ handed back an existing instance, so
        # the pipes/process are only ever created once per name.
        if name is None: name = 'default'
        if name in _workers.keys():
            return
        _workers[name] = self
        super(Worker, self).__init__()
        self.daemon = True
        self.name = name
        # maxsize 1: producers block until the previous task was taken
        self._queue = _ver.queue.Queue(1)
        self.last_exception = None
        # shared "power on" flag, cleared to stop the worker process
        self._pon = _mp.Value('b', True)
        tsk, self.task = _mp.Pipe(False)
        self.out, res = _mp.Pipe(False)
        self.process = Process(target=process,
                               args=(self._pon, tsk, res),
                               name=name)
        self.process.start()
        self._on = True
        self.start()
        _time.sleep(1)

    def run(self):
        # Thread body: shuttle tasks from the queue to the process and
        # publish each result into the caller's result dict.
        _sup.debug('%s started' % (str(self.name), ))
        while self._on or not self._queue.empty():
            try:
                result, target, args, kwargs = self._queue.get(True, .1)
                _sup.debug('%s: %s-task received' %
                           (str(self.name), target.__name__))
                self.task.send((target, args, kwargs))
                res = self.out.recv()
                # swap the pending marker for the actual result
                del (result[self.name])
                _sup.debug(res)
                result[target.__name__] = res
                _sup.debug('%s: %s-task done' %
                           (str(self.name), target.__name__))
                self._queue.task_done()
            except _ver.queue.Empty:
                continue
            except KeyboardInterrupt as ki:
                raise ki
            except Exception as exc:
                # NOTE(review): `result` is unbound if the failure happened
                # before the first successful queue.get — confirm.
                _sup.debug('%s: %s' % (str(self.name), str(exc)), 0)
                if result is not None:
                    result[self.name] = exc
                self.last_exception = exc
        _sup.debug('%s: done!' % (str(self.name), ))
        self._pon.value = False
        del (_workers[self.name])

    def join(self):
        # Drain the queue, stop the thread, then stop the child process.
        self._on = False
        self._queue.join()
        super(Worker, self).join()
        self._pon.value = False
        self.process.join()

    def put(self, target, *args, **kwargs):
        """Queue *target* for execution; returns a dict that will receive
        the result under target.__name__ once the task completes."""
        result = {self.name: target.__name__}
        self._queue.put((result, target, args, kwargs))
        _time.sleep(.1)
        return result
Esempio n. 43
0
    start_time = time.time()
    proc = Process(target=progress)
    if (os.path.exists('validationLog.log')):
        print "Log File exists. Making new one"
        os.remove('validationLog.log')
    logFile = open(
        'validationLog.log', 'a'
    )  #open a log file for output -> in case it already exists delete old one?
    try:
        TO_DO = [
            "Data", "Data_HLT", "FullSim", "FullSim_HLT", "FullSim_PU",
            "FullSim_PU_HLT", "FastSim", "FastSim_HLT", "FastSim_PU",
            "FastSim_PU_HLT", "FullFastSim", "FullFastSim_HLT", "Generator"
        ]

        proc.start()
        for i in TO_DO:
            do_some_work(i, logFile)

        #Do some work here
        proc.terminate()
        logFile.close()
        #os.system("echo 'Done.\nElapsed time:\n"+secondsToStr(time.time() - start_time)+"' | mail -s 'RelMon\n' [email protected]")
        os.system(
            "echo 'Done.\nElapsed time:\n%s' | mail -s 'RelMon' [email protected]"
            % (secondsToStr(time.time() - start_time)))
    except SystemExit:
        os.system(
            "echo 'Error' | mail -s 'RelMon' [email protected]")
        sys.exit()
    except:
Esempio n. 44
0
def start_example_app_process3():
    """Spawn example server 3 in its own process and return the handle.

    XXX DO NOT FORGET TO KILL THE PROCESS IF THE TEST DOES NOT SUCCEED
    """
    server_proc = Process(target=start_example_server3)
    server_proc.start()
    return server_proc
Esempio n. 45
0
def build_process(target):
    """Start *target* in a daemonized background process.

    @target: a picklable callable; its __name__ becomes the process name
    @return: the started multiprocessing.Process handle
    """
    f51fp = Process(target=target, name=target.__name__)
    # Use a real bool: Process.daemon is documented as True/False, and
    # `daemon = 1` only worked by accident of truthiness.
    f51fp.daemon = True
    f51fp.start()
    return f51fp
class WhenFunctionalTestingGameClient(unittest.TestCase):
    def setUp(self):
        #setup game server to run on a seperate process
        self.game_server = Process(target=start_server)
        self.game_server.start()

        #create the game client
        self.client = GameClient(host="127.0.0.1", port="5000")

        self.player_one = "Arthur"
        self.player_one_suspect = game_state.PEACOCK
        self.player_two = "Steven"
        self.player_two_suspect = game_state.PLUM

    def test_game_client(self):
        try:
            #give the game server process a chance to start
            time.sleep(3)

            #test registering players and choosing suspects
            self.client.register_player(self.player_one)
            self.client.choose_suspect(self.player_one,
                                       self.player_one_suspect)
            self.client.register_player(self.player_two)
            self.client.choose_suspect(self.player_two,
                                       self.player_two_suspect)

            #retreive the registered players with the client and validate the
            #return values
            players = self.client.get_players()
            for player in players:
                self.assertIsInstance(player, game_state.Player)
            self.assertTrue(
                self.player_one in [player.username for player in players])
            self.assertTrue(
                self.player_two in [player.username for player in players])
            self.assertTrue(self.player_one_suspect in
                            [player.suspect for player in players])
            self.assertTrue(self.player_two_suspect in
                            [player.suspect for player in players])

            #start a new game with the client and validate a GameState object
            #is returned
            game = self.client.start_new_game()
            self.assertTrue(game, game_state.GameState)

            game = self.client.get_game_state(game.game_id)
            self.assertTrue(game, game_state.GameState)

            #move player 1  from start space to hallway
            player = game.current_player
            player_1_current_space = game.game_board[player.suspect]
            move_space = player_1_current_space.connected_spaces[0]
            game = self.client.move_player(player.username, player.suspect,
                                           move_space)
            self.assertEqual(game.turn_status,
                             game_state.AWAITING_ACCUSATION_OR_END_TURN)
            game = self.client.end_turn(player.username)
            player_1_current_space = game.game_board[move_space]
            self.assertEqual(game.turn_status, game_state.AWAITING_MOVE)

            #move player 2  from start space to hallway
            player = game.current_player
            player_2_current_space = game.game_board[player.suspect]
            move_space = player_2_current_space.connected_spaces[0]
            game = self.client.move_player(player.username, player.suspect,
                                           move_space)
            self.assertEqual(game.turn_status,
                             game_state.AWAITING_ACCUSATION_OR_END_TURN)
            game = self.client.end_turn(player.username)

            player_2_current_space = game.game_board[move_space]
            self.assertEqual(game.turn_status, game_state.AWAITING_MOVE)

            #move player 1  from hallway to room
            player = game.current_player
            move_space = player_1_current_space.connected_spaces[0]
            game = self.client.move_player(player.username, player.suspect,
                                           move_space)
            self.assertEqual(game.turn_status, game_state.AWAITING_SUGGESTION)

            #make suggestion based on room player is currently in
            game = self.client.make_suggestion(player.username,
                                               game_state.MUSTARD,
                                               game_state.REVOLVER, move_space)

            #if there is a player that can prove the suggestion false
            #then test the suggestion response
            if game.suggestion_response_player:
                with self.assertRaises(errors.GameClientException):
                    game = self.client.move_player(player.username,
                                                   player.suspect, move_space)
                self.assertEqual(game.turn_status,
                                 game_state.AWAITING_SUGGESTION_RESPONSE)

                response_player = game.suggestion_response_player
                suggestion = game.current_suggestion
                gamecard_item = list(
                    {suggestion.weapon, suggestion.room, suggestion.suspect}
                    & set(card.item for card in response_player.game_cards))[0]
                game = self.client.make_suggestion_response(
                    response_player.username, gamecard_item)

            self.assertEqual(game.turn_status,
                             game_state.AWAITING_ACCUSATION_OR_END_TURN)
            game = self.client.end_turn(player.username)

            self.assertEqual(game.turn_status, game_state.AWAITING_MOVE)

            last_player = player
            player = game.current_player
            self.assertNotEqual(player.username, last_player.username)

            #test accusation
            suspect = [
                card.item for card in game.case_file
                if card.type == game_state.SUSPECT
            ][0]
            weapon = [
                card.item for card in game.case_file
                if card.type == game_state.WEAPON
            ][0]
            room = [
                card.item for card in game.case_file
                if card.type == game_state.ROOM
            ][0]

            game = self.client.make_accusation(player.username, suspect,
                                               weapon, room)

            for message in game.player_messages:
                print message

            self.client.destroy_game(game.game_id)

        finally:
            self.game_server.terminate()
Esempio n. 47
0
    args = parser().parse_args()

    initLogger(args.log)
    #log.startLogging(open('logs/twisted.logs', 'w+'))
    factory = ArgsClientFactory(args)

    if args.spawn:
        nodes = []
        logging.info('Spawning {} processes'.format(args.spawn))
        startNode('Main node', factory, args.p2p_port, getKnownNodes(args))
        for i in range(1, args.spawn):
            process = Process(target=startNode,
                              args=('Node {}'.format(i), factory,
                                    args.p2p_port + i, [('localhost',
                                                         args.p2p_port)]))
            process.start()
            nodes.append(process)

        try:
            while 1:
                time.sleep(1)
        finally:
            for n in nodes:
                logging.info('Killing node {}'.format(n))
                n.terminate()
                n.join()
    else:
        node = startNode('P2P node', factory, args.p2p_port,
                         getKnownNodes(args))

        # pylint: disable=E1101
Esempio n. 48
0
class MongoRedis(object):
    """A Redis-like key/value API backed by a MongoDB collection.

    Keys live in documents of the form {'k': key, 'v': value, 'exp': ts}
    where 'exp' (expiry as a unix timestamp) is optional.
    """
    def __init__(self, mongo_db, collection_name='cache'):
        # Ensure index
        if not isinstance(mongo_db, Database):
            raise ValueError(
                'mongo_db must be instance of pymongo.database.Database')
        self.col = mongo_db[collection_name]
        self.col.ensure_index('k', unique=True)
        self.col.ensure_index('exp')
        self.prune_expired()

    def start(self):
        """
        Starts the background process that prunes expired items
        """
        def task():
            while True:
                self.prune_expired()
                pytime.sleep(.5)

        self.processs = Process(target=task)
        self.processs.start()

    def end(self):
        """
        End the background process that prunes expired items
        """
        # BUG FIX: the guard used to read `if not hasattr(self, 'process')`,
        # which (a) tested an attribute name that is never set and (b) was
        # inverted, so calling end() before start() raised AttributeError.
        # Only terminate when the pruner process actually exists.
        if hasattr(self, 'processs'):
            self.processs.terminate()

    def prune_expired(self):
        """
        Deletes expired keys from the db, returns count deleted
        """
        now = pytime.time()
        result = self.col.remove({'exp': {'$exists': True, '$lte': now}})
        return result['n']

    ### REDIS COMMANDS ###
    def delete(self, *names):
        """
        Delete one or more keys specified by ``names``
        """
        return self.col.remove({'k': {'$in': names}})['n']

    __delitem__ = delete

    def expire(self, name, time):
        """
        Set an expire flag on key ``name`` for ``time`` seconds. ``time``
        can be represented by an integer or a Python timedelta object.
        """
        expire_at = pytime.time()
        if isinstance(time, datetime.timedelta):
            time = time.seconds + time.days * 24 * 3600
        expire_at += time
        return bool(
            self.col.update({'k': name}, {'$set': {
                'exp': expire_at
            }})['n'])

    def flushdb(self):
        """
        Delete all keys in the current database
        """
        self.col.remove()
        return True

    def get(self, name):
        """
        Return the value at key ``name``, or None if the key doesn't exist
        """
        now = pytime.time()
        result = self.col.find_one({'k': name}) or {}
        # A key that exists but has already expired reads as missing.
        if result.get('exp', now) < now:
            return None
        return result.get('v')

    def set(self, name, value, ex=None, px=None, nx=False, xx=False):
        """
        Set the value at key ``name`` to ``value``

        ``ex`` sets an expire flag on key ``name`` for ``ex`` seconds.

        ``px`` sets an expire flag on key ``name`` for ``px`` milliseconds.

        ``nx`` if set to True, set the value at key ``name`` to ``value`` if it
            does not already exist.

        ``xx`` if set to True, set the value at key ``name`` to ``value`` if it
            already exists.
        """
        upsert = True
        expire_at = pytime.time()
        if px:
            # if isinstance(px, datetime.timedelta):
            #     ms = int(px.microseconds / 1000)
            #     px = (px.seconds + px.days * 24 * 3600) * 1000 + ms
            # expire_at += px * 0.001
            raise NotImplementedError  # Millis to fine grained
        elif ex:
            if isinstance(ex, datetime.timedelta):
                ex = ex.seconds + ex.days * 24 * 3600
            expire_at += ex
        if nx:
            try:
                # BUG FIX: 'exp' used to be written unconditionally here, so
                # an nx-set without `ex` stored exp == now and the key
                # expired immediately.  Only store an expiry when requested.
                data = {'k': name, 'v': value}
                if ex:
                    data['exp'] = expire_at
                self.col.save(data)
                return True
            except DuplicateKeyError:
                return None
        elif xx:
            upsert = False
        data = {'v': value}
        if ex:
            data['exp'] = expire_at
        result = self.col.update({'k': name}, {'$set': data}, upsert=upsert)
        return True if result['n'] == 1 else None

    __setitem__ = set

    def ttl(self, name):
        """
        Returns the number of seconds until the key ``name`` will expire
        """
        now = pytime.time()
        exp = (self.col.find_one({'k': name}) or {}).get('exp', now)
        diff = exp - now
        return long(-1) if diff <= 0 else diff
Esempio n. 49
0
def start_echo_server_process():
    """Launch the echo server in a background process and return it.

    The caller owns the returned Process and must terminate it.
    """
    # XXX DO NOT FORGET TO KILL THE PROCESS IF THE TEST DOES NOT SUCCEED
    process = Process(target=start_echo_server)
    process.start()
    return process
Esempio n. 50
0
class SmtpMessageServer(object):
    """
    This class can start an SMTP debugging server,
    configure LinOTP to talk to it and read the
    results back to the parent tester.

    On open, an SMTP server is set up to listen locally.
    Derived classes can define a hook to set the LinOTP
    configuration to point to this server.

    Example usage:

    with SmtpMessageServer(testcase) as smtp:
        get_otp()
    """
    def __init__(self, testcase, message_timeout):
        self.testcase = testcase

        # We need a minimum version of 2.9.2 to set the SMTP port number, so
        # skip if testing an earlier version
        self.testcase.need_linotp_version('2.9.2')

        self.timeout = message_timeout

        self.set_config = SetConfig(testcase.http_protocol, testcase.http_host,
                                    testcase.http_port, testcase.http_username,
                                    testcase.http_password)

        # We advertise the local SMTP server hostname
        # using the IP address that connects to LinOTP
        self.addr = self._get_local_ip()
        self.msg_payload = None

    def __enter__(self):
        # Run the SMTP listener in a child process; it reports its chosen
        # port back on the queue.
        self.smtp_process_queue = Queue()
        self.smtp_process = Process(target=get_otp_mail,
                                    args=(self.smtp_process_queue,
                                          self.timeout))
        self.smtp_process.start()
        self.port = self.smtp_process_queue.get(True, 5)
        self._do_linotp_config()

        return self

    def _do_linotp_config(self):
        # NOTE: renamed from `_do_lintop_config` (typo); internal only.
        parameters = self.get_config_parameters()

        logger.debug("Configuration parameters: %s", parameters)
        result = self.set_config.setConfig(parameters)

        assert result, "It was not possible to set the config. Result:%s" % result

    def get_config_parameters(self):
        # This function can be overridden to provide configuration parameters to configure
        # specific parts of LinOTP
        # BUG FIX: was `assert False, ...` which is silently stripped under
        # `python -O`; raise explicitly so the abstract hook always fails.
        raise NotImplementedError("This function should be overridden")

    def get_otp(self):
        """Block (up to 10s) for the captured email and return its payload."""
        messagestr = self.smtp_process_queue.get(True, 10)
        msg = email.message_from_string(messagestr)
        otp = msg.get_payload()

        logger.debug("Received email message payload:%s", otp)

        return otp

    def __exit__(self, *args):
        self.smtp_process_queue.close()
        self.smtp_process.terminate()
        self.smtp_process.join(5)

    def _get_local_ip(self):
        """
        Get the IP address of the interface that connects to
        LinOTP
        """

        with closing(
                socket.create_connection(
                    (self.testcase.http_host, int(self.testcase.http_port)),
                    10)) as s:
            addr = s.getsockname()[0]

        return addr
Esempio n. 51
0
class Fetcher(object):
    """Abstract fetcher that pulls messages and feeds them to Workers.

    run_forever() executes in a dedicated child process started by start().
    """
    __metaclass__ = ABCMeta

    def __init__(self, worker_list):
        from .worker import Worker
        self.worker_list = worker_list
        assert isinstance(self.worker_list, list)
        for worker in self.worker_list:
            assert isinstance(worker, Worker)
        self.process = Process(target=self.run_forever, args=())
        self.running = False

    def start(self):
        # Launch run_forever() in its own process.
        self.process.start()

    @abstractmethod
    def shutdown(self):
        # Handle process shutdown.
        self.running = False

    def setup_shutdown(self):
        """
        Install signal handlers for a graceful exit.
        :return:
        """
        import signal

        def on_sigterm(*ignore):
            self.shutdown()

        signal.signal(signal.SIGTERM, on_sigterm)
        signal.signal(signal.SIGINT, on_sigterm)

    @abstractmethod
    def choose(self, msg):
        """
        Pick the index of the worker that should handle this message,
        e.g. hash(msg) % len(self.worker_list)
        :param msg:
        :return:
        """
        pass

    def do_feed(self, msg):
        # Dispatch msg to the worker selected by choose(); retries forever
        # while that worker's queue is full.
        index = self.choose(msg)
        assert isinstance(index, int) and index < len(self.worker_list)
        while True:
            try:
                self.worker_list[index].feed(msg)
            except Full:
                p = self.worker_list[index].process
                assert isinstance(p, Process)
                # Queue is full: log and retry the send.
                logging.error(
                    "pid:%s worker is full. Please check the Thread blocking situation."
                    % str(p.pid))
                continue
            break

    @abstractmethod
    def run_forever(self):
        """
        Start processing (runs in the child process).
        :return:
        """
        self.running = True

    def join(self):
        # Wait for the fetcher's child process to exit.
        self.process.join()
Esempio n. 52
0
class ImageMngr(object):
    """
    This class handles most of the backend work for the image gateway.
    It uses a Mongo Database to track state, uses threads to dispatch work,
    and has public functions to lookup, pull and expire images.
    """
    def __init__(self, config, logger=None, logname='imagemngr'):
        """
        Create an instance of the image manager.
        """
        if logger is None and logname is None:
            self.logger = logging.getLogger(logname)
            log_handler = logging.StreamHandler()
            logfmt = '%(asctime)s [%(name)s] %(levelname)s : %(message)s'
            log_handler.setFormatter(logging.Formatter(logfmt))
            log_handler.setLevel(logging.INFO)
            self.logger.addHandler(log_handler)
        elif logname is not None:
            self.logger = logging.getLogger(logname)
            self.logger.info('ImageMngr using logname %s' % (logname))
        else:
            print "Using upstream logger"
            self.logger = logger
            print logger
            self.logger.info('ImageMngr using upstream logger')

        self.logger.debug('Initializing image manager')
        self.config = config
        if 'Platforms' not in self.config:
            raise NameError('Platforms not defined')
        self.systems = []
        # Time before another pull can be attempted
        self.pullupdatetimeout = 300
        if 'PullUpdateTime' in self.config:
            self.pullupdatetimeout = self.config['PullUpdateTimeout']
        # Max amount of time to allow for a pull
        self.pulltimeout = self.pullupdatetimeout
        # This is not intended to provide security, but just
        # provide a basic check that a session object is correct
        self.magic = 'imagemngrmagic'
        if 'Authentication' not in self.config:
            self.config['Authentication'] = "munge"
        self.auth = Authentication(self.config)
        self.platforms = self.config['Platforms']

        for system in self.config['Platforms']:
            self.systems.append(system)
        # Connect to database
        if 'MongoDBURI' not in self.config:
            raise NameError('MongoDBURI not defined')
        self.workers = WorkerThreads()
        self.status_queue = self.workers.get_updater_queue()
        self.status_proc = Process(target=self.status_thread,
                                   name='StatusThread')
        self.status_proc.start()
        atexit.register(self.shutdown)
        self.mongo_init()

    def shutdown(self):
        """Ask the status thread to exit by queueing the 'stop' sentinel."""
        self.status_queue.put('stop')

    def mongo_init(self):
        """(Re)connect to Mongo and bind the images/metrics collections."""
        client = MongoClient(self.config['MongoDBURI'])
        database = client[self.config['MongoDB']]
        self.images = database.images
        # The metrics collection is only bound when explicitly enabled.
        if self.config.get('Metrics') is True:
            self.metrics = database.metrics
        else:
            self.metrics = None

    def status_thread(self):
        """
        This listens for update messages from a queue.

        Runs in its own Process (started in __init__) and exits when the
        'stop' sentinel is queued by shutdown().  Re-runs mongo_init()
        first — presumably because client handles do not survive the
        fork; TODO confirm.
        """
        self.mongo_init()
        while True:
            message = self.status_queue.get()
            if message == 'stop':
                self.logger.info("Shutting down Status Thread")
                break
            ident = message['id']
            state = message['state']
            meta = message['meta']
            # TODO: Handle a failed expire
            if state == "FAILURE":
                self.logger.warn("Operation failed for %s", ident)

            # print "Status: %s" % (state)
            # A response message
            # Non-READY states are plain progress updates; record and loop.
            if state != 'READY':
                self.update_mongo_state(ident, state, meta)
                continue
            # READY with a response payload: either an ACL-only update or a
            # completed pull.
            if 'response' in meta and meta['response']:
                response = meta['response']
                self.logger.debug(response)
                if 'meta_only' in response:
                    self.logger.debug('Updating ACLs')
                    self.update_acls(ident, response)
                else:
                    self.complete_pull(ident, response)
                self.logger.debug('meta=%s', str(response))

    def check_session(self, session, system=None):
        """Check if this is a valid session
        session is a session handle
        """
        if 'magic' not in session:
            self.logger.warn("request recieved with no magic")
            return False
        elif session['magic'] is not self.magic:
            self.logger.warn("request received with bad magic %s",
                             session['magic'])
            return False
        if system is not None and session['system'] != system:
            self.logger.warn("request received with a bad system %s!=%s",
                             session['system'], system)
            return False
        return True

    def _isadmin(self, session, system=None):
        """
        Check if this is an admin user.
        Returns true if is an admin or false if not.
        """
        if 'admins' not in self.platforms[system]:
            return False
        admins = self.platforms[system]['admins']
        user = session['user']
        if user in admins:
            self.logger.info('user %s is an admin', user)
            return True
        return False

    def _isasystem(self, system):
        """Check if system is a valid platform."""
        return bool(system in self.systems)

    def _get_groups(self, uid, gid):
        """Look up auxilary groups. """
        proc = Popen(['id', '-G', '%d' % (uid)], stdout=PIPE, stderr=PIPE)
        if proc is None:
            self.logger.warn("Group lookup failed")
            return []
        stdout, stderr = proc.communicate()
        groups = []
        for group in stdout.split():
            groups.append(int(group))
        return groups

    def _checkread(self, session, rec):
        """
        Checks if the user has read permissions to the image.
        """

        # Start by checking if the image is public (no ACLs)
        if 'private' in rec and rec['private'] is False:
            return True
        iUACL = None
        iGACL = None
        if 'userACL' in rec:
            iUACL = rec['userACL']
        if 'groupACL' in rec:
            iGACL = rec['groupACL']
        if iUACL is None and iGACL is None:
            return True
        if iUACL == [] and iGACL == []:
            return True
        uid = session['uid']
        gid = session['gid']
        self.logger.debug('uid=%s iUACL=%s' % (uid, str(iUACL)))
        self.logger.debug('sessions = ' + str(session))
        groups = self._get_groups(uid, gid)
        if iUACL is not None and uid in iUACL:
            return True
        if iGACL is not None and gid in iGACL:
            return True
        for group in groups:
            if iGACL is not None and group in iGACL:
                return True
        return False

    def _resetexpire(self, ident):
        """Reset the expire time.  (Not fully implemented)."""
        # Change expire time for image
        # TODO shore up expire-time parsing
        expire_timeout = self.config['ImageExpirationTimeout']
        (days, hours, minutes, secs) = expire_timeout.split(':')
        expire = time() + int(secs) + 60 * (int(minutes) + 60 *
                                            (int(hours) + 24 * int(days)))
        self._images_update({'_id': ident}, {'$set': {'expiration': expire}})
        return expire

    def _make_acl(self, acllist, id):
        if id not in acllist:
            acllist.append(id)
        return acllist

    def _compare_list(self, a, b, key):
        """"
        look at the key element of two objects
        and compare the list of ids.

        return True if everything matches
        return False if anything is different
        """

        # If the key isn't in the objects or
        # something else fails, then it must
        # have changed.
        try:
            if key not in a:
                return False
            if key not in b:
                return False
        except:
            return True
        aitems = a[key]
        bitems = b[key]
        if len(aitems) != len(bitems):
            return False
        for item in aitems:
            if item not in bitems:
                return False
        return True

    def _add_metrics(self, session, request, record):
        """
        Add a row to mongo for this lookup request.
        """
        try:
            r = {
                'user': session['user'],
                'uid': session['uid'],
                'system': request['system'],
                'type': request['itype'],
                'tag': request['tag'],
                'id': record['id'],
                'time': time()
            }
            self._metrics_insert(r)
        except:
            self.logger.warn('Failed to log lookup.')

    def get_metrics(self, session, system, limit):
        """
        Return the last <limit> lookup records.
        """
        recs = []
        if not self._isadmin(session, system):
            return recs
        if self.metrics is None:
            return recs
        count = self.metrics.count()
        skip = count - limit
        if skip < 0:
            skip = 0
        for r in self.metrics.find().skip(skip):
            r.pop('_id', None)
            recs.append(r)
        return recs

    def new_session(self, auth_string, system):
        """
        Creates a session context that can be used for multiple transactions.
        auth is an auth string that will be passed to the authenication layer.
        Returns a context that can be used for subsequent operations.
        """
        if auth_string is None:
            return {'magic': self.magic, 'system': system}
        arec = self.auth.authenticate(auth_string, system)
        if arec is None and isinstance(arec, dict):
            raise OSError("Authenication returned None")
        else:
            if 'user' not in arec:
                raise OSError("Authentication returned invalid response")
            session = arec
            session['magic'] = self.magic
            session['system'] = system
            return session

    def lookup(self, session, image):
        """
        Lookup an image.
        Image is dictionary with system,itype and tag defined.
        """
        if not self.check_session(session, image['system']):
            raise OSError("Invalid Session")
        query = {
            'status': 'READY',
            'system': image['system'],
            'itype': image['itype'],
            'tag': {
                '$in': [image['tag']]
            }
        }
        self.update_states()
        rec = self._images_find_one(query)
        if rec is not None:
            if self._checkread(session, rec) is False:
                return None
            self._resetexpire(rec['_id'])

        if self.metrics is not None:
            self._add_metrics(session, image, rec)
        return rec

    def imglist(self, session, system):
        """
        list images for a system.
        Image is dictionary with system defined.
        """
        if not self.check_session(session, system):
            raise OSError("Invalid Session")
        if self._isasystem(system) is False:
            raise OSError("Invalid System")
        query = {'status': 'READY', 'system': system}
        self.update_states()
        records = self._images_find(query)
        resp = []
        for record in records:
            if self._checkread(session, record):
                resp.append(record)
        # verify access
        return resp

    def show_queue(self, session, system):
        """
        list queue for a system.
        Image is dictionary with system defined.
        """
        if not self.check_session(session, system):
            raise OSError("Invalid Session")
        query = {'status': {'$ne': 'READY'}, 'system': system}
        self.update_states()
        records = self._images_find(query)
        resp = []
        for record in records:
            resp.append({
                'status': record['status'],
                'image': record['pulltag']
            })
        return resp

    def _isready(self, image):
        """Helper function to determine if an image is READY."""
        query = {
            'status': 'READY',
            'system': image['system'],
            'itype': image['itype'],
            'tag': {
                '$in': [image['tag']]
            }
        }
        rec = self._images_find_one(query)
        if rec is not None:
            return True
        return False

    def _pullable(self, rec):
        """
        An image is pullable when:
        -There is no existing record
        -The status is a FAILURE
        -The status is READY and it is past the update time
        -The state is something else and the pull has expired
        """

        # if rec is None then do a pull
        if rec is None:
            return True

        # Okay there has been a pull before
        # If the status flag is missing just repull (shouldn't happen)
        if 'status' not in rec:
            return True
        status = rec['status']

        # EXPIRED images can be pulled
        if status == 'EXPIRED':
            return True

        # Need to deal with last_pull for a READY record
        if 'last_pull' not in rec:
            return True
        nextpull = self.pullupdatetimeout + rec['last_pull']

        # It has been a while, so re-pull to see if it is fresh
        if status == 'READY' and (time() > nextpull):
            return True

        # Repull failed pulls
        if status == 'FAILURE' and (time() > nextpull):
            return True

        # Last thing... What if the pull somehow got hung or died in the middle
        # See if heartbeat is old
        # TODO: add pull timeout.  For now use 1 hour
        if status != 'READY' and 'last_heartbeat' in rec:
            if (time() - rec['last_heartbeat']) > 3600:
                return True

        return False

    def new_pull_record(self, image):
        """
        Creates a new image in mongo.  If the pull already exist it removes
        it first.
        """
        # Clean out any existing records
        for rec in self._images_find(image):
            if rec['status'] == 'READY':
                continue
            else:
                self._images_remove({'_id': rec['_id']})
        newimage = {
            'format': 'invalid',  # <ext4|squashfs|vfs>
            'arch': 'amd64',  # <amd64|...>
            'os': 'linux',  # <linux|...>
            'location': '',  # urlencoded location
            'remotetype': 'dockerv2',  # <file|dockerv2|amazonec2>
            'ostcount': '0',  # integer, number of OSTs (future)
            'replication': '1',  # integer, number of copies to deploy
            'userACL': [],
            'groupACL': [],
            'private': None,
            'tag': [],
            'status': 'INIT'
        }
        if 'DefaultImageFormat' in self.config:
            newimage['format'] = self.config['DefaultImageFormat']
        for param in image:
            if param is 'tag':
                continue
            newimage[param] = image[param]
        self._images_insert(newimage)
        return newimage

    def pull(self, session, image, testmode=0):
        """
        Pull the image described by *image* for an authenticated *session*.

        image must provide 'system', 'itype' and 'tag'; optional
        'userACL'/'groupACL' lists trigger an ACL update.
        Optional: testmode={0,1,2} passed through to the worker.
        Returns the mongo record tracking the pull (existing, in-flight,
        or newly enqueued).  Raises OSError for an invalid session.
        """
        request = {
            'system': image['system'],
            'itype': image['itype'],
            'pulltag': image['tag']
        }
        self.logger.debug('Pull called Test Mode=%d', testmode)
        if not self.check_session(session, request['system']):
            self.logger.warn('Invalid session on system %s', request['system'])
            raise OSError("Invalid Session")
        # If a pull request exists for this tag:
        #   check to see if it is expired or a failure, if so remove it
        # otherwise:
        #   return the record
        rec = None
        # drop stale FAILURE records first
        self.update_states()
        # let's lookup the active image
        query = {
            'status': 'READY',
            'system': image['system'],
            'itype': image['itype'],
            'tag': {
                '$in': [image['tag']]
            }
        }
        rec = self._images_find_one(query)
        # an in-flight (non-READY, non-SUCCESS) pull takes precedence
        for record in self._images_find(request):
            status = record['status']
            if status == 'READY' or status == 'SUCCESS':
                continue
            rec = record
            break
        inflight = False
        recent = False
        if rec is not None and rec['status'] != 'READY':
            inflight = True
        elif rec is not None:
            # if an image has been pulled in the last 10 seconds
            # let's consider that "recent" and avoid re-pulling
            if (time() - rec['last_pull']) < 10:
                recent = True
        request['userACL'] = []
        request['groupACL'] = []
        if 'userACL' in image and image['userACL'] != []:
            request['userACL'] = self._make_acl(image['userACL'],
                                                session['uid'])
        if 'groupACL' in image and image['groupACL'] != []:
            request['groupACL'] = self._make_acl(image['groupACL'],
                                                 session['gid'])
        if self._compare_list(request, rec, 'userACL') and \
                self._compare_list(request, rec, 'groupACL'):
            # BUG FIX: this message was previously logged in the *changed*
            # branch; it belongs here, where the ACL lists match.
            self.logger.debug("No ACL change detected.")
            acl_changed = False
        else:
            acl_changed = True

        # We could hit a key error or some other edge case
        # so just do our best and update if there are problems
        update = False
        if not recent and not inflight and acl_changed:
            self.logger.debug("ACL change detected.")
            update = True

        if self._pullable(rec):
            self.logger.debug("Pullable image")
            update = True

        if update:
            self.logger.debug("Creating New Pull Record")
            rec = self.new_pull_record(request)
            ident = rec['_id']
            self.logger.debug("ENQUEUEING Request")
            self.update_mongo_state(ident, 'ENQUEUED')
            request['tag'] = request['pulltag']
            request['session'] = session
            self.logger.debug("Calling do pull with queue=%s",
                              request['system'])
            self.workers.dopull(ident, request, testmode=testmode)

            memo = "pull request queued s=%s t=%s" \
                % (request['system'], request['tag'])
            self.logger.info(memo)

            self.update_mongo(ident, {'last_pull': time()})

        return rec

    def mngrimport(self, session, image, testmode=0):
        """
        Import an image directly from a file (allowed users only).

        image must provide 'system', 'itype', 'tag', 'filepath' and
        'format'.  Returns the mongo record tracking the import.
        Raises OSError for an invalid session.
        """
        meta = {}
        fp = image['filepath']
        request = {
            'system': image['system'],
            'itype': image['itype'],
            'pulltag': image['tag'],
            'filepath': image['filepath'],
            'format': image['format'],
            'meta': meta
        }
        # fixed typo ('mngrmport') and switched to lazy logger formatting
        self.logger.debug('mngrimport called for file %s', fp)
        if not self.check_session(session, request['system']):
            self.logger.warn('Invalid session on system %s', request['system'])
            raise OSError("Invalid Session")
        # Skip checks about previous requests for now.
        # Future work could check the fasthash and
        # not import if they're the same.
        q = {
            'system': image['system'],
            'itype': image['itype'],
            'pulltag': image['tag']
        }
        rec = self._images_find_one(q)
        if not self._pullable(rec):
            return rec
        # NOTE: ACL handling is intentionally skipped here for now
        # (see pull() for the equivalent logic).

        self.logger.debug("Creating New Import Record")
        # new_pull_record works for import too
        rec = self.new_pull_record(request)
        ident = rec['_id']
        self.logger.debug("ENQUEUEING Request, ident %s", ident)
        self.update_mongo_state(ident, 'ENQUEUED')
        request['tag'] = request['pulltag']
        request['session'] = session
        self.logger.debug("Calling wrkimport with queue=%s", request['system'])
        self.workers.dowrkimport(ident, request, testmode=testmode)

        memo = "import request queued s=%s t=%s" \
            % (request['system'], request['tag'])
        self.logger.info(memo)

        self.update_mongo(ident, {'last_pull': time()})

        return rec

    def update_mongo_state(self, ident, state, info=None):
        """
        Set the status of the image with _id==ident.

        'SUCCESS' is stored as 'READY'.  *info*, if it is a dict, may
        carry a 'heartbeat' timestamp and a 'message' to record.
        """
        status = 'READY' if state == 'SUCCESS' else state
        updates = {'status': status, 'status_message': ''}
        if isinstance(info, dict):
            for src, dst in (('heartbeat', 'last_heartbeat'),
                             ('message', 'status_message')):
                if src in info:
                    updates[dst] = info[src]
        self._images_update({'_id': ident}, {'$set': updates})

    def add_tag(self, ident, system, tag):
        """
        Attach *tag* to the image with mongo id *ident* (not the image id).

        The tag is first detached from any other image on *system*, and a
        legacy scalar 'tag' field is converted to a list before adding.
        Always returns True.
        """
        # a tag may belong to at most one image per system
        self.remove_tag(system, tag)
        record = self._images_find_one({'_id': ident})
        has_scalar_tag = (record is not None and 'tag' in record
                          and not isinstance(record['tag'], list))
        if has_scalar_tag:
            self.logger.info('Fixing tag for non-list %s %s',
                             ident, str(record['tag']))
            self._images_update({'_id': ident},
                                {'$set': {'tag': [record['tag']]}})
        self._images_update({'_id': ident}, {'$addToSet': {'tag': tag}})
        return True

    def remove_tag(self, system, tag):
        """
        Detach *tag* from every image on *system*.  Always returns True.
        """
        selector = {'system': system, 'tag': {'$in': [tag]}}
        # multi=True: strip the tag from all matching records, not just one
        self._images_update(selector, {'$pull': {'tag': tag}}, multi=True)
        return True

    def update_acls(self, ident, response):
        """
        Apply the worker's ACL-update result for pull record *ident*.

        response must contain 'id', 'userACL', 'groupACL' and 'private'.
        If a READY record already exists for the image id, its ACL fields
        are updated and the pull record is deleted; otherwise the pull
        record itself is promoted to READY.
        """
        self.logger.debug("Update ACLs called for %s %s", ident, str(response))
        pullrec = self._images_find_one({'_id': ident})
        if pullrec is None:
            # nothing to update against; log and bail out
            self.logger.error('ERROR: Missing pull request (r=%s)',
                              str(response))
            return
        # Look for an existing READY record for this image id on this system
        rec = self._images_find_one({
            'id': response['id'],
            'status': 'READY',
            'system': pullrec['system']
        })
        if rec is None:
            # No READY record to update -- odd for an ACL update (it happens
            # in tests).  Note it and promote the pull record itself.
            msg = "WARNING: No image record found for an ACL update"
            self.logger.warn(msg)
            response['last_pull'] = time()
            response['status'] = 'READY'
            self.update_mongo(ident, response)
            self.add_tag(ident, pullrec['system'], pullrec['pulltag'])
        else:
            # update the ACL fields on the existing READY record and
            # refresh its pull time
            updates = {
                'userACL': response['userACL'],
                'groupACL': response['groupACL'],
                'private': response['private'],
                'last_pull': time()
            }
            self.logger.debug("Doing ACLs update")
            # NOTE(review): these two mutations of `response` are not used
            # below (the update uses `updates`) -- possibly leftover; confirm.
            response['last_pull'] = time()
            response['status'] = 'READY'
            self.update_mongo(rec['_id'], updates)
            self._images_remove({'_id': ident})

    def complete_pull(self, ident, response):
        """
        Transition a completed pull request to an available image.

        ident is the mongo id of the pull record; response is the worker
        result and must contain 'id' (the image id) and 'tag'.
        Returns True when the image already existed (duplicate pull),
        otherwise None after promoting the pull record to READY.
        """

        self.logger.debug("Complete called for %s %s", ident, str(response))
        pullrec = self._images_find_one({'_id': ident})
        if pullrec is None:
            self.logger.warn('Missing pull request (r=%s)', str(response))
            return
        # Check that this image ident doesn't already exist for this system
        rec = self._images_find_one({
            'id': response['id'],
            'system': pullrec['system'],
            'status': 'READY'
        })
        tag = pullrec['pulltag']
        if rec is not None:
            # So we already had this image.
            # Let's delete the pull record.
            # TODO: update the pull time of the matching id
            self.logger.warn('Duplicate image')
            update_rec = {'last_pull': time()}
            self.update_mongo(rec['_id'], update_rec)

            self._images_remove({'_id': ident})
            # However it could be a new tag.  So let's update the tag.
            # BUG FIX: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit.  list.index raises ValueError
            # when the tag is absent; KeyError covers a missing 'tag' key.
            try:
                rec['tag'].index(response['tag'])
            except (ValueError, KeyError):
                self.add_tag(rec['_id'], pullrec['system'], tag)
            return True
        else:
            response['last_pull'] = time()
            response['status'] = 'READY'
            self.update_mongo(ident, response)
            self.add_tag(ident, pullrec['system'], tag)

    def update_mongo(self, ident, resp):
        """
        Copy recognized fields from *resp* onto the image with _id==ident.

        Only the keys listed in the mapping below are persisted; a public
        image (private is False) always gets empty ACL lists.
        """
        # response key -> mongo field name
        mappings = {
            'id': 'id',
            'entrypoint': 'ENTRY',
            'env': 'ENV',
            'workdir': 'WORKDIR',
            'last_pull': 'last_pull',
            'userACL': 'userACL',
            'groupACL': 'groupACL',
            'private': 'private',
            'status': 'status'
        }
        if resp.get('private') is False:
            # public images carry no ACLs
            resp['userACL'] = []
            resp['groupACL'] = []

        setline = dict((field, resp[key])
                       for key, field in mappings.items() if key in resp)

        self._images_update({'_id': ident}, {'$set': setline})

    def get_state(self, ident):
        """
        Return the status string of the image with _id==ident, or None
        when the record (or its status field) is missing.  Stale FAILURE
        records are cleaned up first via update_states().
        """
        self.update_states()
        record = self._images_find_one({'_id': ident}, {'status': 1})
        if record is None:
            return None
        return record.get('status')

    def update_states(self):
        """
        Remove FAILURE records whose retry window (pullupdatetimeout
        seconds after the last pull) has passed.
        """
        for failed in self._images_find({'status': 'FAILURE'}):
            expiry = self.pullupdatetimeout + failed['last_pull']
            # only reap records that have been failed long enough
            if time() > expiry:
                self._images_remove({'_id': failed['_id']})

    def autoexpire(self, session, system, testmode=0):
        """
        Auto expire images and do cleanup on *system* (admins only).

        Removes non-READY records older than pulltimeout and queues
        expiration for READY records past their 'expiration' time.
        Returns False for non-admins, otherwise the list of expired
        image ids ('unknown' when a record lacks an id).
        """
        # While this should be safe, let's restrict this to admins
        if not self._isadmin(session, system):
            return False
        # Cleanup - Lookup for things stuck in non-READY state
        self.update_states()
        removed = []
        for rec in self._images_find({
                'status': {
                    '$ne': 'READY'
                },
                'system': system
        }):
            if 'last_pull' not in rec:
                self.logger.warning('Image missing last_pull for pulltag:' +
                                    rec['pulltag'])
                continue
            if time() > rec['last_pull'] + self.pulltimeout:
                removed.append(rec['_id'])
                self._images_remove({'_id': rec['_id']})

        expired = []
        # Look for READY images that haven't been pulled recently
        for rec in self._images_find({'status': 'READY', 'system': system}):
            if 'expiration' not in rec:
                continue
            if rec['expiration'] < time():
                # BUG FIX: rec['id'] was read unconditionally here, raising
                # KeyError for records without an 'id' -- a case the code
                # below explicitly handled with 'unknown'.
                image_id = rec.get('id', 'unknown')
                self.logger.debug("expiring %s", image_id)
                ident = rec.pop('_id')
                self.expire_id(rec, ident)
                expired.append(image_id)
            self.logger.debug(rec['expiration'] > time())
        return expired

    def expire_id(self, rec, ident, testmode=0):
        """
        Queue an expiration for the record *rec* with mongo id *ident*.
        rec must carry a 'system' field (used for logging only here).
        """
        self.logger.debug("Calling do expire id=%s TM=%d" % (ident, testmode))
        self.workers.doexpire(ident, rec)
        self.logger.info("expire request queued s=%s t=%s",
                         rec['system'], ident)

    def expire(self, session, image, testmode=0):
        """
        Queue expiration of the image matching *image* (admins only).

        Returns False for non-admins, None when no matching record
        exists, True once the expire request is queued.
        """
        if not self._isadmin(session, image['system']):
            return False
        record = self._images_find_one({
            'system': image['system'],
            'itype': image['itype'],
            'tag': {
                '$in': [image['tag']]
            }
        })
        if record is None:
            return None
        ident = record.pop('_id')
        self.logger.debug("Calling do expire with queue=%s id=%s TM=%d"
                          % (image['system'], ident, testmode))
        self.workers.doexpire(ident, record)
        self.logger.info("expire request queued s=%s t=%s"
                         % (image['system'], image['tag']))
        return True

    @mongo_reconnect_reattempt
    def _images_remove(self, *args, **kwargs):
        """Remove image record(s) from the mongo images collection.

        Thin wrapper around ``self.images.remove``; the decorator
        reattempts the call after a mongo reconnect.
        """
        return self.images.remove(*args, **kwargs)

    @mongo_reconnect_reattempt
    def _images_update(self, *args, **kwargs):
        """Update image record(s) in the mongo images collection.

        Thin wrapper around ``self.images.update``; the decorator
        reattempts the call after a mongo reconnect.
        """
        return self.images.update(*args, **kwargs)

    @mongo_reconnect_reattempt
    def _images_find(self, *args, **kwargs):
        """Find image records in the mongo images collection.

        Thin wrapper around ``self.images.find``; the decorator
        reattempts the call after a mongo reconnect.
        """
        return self.images.find(*args, **kwargs)

    @mongo_reconnect_reattempt
    def _images_find_one(self, *args, **kwargs):
        """Find a single image record in the mongo images collection.

        Thin wrapper around ``self.images.find_one``; the decorator
        reattempts the call after a mongo reconnect.
        """
        return self.images.find_one(*args, **kwargs)

    @mongo_reconnect_reattempt
    def _images_insert(self, *args, **kwargs):
        """Insert an image record into the mongo images collection.

        Thin wrapper around ``self.images.insert``; the decorator
        reattempts the call after a mongo reconnect.
        """
        return self.images.insert(*args, **kwargs)

    @mongo_reconnect_reattempt
    def _metrics_insert(self, *args, **kwargs):
        """Insert a record into the mongo metrics collection.

        No-op (returns None) when metrics collection is disabled
        (``self.metrics is None``); the decorator reattempts the call
        after a mongo reconnect.
        """
        if self.metrics is not None:
            return self.metrics.insert(*args, **kwargs)
Esempio n. 53
0
class WhenFunctionalTestingGameClient(unittest.TestCase):
    """End-to-end test of GameClient against a live game server process.

    setUp launches the server in a child process and builds a client
    pointed at 127.0.0.1:5000; the single test drives a full session:
    registration, moves, a suggestion and a final accusation.
    """

    def setUp(self):
        # setup game server to run on a separate process
        self.game_server = Process(target=start_server)
        self.game_server.start()

        # create the game client
        self.client = GameClient(host="127.0.0.1", port="5000")

        # two fixed players used throughout the test
        self.player_one = "Arthur"
        self.player_one_suspect = game_state.PEACOCK
        self.player_two = "Steven"
        self.player_two_suspect = game_state.PLUM

    def test_game_client(self):
        """Walk through a complete game; the server process is always
        terminated in the finally block."""
        try:
            # give the game server process a chance to start
            time.sleep(3)

            # test registering players and choosing suspects
            self.client.register_player(self.player_one)
            self.client.choose_suspect(
                self.player_one, self.player_one_suspect)
            self.client.register_player(
                self.player_two)
            self.client.choose_suspect(
                self.player_two, self.player_two_suspect)

            # retrieve the registered players with the client and validate the
            # return values
            players = self.client.get_players()
            for player in players:
                self.assertIsInstance(player, game_state.Player)
            self.assertTrue(
                self.player_one in [player.username
                for player in players])
            self.assertTrue(
                self.player_two in [player.username
                for player in players])
            self.assertTrue(
                self.player_one_suspect in [player.suspect
                for player in players])
            self.assertTrue(
                self.player_two_suspect in [player.suspect
                for player in players])

            # start a new game with the client and validate a GameState object
            # is returned
            game = self.client.start_new_game()
            self.assertTrue(game, game_state.GameState)

            game = self.client.get_game_state(game.game_id)
            self.assertTrue(game, game_state.GameState)

            # move player 1 from start space to hallway
            player = game.current_player
            player_1_current_space = game.game_board[player.suspect]
            move_space = player_1_current_space.connected_spaces[0]
            game = self.client.move_player(
                player.username, player.suspect, move_space)
            self.assertEqual(
                game.turn_status, game_state.AWAITING_ACCUSATION_OR_END_TURN)
            game = self.client.end_turn(player.username)
            player_1_current_space = game.game_board[move_space]
            self.assertEqual(game.turn_status, game_state.AWAITING_MOVE)

            # move player 2 from start space to hallway
            player = game.current_player
            player_2_current_space = game.game_board[player.suspect]
            move_space = player_2_current_space.connected_spaces[0]
            game = self.client.move_player(
                player.username, player.suspect, move_space)
            self.assertEqual(
                game.turn_status, game_state.AWAITING_ACCUSATION_OR_END_TURN)
            game = self.client.end_turn(player.username)

            player_2_current_space = game.game_board[move_space]
            self.assertEqual(game.turn_status, game_state.AWAITING_MOVE)

            # move player 1 from hallway to room
            player = game.current_player
            move_space = player_1_current_space.connected_spaces[0]
            game = self.client.move_player(
                player.username, player.suspect, move_space)
            self.assertEqual(
                game.turn_status, game_state.AWAITING_SUGGESTION)

            # make suggestion based on room player is currently in
            game = self.client.make_suggestion(
                player.username, game_state.MUSTARD,
                game_state.REVOLVER,
                move_space
            )

            # if there is a player that can prove the suggestion false
            # then test the suggestion response
            if game.suggestion_response_player:
                # moving is not allowed while a suggestion response is pending
                with self.assertRaises(errors.GameClientException):
                    game = self.client.move_player(
                        player.username, player.suspect, move_space)
                self.assertEqual(
                    game.turn_status, game_state.AWAITING_SUGGESTION_RESPONSE)

                # respond with any card the responder holds that matches
                # the suggestion (weapon, room or suspect)
                response_player = game.suggestion_response_player
                suggestion = game.current_suggestion
                gamecard_item = list(
                    {suggestion.weapon, suggestion.room, suggestion.suspect}
                    &
                    set(card.item for card in response_player.game_cards))[0]
                game = self.client.make_suggestion_response(
                    response_player.username, gamecard_item)

            self.assertEqual(
                game.turn_status, game_state.AWAITING_ACCUSATION_OR_END_TURN)
            game = self.client.end_turn(player.username)

            self.assertEqual(game.turn_status, game_state.AWAITING_MOVE)

            # ending the turn must advance to a different player
            last_player = player
            player = game.current_player
            self.assertNotEqual(player.username, last_player.username)

            # test accusation: read the correct solution from the case file
            suspect = [
                card.item for card in game.case_file
                if card.type == game_state.SUSPECT
            ][0]
            weapon = [
                card.item for card in game.case_file
                if card.type == game_state.WEAPON
            ][0]
            room = [
                card.item for card in game.case_file
                if card.type == game_state.ROOM
            ][0]

            game = self.client.make_accusation(
                player.username, suspect, weapon, room)

            for message in  game.player_messages:
                print message

            self.client.destroy_game(game.game_id)

        finally:
            self.game_server.terminate()
Esempio n. 54
0
def action_import_apks(storage, apk_paths,
                       copy_apk = False, copy_to_mongodb = False,
                       update = False, tag = None,
                       # shared memory
                       cnt_imported_apks = None, total_apk_count = None, import_finished = None,
                       # concurrent settings
                       concurrency = None
                       ):

    ''' Import the apks from the `apk_paths` and create the file system structure
    where the results will be kept, specified by `storage`.

    Parameters
    ----------
    storage : RedundantStorage
        The store to use.
    apk_paths : iterable<str>
        The apk files and/or directories.
    copy_apk : bool
        Import the apk file to the `import_dir` (copy it).
    copy_to_mongodb : bool, optional (default is False)
        Also import into MongoDB. Useful for the distributed analysis.
    update : bool
        Update apks that have already been imported.
    tag : str, optional (default is None)
        Some tag
    cnt_imported_apks : multiprocessing.Value<int>, optional (default is None)
        If given, use for progress updating.
    total_apk_count : multiprocessing.Value<int>, optional (default is None)
        If given, use for total count of apks.
    import_finished : multiprocessing.Value<byte>, optional (default is None)
        If given, use to signal that import has been completed.
    concurrency : int, optional (default is number of cpus)
        Number of processes to use for the import.
    '''
    from androlyze.loader.ApkImporter import ApkImporter

    # get single paths to apks so we get the correct total count of apks
    clilog.info("looking for apks in given paths ... ")
    apk_paths = ApkImporter.get_apks_from_list_or_dir(apk_paths)

    if total_apk_count is not None:
        # may be time consuming for recursive lookup
        apk_paths, total_apk_count.value = Util.count_iterable_n_clone(apk_paths)

    # create count if not given
    if cnt_imported_apks is None:
        cnt_imported_apks = Value('i', 0, lock = RLock())

    # set concurrency
    if concurrency is None:
        concurrency = cpu_count()
    log.warn("Using %d processes", concurrency)

    clilog.info("Storage dir is %s" % storage.fs_storage.store_root_dir)
    if copy_apk:
        clilog.info("Copying APKs to %s ..." % storage.fs_storage.store_root_dir)

    def import_apks(apk_paths):
        # worker body: import the given subset of apks and bump the
        # shared progress counter for each success
        apk_importer = ApkImporter(apk_paths, storage)
        for apk in apk_importer.import_apks(copy_apk = copy_apk, copy_to_mongodb = copy_to_mongodb,
                                                update = update, tag = tag):

            clilog.info("imported %s", apk.short_description())

            # use shared memory counter if given
            if cnt_imported_apks is not None:
                with cnt_imported_apks.get_lock():
                    cnt_imported_apks.value += 1

    pool = []

    # don't convert generator to list if only 1 process wanted
    apk_paths = [apk_paths] if concurrency == 1 else Util.split_n_uniform_distri(list(apk_paths), concurrency)

    # start parallel import
    # multiprocessing's pool causes pickle errors
    for i in range(concurrency):
        p = Process(target = import_apks, args = (apk_paths[i], ))
        log.debug("starting process %s", p)
        pool.append(p)
        p.start()

    for worker in pool:
        # BUG FIX: previously logged the loop variable `p` (always the last
        # started process) instead of the process actually being joined.
        log.debug("joined on process %s", worker)
        worker.join()

    apks_imported = cnt_imported_apks.value != 0
    # show some message that no APK has been imported
    if not apks_imported:
        log.warn("No .apk file has been imported! This means no .apk file has been found or they already have been imported.")
    else:
        clilog.info("done")

    # because not all apks may be importable, we cannot use the count to signal that the import is done
    if import_finished is not None:
        import_finished.value = 1

    clilog.info("Imported %d apks", cnt_imported_apks.value)
Esempio n. 55
0
class AuthorizationCodeTestCase(unittest.TestCase):
    """Integration test of the OAuth2 authorization-code + refresh flows.

    Spawns a provider (port 15486) and a client application (port 15487)
    in child processes, then exercises the token endpoints over HTTP.
    Both processes are terminated in tearDown.
    """

    def setUp(self):
        # process handles; created in the test, cleaned up in tearDown
        self.client = None
        self.provider = None

    def test_request_access_token(self):
        """Obtain an access token via the client app, then refresh it."""
        def run_provider(queue):
            # child process: serve the OAuth2 provider; report readiness
            # (or the failure reason) through `queue`
            try:

                redirect_uri = "http://127.0.0.1:15487/callback"

                stores = store_factory(client_identifier="abc",
                                       client_secret="xyz",
                                       redirect_uris=[redirect_uri])

                provider = Provider(
                    access_token_store=stores["access_token_store"],
                    auth_code_store=stores["auth_code_store"],
                    client_store=stores["client_store"],
                    site_adapter=TestSiteAdapter(),
                    token_generator=Uuid4())

                provider.add_grant(AuthorizationCodeGrant(expires_in=120))
                provider.add_grant(RefreshToken(expires_in=60))

                app = Wsgi(server=provider)

                httpd = make_server('',
                                    15486,
                                    app,
                                    handler_class=NoLoggingHandler)

                queue.put({"result": 0})

                httpd.serve_forever()
            except Exception as e:
                queue.put({"result": 1, "error_message": str(e)})

        def run_client(queue):
            # child process: serve the OAuth2 client application; report
            # readiness (or the failure reason) through `queue`
            try:
                app = ClientApplication(
                    callback_url="http://127.0.0.1:15487/callback",
                    client_id="abc",
                    client_secret="xyz",
                    provider_url="http://127.0.0.1:15486")

                httpd = make_server('',
                                    15487,
                                    app,
                                    handler_class=NoLoggingHandler)

                queue.put({"result": 0})

                httpd.serve_forever()
            except Exception as e:
                queue.put({"result": 1, "error_message": str(e)})

        # pattern used to sanity-check that issued tokens look like UUID4s
        uuid_regex = "^[a-z0-9]{8}\-[a-z0-9]{4}\-[a-z0-9]{4}\-[a-z0-9]{4}-[a-z0-9]{12}$"

        ready_queue = Queue()

        # start the provider and block until it reports readiness
        self.provider = Process(target=run_provider, args=(ready_queue, ))
        self.provider.start()

        provider_started = ready_queue.get()

        if provider_started["result"] != 0:
            raise Exception("Error starting Provider process with message"
                            "'{0}'".format(provider_started["error_message"]))

        # start the client app and block until it reports readiness
        self.client = Process(target=run_client, args=(ready_queue, ))
        self.client.start()

        client_started = ready_queue.get()

        if client_started["result"] != 0:
            raise Exception("Error starting Client Application process with "
                            "message '{0}'".format(
                                client_started["error_message"]))

        # drive the authorization-code flow through the client app
        access_token_result = urlopen("http://127.0.0.1:15487/app").read()

        access_token_data = json.loads(access_token_result.decode('utf-8'))

        self.assertEqual(access_token_data["token_type"], "Bearer")
        self.assertEqual(access_token_data["expires_in"], 120)
        self.assertRegexpMatches(access_token_data["access_token"], uuid_regex)
        self.assertRegexpMatches(access_token_data["refresh_token"],
                                 uuid_regex)

        # exchange the refresh token directly against the provider
        request_data = {
            "grant_type": "refresh_token",
            "refresh_token": access_token_data["refresh_token"],
            "client_id": "abc",
            "client_secret": "xyz"
        }

        refresh_token_result = urlopen("http://127.0.0.1:15486/token",
                                       urlencode(request_data).encode('utf-8'))

        refresh_token_data = json.loads(
            refresh_token_result.read().decode('utf-8'))

        self.assertEqual(refresh_token_data["token_type"], "Bearer")
        self.assertEqual(refresh_token_data["expires_in"], 120)
        self.assertRegexpMatches(refresh_token_data["access_token"],
                                 uuid_regex)

    def tearDown(self):
        # stop whichever child processes were started
        if self.client is not None:
            self.client.terminate()
            self.client.join()

        if self.provider is not None:
            self.provider.terminate()
            self.provider.join()
Esempio n. 56
0
def start_example_app_process():
    """Spawn the example server in a child process and return its handle."""
    # XXX DO NOT FORGET TO KILL THE PROCESS IF THE TEST DOES NOT SUCCEED
    server_proc = Process(target=start_example_server)
    server_proc.start()
    return server_proc
class AuthorizationCodeTestCase(unittest.TestCase):
    def setUp(self):
        self.client = None
        self.provider = None

    def test_request_access_token(self):
        """End-to-end check of the authorization code grant.

        Spawns an OAuth2 provider and an example client application in
        separate processes, fetches an access token through the client,
        then exchanges the returned refresh token for a fresh access token
        directly at the provider's token endpoint.

        Fixes over the previous version: the UUID pattern is now a raw
        string (the old literal contained invalid "\\-" escape sequences),
        and the provider-startup error message no longer runs the words
        together ("message'...'" -> "message '...'").
        """
        def run_provider(queue):
            # Runs in a child process; reports startup success or failure
            # through ``queue`` so the parent does not race server startup.
            try:

                redirect_uri = "http://127.0.0.1:15487/callback"

                stores = store_factory(client_identifier="abc",
                                       client_secret="xyz",
                                       redirect_uris=[redirect_uri])

                provider = Provider(access_token_store=stores["access_token_store"],
                                    auth_code_store=stores["auth_code_store"],
                                    client_store=stores["client_store"],
                                    site_adapter=TestSiteAdapter(),
                                    token_generator=Uuid4())

                provider.add_grant(AuthorizationCodeGrant(expires_in=120))
                provider.add_grant(RefreshToken(expires_in=60))

                app = Wsgi(server=provider)

                httpd = make_server('', 15486, app,
                                    handler_class=NoLoggingHandler)

                # Signal readiness only after the socket is bound.
                queue.put({"result": 0})

                httpd.serve_forever()
            except Exception as e:
                queue.put({"result": 1, "error_message": str(e)})

        def run_client(queue):
            # Child process hosting the example client application that
            # drives the authorization code flow against the provider.
            try:
                app = ClientApplication(
                    callback_url="http://127.0.0.1:15487/callback",
                    client_id="abc",
                    client_secret="xyz",
                    provider_url="http://127.0.0.1:15486")

                httpd = make_server('', 15487, app,
                                    handler_class=NoLoggingHandler)

                queue.put({"result": 0})

                httpd.serve_forever()
            except Exception as e:
                queue.put({"result": 1, "error_message": str(e)})

        # Raw string: "\-" is not a valid escape sequence in a plain literal.
        uuid_regex = r"^[a-z0-9]{8}\-[a-z0-9]{4}\-[a-z0-9]{4}\-[a-z0-9]{4}-[a-z0-9]{12}$"

        ready_queue = Queue()

        self.provider = Process(target=run_provider, args=(ready_queue,))
        self.provider.start()

        provider_started = ready_queue.get()

        if provider_started["result"] != 0:
            raise Exception("Error starting Provider process with message "
                            "'{0}'".format(provider_started["error_message"]))

        self.client = Process(target=run_client, args=(ready_queue,))
        self.client.start()

        client_started = ready_queue.get()

        if client_started["result"] != 0:
            raise Exception("Error starting Client Application process with "
                            "message '{0}'"
                            .format(client_started["error_message"]))

        # Hitting the client app drives the full authorization code flow
        # and returns the token response it obtained from the provider.
        access_token_result = urlopen("http://127.0.0.1:15487/app").read()

        access_token_data = json.loads(access_token_result.decode('utf-8'))

        self.assertEqual(access_token_data["token_type"], "Bearer")
        self.assertEqual(access_token_data["expires_in"], 120)
        self.assertRegexpMatches(access_token_data["access_token"],
                                 uuid_regex)
        self.assertRegexpMatches(access_token_data["refresh_token"],
                                 uuid_regex)

        # Exchange the refresh token directly at the provider's endpoint.
        request_data = {"grant_type": "refresh_token",
                        "refresh_token": access_token_data["refresh_token"],
                        "client_id": "abc",
                        "client_secret": "xyz"}

        refresh_token_result = urlopen(
            "http://127.0.0.1:15486/token",
            urlencode(request_data).encode('utf-8')
        )

        refresh_token_data = json.loads(refresh_token_result.read().decode('utf-8'))

        self.assertEqual(refresh_token_data["token_type"], "Bearer")
        self.assertEqual(refresh_token_data["expires_in"], 120)
        self.assertRegexpMatches(refresh_token_data["access_token"],
                                 uuid_regex)

    def tearDown(self):
        if self.client is not None:
            self.client.terminate()
            self.client.join()

        if self.provider is not None:
            self.provider.terminate()
            self.provider.join()