Example no. 1
class CGSBoundedSemaphore:
    def __init__(self, value):
        self.boundedSemaphore = BoundedSemaphore(value)
        self.datasetQueueLock = RLock()
        self.datasetQueue = []

    def acquire(self, datasetId):
        try:
            self.datasetQueueLock.acquire()
            self.datasetQueue.append(datasetId)
        finally:
            self.datasetQueueLock.release()
        self.boundedSemaphore.acquire()

    def release(self, datasetId):
        try:
            self.datasetQueueLock.acquire()
            self.datasetQueue.remove(datasetId)
        except ValueError:
            pass  # datasetId was not queued; nothing to remove
        finally:
            self.datasetQueueLock.release()
        self.boundedSemaphore.release()

    def getIndexDatasetId(self, datasetId):
        try:
            self.datasetQueueLock.acquire()
            return self.datasetQueue.index(datasetId)
        except ValueError:
            return -1  # datasetId is not in the queue
        finally:
            self.datasetQueueLock.release()

    def status(self):
        return list(self.datasetQueue)
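
A minimal usage sketch of the class above (the worker function and dataset IDs are made up for illustration; the class itself needs from threading import BoundedSemaphore, RLock):

from threading import BoundedSemaphore, RLock, Thread

sem = CGSBoundedSemaphore(2)  # at most two datasets processed at once

def process_dataset(dataset_id):
    sem.acquire(dataset_id)  # registers the ID, then blocks on the semaphore
    try:
        print(dataset_id, "queue position:", sem.getIndexDatasetId(dataset_id))
    finally:
        sem.release(dataset_id)

threads = [Thread(target=process_dataset, args=(i,)) for i in range(5)]
for t in threads:
    t.start()
for t in threads:
    t.join()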
Example no. 2
class BoundedExecutor:
    """BoundedExecutor behaves as a ThreadPoolExecutor which will block on
    calls to submit() once the limit given as "bound" work items are queued for
    execution.
    :param bound: Integer - the maximum number of items in the work queue
    :param max_workers: Integer - the size of the thread pool
    
    https://www.bettercodebytes.com/theadpoolexecutor-with-a-bounded-queue-in-python/
    """
    def __init__(self, bound, max_workers, callback=None, callbackArgs=None):
        self.executor = ThreadPoolExecutor(max_workers=max_workers)
        self.semaphore = BoundedSemaphore(bound + max_workers)
        self.__callback__ = callback if (callable(callback)) else None
        self.__callbackArgs__ = callbackArgs

    """See concurrent.futures.Executor#submit"""
    def submit(self, fn, *args, **kwargs):
        self.semaphore.acquire()
        try:
            future = self.executor.submit(fn, *args, **kwargs)
        except:
            self.semaphore.release()
            raise
        else:
            future.add_done_callback(lambda x: self.semaphore.release())
            return future

    """See concurrent.futures.Executor#shutdown"""
    def shutdown(self, wait=True):
        self.executor.shutdown(wait)
        if (self.__callback__):
            try:
                self.__callback__(self.__callbackArgs__)
            except Exception:
                pass  # the shutdown callback is best-effort; never propagate
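
A sketch of how this executor might be driven (task is a made-up function): with bound=8 and max_workers=4 the semaphore holds 8 + 4 = 12 permits, so a thirteenth unfinished submission blocks the producer instead of growing the queue.

import time

def task(n):
    time.sleep(0.1)
    return n * n

executor = BoundedExecutor(bound=8, max_workers=4)
futures = [executor.submit(task, n) for n in range(50)]  # blocks once 12 are pending
executor.shutdown(wait=True)
print(sum(f.result() for f in futures))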
Example no. 3
class VCNL4020():

	_ALS_OD       = 0b00010000	# start an on-demand ambient light measurement
	_PROX_OD      = 0b00001000	# start an on-demand proximity measurement
	_ALS_EN       = 0b00000100	# enable periodic ambient light measurement
	_PROX_EN      = 0b00000010	# enable periodic proximity measurement
	_SELFTIMED_EN = 0b00000001	# enable the built-in timer

	_CONT_CONV    = 0b10000000	# enable continuous conversion
	_AMBIENT_RATE = 0b00010000	# ambient light measurement rate (default: 2 samples/s)
	_AUTO_OFFSET  = 0b00001000	# enable automatic offset compensation
	_AVERAGING    = 0b00000101	# averaging (default: 32 conversions)

	_COMMAND_REG       = 0x80	# command register
	_PID_REG           = 0x81	# product ID register
	_PROX_RATE_REG     = 0x82	# proximity measurement rate register
	_IR_CURRENT_REG    = 0x83	# IR LED current register for proximity measurement (default = 20 mA)
	_AMBIENT_PARAM_REG = 0x84	# ambient light sensor parameter register

	_AMBIENT_MSB       = 0x85	# ambient light, high byte
	_AMBIENT_LSB       = 0x86	# ambient light, low byte

	_PROX_MSB          = 0x87	# proximity, high byte
	_PROX_LSB          = 0x88	# proximity, low byte
	
	def __init__(self, i2c_addr = 0x13, busno = 1):
		self.addr = i2c_addr
		self.i2c = smbus.SMBus(busno)
		
		self._write_reg(self._COMMAND_REG, self._ALS_OD  |\
										   self._PROX_OD |\
										   self._ALS_EN  |\
										   self._PROX_EN |\
										   self._SELFTIMED_EN )
										   
		self._write_reg(self._IR_CURRENT_REG, 2 )	# 20mA
										   
		self._write_reg(self._AMBIENT_PARAM_REG, self._CONT_CONV    |\
												 self._AMBIENT_RATE |\
												 self._AUTO_OFFSET  |\
												 self._AVERAGING )
		self.semaphore = BoundedSemaphore()
		time.sleep(0.6)			# wait for the first measurement
		
	def _write_reg(self, reg, value):
		self.i2c.write_byte_data(self.addr, reg, value)
	
	@property
	def luminance(self):
		self.semaphore.acquire()
		try:
			d = self.i2c.read_i2c_block_data(self.addr, self._AMBIENT_MSB, 2)
		finally:
			self.semaphore.release()	# never leak the permit on an I2C error
		return (d[0] * 256 + d[1])

	@property
	def proximity(self):
		self.semaphore.acquire()
		try:
			d = self.i2c.read_i2c_block_data(self.addr, self._PROX_MSB, 2)
		finally:
			self.semaphore.release()
		return (d[0] * 256 + d[1])
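
A short read loop for the driver above, assuming a sensor is actually wired to I2C bus 1 at the default address 0x13 (hardware-dependent, so purely illustrative):

import time

sensor = VCNL4020()
for _ in range(5):
    print("luminance:", sensor.luminance, "proximity:", sensor.proximity)
    time.sleep(0.5)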
Example no. 4
class BoundedExecutor:
    """
    BoundedExecutor behaves as a ThreadPoolExecutor which will block on
    calls to submit() once the limit given as "bound" work items are queued for
    execution.
    :param max_queue_size: Integer - the maximum number of items in the work queue
    :param max_workers: Integer - the size of the thread pool
    """
    def __init__(self, max_queue_size, max_workers=None):
        self.semaphore = BoundedSemaphore(max_queue_size)
        self.executor = ThreadPoolExecutor(max_workers=max_workers)

    def submit(self, func, *args, **kwargs):
        """blocking submit method"""
        self.semaphore.acquire()
        try:
            future = self.executor.submit(func, *args, **kwargs)
        except:
            self.semaphore.release()
            raise
        else:
            future.add_done_callback(lambda x: self.semaphore.release())
            return future

    def shutdown(self, wait=True):
        """pass shutdown to executor"""
        self.executor.shutdown(wait=wait)
Example no. 5
    def run(self):
        """Runs threads to make requests.

        This is the target of each process spawned.  It maintains a thread
        pool, and each of the worker threads makes requests.
        """
        threadpool = BoundedSemaphore(self.settings['threads'])
        outfile = open("{0}.out".format(self.name), 'w')
        print(
            "\tSpawned process {0} (pid {1}) to handle {2} requests with {3} threads"
            .format(self.name, os.getpid(), len(self.requests),
                    self.settings['threads']))
        threads = []
        for r in self.requests:
            t = RequestThread(self.settings, threadpool, r, outfile)
            threads.append(t)

        for thread in threads:
            threadpool.acquire()
            thread.start()

        # Wait for threads to finish running
        while any(thread.is_alive() for thread in threads):
            time.sleep(1)

        outfile.close()
        print("\t{0} finished!".format(self.name))
Example no. 6
class BoundedThreadPoolExecutor(object):
    """
    The default python ThreadPoolExecutor lets its queue 
    get infinitely long. This can use excessive memory without
    any performance benefit. This class bounds the thread queue
    and only adds more items when needed. 
    Credit: https://www.bettercodebytes.com/theadpoolexecutor-with-a-bounded-queue-in-python/
    """
    def __init__(self, bound, max_workers):
        self.executor = ThreadPoolExecutor(max_workers=max_workers)
        self.semaphore = BoundedSemaphore(bound + max_workers)

    def future_done(self, future):
        self.semaphore.release()
        exception = future.exception()
        if exception is not None:
            # Note: concurrent.futures logs and ignores exceptions raised in
            # done-callbacks, so this surfaces the failure in the log only.
            raise exception

    def submit(self, fn, *args, **kwargs):
        self.semaphore.acquire()
        try:
            future = self.executor.submit(fn, *args, **kwargs)
        except ValueError:
            self.semaphore.release()
            raise
        else:
            future.add_done_callback(lambda x: self.future_done(future))
            return future

    def shutdown(self, wait=True):
        self.executor.shutdown(wait)
Example no. 7
class Pool:
    """
    连接池
    """
    def __init__(self, count: int) -> None:
        self.count = count
        # the pool holds a list of connection objects
        self.pool = [
            self._connect("conn-{}".format(x)) for x in range(self.count)
        ]
        self.sema = BoundedSemaphore(count)

    def _connect(self, conn_name):
        # create a connection; returns a Conn built from the name
        return Conn(conn_name)

    def get_conn(self):
        # take a connection from the pool: acquire the token first, then take the resource
        self.sema.acquire()
        return self.pool.pop()

    def return_conn(self, conn: Conn):
        # return a connection to the pool: give back the resource first, then release the token
        self.pool.append(conn)  # the instance, not the Conn class
        self.sema.release()
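
A usage sketch for the pool above, assuming Conn is a trivial class that just records the name it was given; get_conn() blocks once all three connections are out:

from threading import Thread

pool = Pool(3)

def worker():
    conn = pool.get_conn()  # blocks while the pool is empty
    try:
        print("using", conn)
    finally:
        pool.return_conn(conn)

threads = [Thread(target=worker) for _ in range(8)]
for t in threads:
    t.start()
for t in threads:
    t.join()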
Example no. 8
def ip():
    # read the file contents
    with open("scan_ip.txt", "r") as f:
        ftextlist = f.readlines()
    # create the semaphore once, before the loop; the original re-created it
    # on every iteration, which made the bound useless
    thread = int(e3.get())
    maxconnections = thread
    global semlock
    semlock = BoundedSemaphore(maxconnections)
    threads = []
    # iterate over the IP:port entries
    for dst in ftextlist:
        dst = dst.strip()
        a = dst.split(":")
        try:
            ip = a[0]
            port = a[1]
        except IndexError:
            messagebox.showerror('Error', 'Bad format in scan_ip.txt, please check! ' + dst)
            continue
        if ipcheak(ip):
            semlock.acquire()
            t = Thread(target=inset, args=(ip, port))
            threads.append(t)
            t.start()
        else:
            messagebox.showerror('Error', 'Bad format in scan_ip.txt, please check! ' + dst)
Example no. 10
class BoundedExecutor:
    """BoundedExecutor behaves as a ThreadPoolExecutor which will block on
    calls to submit() once the limit given as "bound" work items are queued for
    execution.
    :param bound: Integer - the maximum number of items in the work queue
    :param max_workers: Integer - the size of the thread pool
    """
    def __init__(self, bound, max_workers):
        self._delegate = ThreadPoolExecutor(max_workers=max_workers)
        self._semaphore = BoundedSemaphore(bound + max_workers)

    """See concurrent.futures.Executor#submit"""

    def submit(self, fn, *args, **kwargs):
        self._semaphore.acquire()
        try:
            future = self._delegate.submit(fn, *args, **kwargs)
        except:
            self._semaphore.release()
            raise
        else:
            future.add_done_callback(lambda x: self._semaphore.release())
            return future

    """See concurrent.futures.Executor#shutdown"""

    def shutdown(self, wait=True):
        self._delegate.shutdown(wait)
Example no. 11
def main():
    global semaphore, sends

    signal.signal(signal.SIGINT, interrupt_handler)
    args = commandline()
    print(
        ' ( Shell:{shell}, Numbers:{max_request}, Threads:{max_threads}, Retry:{max_retry} )\n'
        .format(**args.__dict__))

    semaphore = BoundedSemaphore(value=args.max_threads)
    stopwatch_start = time.time()
    for i, payload in enumerate(create_payload(args), 1):
        if attack:
            sends = i
            semaphore.acquire()
            t = Thread(target=crack, args=(i, args, payload))
            t.daemon = True  # setDaemon() is deprecated
            t.start()

    for _ in range(args.max_threads):
        semaphore.acquire()

    stopwatch = time.time() - stopwatch_start
    words = args.max_request * sends if sends else pwd_total
    speed = words / stopwatch if stopwatch else 0
    msg = '[Success] Password: {}'.format(
        pwd) if pwd else '[Failed] No password found'
    print(
        '\n\n{msg}\n[Finish] {words} words in {stopwatch:.3f} seconds. ({speed:.0f} w/s)'
        .format(**locals()))
Example no. 12
def ip():
    with open("scan_ip.txt", "r") as f:
        ftextlist = f.readlines()
        if ftextlist == []:
            messagebox.showerror('Error', 'No IP addresses in scan_ip.txt, please check!')
    startport = int(e1.get())
    endport = int(e2.get()) + 1
    thread = int(e3.get())
    maxconnections = thread
    global semlock
    semlock = BoundedSemaphore(maxconnections)
    threads = []
    for dst in ftextlist:
        goBtn.config(state=DISABLED)
        quit.config(state=DISABLED)
        dst = dst.strip()
        if ipcheak(dst):
            port = startport  # reset per host, or only the first host gets scanned
            while port < endport:  # scan up to the requested end port
                semlock.acquire()
                t = Thread(target=inset, args=(dst, port))
                threads.append(t)
                t.start()
                scr.update()
                port = port + 1
        else:
            messagebox.showerror('Error', 'Bad format in scan_ip.txt, please check! ' + dst)
    goBtn.config(state=NORMAL)
    quit.config(state=NORMAL)
Example no. 13
def discover(aDict):
    """Function docstring for discover TBD

 Args:
  - a_dom_id (required)
  - network_id (required)

 Output:
 """
    from time import time
    from threading import Thread, BoundedSemaphore
    from zdcp.rest.ipam import network_discover as ipam_discover, address_allocate
    from zdcp.devices.generic import Device

    def __detect_thread(aIP, aDB, aSema):
        __dev = Device(aIP)
        aDB[aIP['ip']] = __dev.detect()['info']
        aSema.release()
        return True

    start_time = int(time())
    ipam = ipam_discover({'id': aDict['network_id']})
    ret = {'errors': 0, 'start': ipam['start'], 'end': ipam['end']}

    with DB() as db:
        db.do("SELECT id,name FROM device_types")
        devtypes = db.get_dict('name')
    dev_list = {}
    try:
        sema = BoundedSemaphore(20)
        for ip in ipam['addresses']:
            sema.acquire()
            t = Thread(target=__detect_thread, args=[ip, dev_list, sema])
            t.name = "Detect %s" % ip
            t.start()
        for i in range(20):
            sema.acquire()
    except Exception as err:
        ret['error'] = "Error:{}".format(str(err))

    # We can now do inserts only (no update) as we skip existing :-)
    with DB() as db:
        sql = "INSERT INTO devices (a_dom_id, ipam_id, snmp, model, type_id, hostname) VALUES (" + aDict[
            'a_dom_id'] + ",{},'{}','{}','{}','{}')"
        count = 0
        for ip, entry in dev_list.items():  # iteritems() is Python 2 only
            count += 1
            alloc = address_allocate({
                'ip': ip,
                'network_id': aDict['network_id']
            })
            if alloc['success']:
                db.do(
                    sql.format(alloc['id'], entry['snmp'], entry['model'],
                               devtypes[entry['type']]['id'],
                               "unknown_%i" % count))
    ret['time'] = int(time()) - start_time
    ret['found'] = len(dev_list)
    return ret
Example no. 14
def action_scan_main(config):
	# --------------------------------------------------------------------------
	# Resolve target
	# --------------------------------------------------------------------------
	all_ips = build_targets(config)

	# --------------------------------------------------------------------------
	# Preparing scan
	# --------------------------------------------------------------------------
	target_number = len(all_ips)

	log.warning("  - Number of targets to analyze: %s" % target_number)

	# Semaphore
	sem = BoundedSemaphore(config.concurrency)
	threads = []

	# Map parameters
	_fn = partial(_do_scan, config, sem)

	log.error("  - Starting scan")

	# --------------------------------------------------------------------------
	# Do scan
	# --------------------------------------------------------------------------
	for x in all_ips:
		sem.acquire()

		t = Thread(target=_fn, args=(x,))
		threads.append(t)

		t.start()

	for t in threads:
		t.join()

	# --------------------------------------------------------------------------
	# Display results
	# --------------------------------------------------------------------------
	if OPEN_SERVICES:
		log.error("  - Open services found:")
		for host, content in six.iteritems(OPEN_SERVICES):
			log.error("    -> Host - %s" % host)
			for server_type, server_info in six.iteritems(content):
				log.error("       * %s/TCP [%s]" % (server_info['port'], server_type))

	else:
		log.error("  - No open services found")

	# --------------------------------------------------------------------------
	# Export results
	# --------------------------------------------------------------------------
	if config.output is not None:
		_output_path = "%s.json" % config.output if ".json" not in config.output else config.output

		with open(_output_path, "w") as f:
			json.dump(OPEN_SERVICES, f)

		log.error("  - Output results saved into: %s" % _output_path)
Example no. 15
def create_document(main_url, max_connections=2, filepath=None):
    """Creates an EPUB document from a fanfic.

       main_url -- user given URL which should be the first chapter
       max_connections -- maximum number of simultaneous connections
           default: 2. This should be chosen with care as the Terms of Service
           of some of the websites states that you shouldn't cause more stress
           than a normal visitor.
       filepath -- optional path for the resulting Epub document
           By default filename is: %author - %title.epub in the current
           directory. %author and %title in the path are special, they're
           changed to the story author and title respectively."""
    global dl_semaphore
    dl_semaphore = BoundedSemaphore(max_connections)

    parse, parse_ch1 = get_parser(main_url)
    html_ch1, chapter_num, story_info = get_chapter1(main_url, parse_ch1)
    chapters = {}
    chapters[1] = html_ch1

    if story_info["cover_url"]:
        cover_image_req = Request(
            story_info["cover_url"], headers=story_info["cover_headers"])
        cover_image = urlopen(cover_image_req).read()
    else:
        with open("default.jpg", "rb") as default_cover:
            cover_image = default_cover.read()

    with concurrent.futures.ThreadPoolExecutor(max_workers=max_connections+3) \
            as executor:
        parse_chapters = []
        download_urls = story_info["chapter_urls"]
        for ch in range(2, chapter_num+1):
            dl_semaphore.acquire()
            parse_chapters.append(
                executor.submit(get_chapter, download_urls[ch], ch, parse))
        for future in concurrent.futures.as_completed(parse_chapters):
            html, chapter_no = future.result()
            chapters[chapter_no] = html

    if not filepath:
        filepath = "{} - {}.epub".format(
            INVALID_CHARS.sub("-", story_info["author"]),
            INVALID_CHARS.sub("-", story_info["title"]))
    else:
        filepath = filepath.replace(
            "%author", INVALID_CHARS.sub("-", story_info["author"]))
        filepath = filepath.replace(
            "%title", INVALID_CHARS.sub("-", story_info["title"]))

    with zipfile.ZipFile(filepath, "w") as f:
        f.writestr("mimetype", MIMETYPE)
        f.writestr("META-INF/container.xml", CONTAINER_XML)
        f.writestr("Content/titlepage.html", TITLEPAGE_HTML)
        f.writestr("Content/styles.css", STYLES_CSS)
        f.writestr("Content/cover.jpg", cover_image)
        f.writestr("Content/toc.ncx", toc(story_info, chapter_num))
        f.writestr("Content/content.opf", contents(story_info, chapter_num))
        for ch in range(1, chapter_num+1):
            f.writestr("Content/Chapters/ch{}.html".format(ch), chapters[ch])
Example no. 16
class SocketServer(Thread):
    def __init__(self, host, port, max_clients):
        #Initialize the server.
        Thread.__init__(self)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.host = host
        self.port = port
        self.sock.bind((host, port))
        #Up to five connections allowed to be queued, standard value
        self.sock.listen(5)
        #Threads registry
        self.sock_threads = []
        #Clients registry
        self.sock_pool = POOL
        #Timestamp registry
        self.sock_timestamp = TIMESTAMPS
        #Semaphores used to constrain the possible connections
        self.semaph = BoundedSemaphore(value=max_clients)

    def close(self):
        #Close the client threads and server socket if they exist.
        print("Manager offline")
        for thr in self.sock_threads:
            thr.stop()
            thr.join()

        if (self.sock):
            self.sock.close()
            self.sock = None

    def run(self):
        #Accept an incoming connection
        print("Manager online from: {}, port: {}".format(self.host, self.port))
        self.__stop = False
        while not self.__stop:
            self.sock.settimeout(1)
            try:
                self.semaph.acquire()
                client_sock, client_addr = self.sock.accept()
            except socket.timeout:
                client_sock = None

            if (client_sock):
                client_thr = SocketServerThread(client_sock, client_addr,
                                                self.semaph)
                self.sock_threads.append(client_thr)
                #Dictionary needed to keep track of active threads
                THREADS[client_thr.client_ID] = client_thr
                #Dictionary of semaphores needed to lock threads respectively
                SEMAPHORES[client_thr.client_ID] = Semaphore()
                client_thr.start()
            else:
                self.semaph.release()
        self.close()

    def stop(self):
        self.__stop = True
Example no. 17
def thread_run(class_name, cates, sem=3):
    semlock = BoundedSemaphore(sem)  # threading.BoundedSemaphore()
    for cate in cates:
        # NOTE: nothing here releases the permit; obj.start (or the thread it
        # runs in) must call semlock.release() for the bound to have any effect
        semlock.acquire()
        if isinstance(class_name, object):
            obj = class_name()
            if hasattr(obj, 'start'):
                thread = Thread(target=obj.start, args=(cate,))
                # threads.append(thread)
                thread.start()
            else:
                # raising a bare string is a TypeError in Python 3
                raise AttributeError('obj has no start func')
Example no. 18
class ThreadedGroup:
    """ An object to keep an arbitrary number of workers on an easily-split
        task.
    """
    
    def __init__(self):
        """ Initialise self """
        
        self._job_count = 0 # The number of active jobs.
        self.lock = Lock() # Lock protecting job_count...
        self.semaphore = BoundedSemaphore(THREAD_COUNT)
        
    def start(self, func, *args, **kargs):
        """ Start the given job. This will block until there are free
            workers.
        """
        
        def job_wrapper():
            """ Run the job, then decrement the job count and release the
                semaphore.
            """
            try:
                func(*args, **kargs)
            finally:
                with self.lock:
                    self._job_count -= 1
                self.semaphore.release()
        
        # Create the job.
        job = Job(job_wrapper)
        # Acquire the semaphore and start.
        self.semaphore.acquire()
        with self.lock:
            self._job_count += 1
        job.start()
        
    def wait(self):
        """ Wait until all of the jobs are finished """
        
        print("Waiting for jobs to finish...")
        
        while self.get_job_count() != 0:
            # Block until another job ends, then hand the permit straight
            # back so the pool keeps its full THREAD_COUNT capacity.
            self.semaphore.acquire()
            self.semaphore.release()
            
        print("Jobs finished!")

    def get_job_count(self):
        """ Return the current job count """

        with self.lock:
            return self._job_count
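
A sketch of driving the group; Job and THREAD_COUNT come from the surrounding module, so this assumes Job(fn) wraps a thread whose start() runs fn exactly once:

group = ThreadedGroup()
for i in range(10):
    group.start(print, "job", i)  # blocks while THREAD_COUNT jobs are active
group.wait()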
Example no. 19
def main(sysargv=[]):
    TLIM = 50  #the maximum number of concurrent threads
    MLIM = 1000.0  #the memory limit (in GB)
    if len(sysargv) > 0:
        TLIM = int(sysargv[0])
    if len(sysargv) > 1:
        MLIM = float(sysargv[1])

    #imprecise safety precautions
    TLIM = TLIM - 1
    MLIM = int(MLIM * (2**20) * float(TLIM) / (TLIM + 1))  #in kB

    t_arr = []  #array of threads
    sem = BoundedSemaphore(
        value=TLIM)  #semaphore to control the number of threads
    lock = Lock()

    #################################################
    ##GENERATE FILELIST HERE
    filelist = []
    #################################################

    #run the job for each file
    for i, f in enumerate(filelist):  #i is the counter, f is the filename

        #################################################
        ##GENERATE CMD HERE
        cmd = "echo \"stuff\""
        #################################################

        t_arr.append(threading.Thread(target=run_job, args=(
            cmd,
            sem,
            lock,
        )))
        t_arr[i].daemon = True

        #wait until enough threads have finished to start another
        sem.acquire()

        #wait until there's enough memory
        #process = psutil.Process(os.getpid())
        #memuse = (process.memory_info()).rss
        memdata = resource.getrusage(resource.RUSAGE_BOTH)
        memuse = memdata.ru_ixrss + memdata.ru_idrss
        if memuse >= MLIM:
            lock.acquire(blocking=True)

        t_arr[i].start()

    #wait until the last thread is finished to exit
    t_arr[-1].join()
Example no. 20
class ThreadSemaphore:
    def __init__(self, lim_handlers):
        self.bounded_semaphore = BoundedSemaphore(lim_handlers)

    def add_to_queue(self, *, fun, args=None, kwargs=None):
        self.bounded_semaphore.acquire()
        Thread(target=self._run, args=(fun, args, kwargs)).start()

    def _run(self, fun, args=None, kwargs=None):
        try:
            run_fun(target=fun, args=args, kwargs=kwargs)
        finally:
            self.bounded_semaphore.release()
Example no. 21
def network_discover(aDict):
 """ Function discovers _new_ IP:s that answer to ping within a certain network. A list of such IP:s are returned

 Args:
  - id (required)
  - simultaneous (optional). Simultaneous threads

 Output:
  - addresses. list of ip:s and ip_int pairs that answer to ping
 """
 from threading import Thread, BoundedSemaphore
 from struct import pack
 from socket import inet_ntoa
 from os import system
 def GL_int2ip(addr):
  return inet_ntoa(pack("!I", addr))

 def __detect_thread(aIPint,aIPs,aSema):
  __ip = GL_int2ip(aIPint)
  if system("ping -c 1 -w 1 %s > /dev/null 2>&1"%(__ip)) == 0:
   aIPs.append(__ip)
  aSema.release()
  return True

 addresses = []
 simultaneous = int(aDict.get('simultaneous',20))
 ret = {'addresses':addresses}

 with DB() as db:
  db.do("SELECT network,mask FROM ipam_networks WHERE id = %s"%aDict['id'])
  net = db.get_row()
  ip_start = net['network'] + 1
  ip_end   = net['network'] + 2**(32 - net['mask']) - 1
  ret.update({'start':{'ipint':ip_start,'ip':GL_int2ip(ip_start)},'end':{'ipint':ip_end,'ip':GL_int2ip(ip_end)}})
  db.do("SELECT ip FROM ipam_addresses WHERE network_id = %s"%aDict['id'])
  ip_list = db.get_dict('ip')

 try:
  sema = BoundedSemaphore(simultaneous)
  for ip in range(ip_start,ip_end):
   if not ip_list.get(ip):
    sema.acquire()
    t = Thread(target = __detect_thread, args=[ip, addresses, sema])
    t.name = "Detect %s"%ip
    t.start()
  for i in range(simultaneous):
   sema.acquire()
 except Exception as err:
  ret['error']   = str(err)

 return ret
Example no. 22
class BoundedExecutor:
    """
    BoundedExecutor behaves as a ThreadPoolExecutor which will block on
    calls to submit() once the limit given as "bound" work items are queued for
    execution.
    :param bound: Integer - the maximum number of items in the work queue
    :param max_workers: Integer - the size of the thread pool
    """
    def __init__(self, bound, max_workers):
        self.max_workers = max_workers
        self.bound = bound

    """
    Allows together with __exit__ to use this class in with statements.
    Open executor and semaphore
    """

    def __enter__(self):
        self.executor = ThreadPoolExecutor(max_workers=self.max_workers)
        self.semaphore = BoundedSemaphore(self.bound + self.max_workers)
        return self

    """
    Allows together with __enter__ to use this class in with statements
    Closes the thread pool
    """

    def __exit__(self, exc_type, exc_value, traceback):
        self.shutdown()

    """
    See concurrent.futures.Executor#submit
    """

    def submit(self, fn, *args, **kwargs):
        self.semaphore.acquire()
        try:
            future = self.executor.submit(fn, *args, **kwargs)
        except:
            self.semaphore.release()
            raise
        else:
            future.add_done_callback(lambda x: self.semaphore.release())
            return future

    """
    See concurrent.futures.Executor#shutdown
    """

    def shutdown(self, wait=True):
        self.executor.shutdown(wait)
Example no. 23
def execute_semaphored_threads():
    inputs = list(range(800, 1000))
    print("Calculating from {} to {}".format(inputs[0], inputs[-1]))
    # the semaphore admits at most four holders at a time
    pool_sema = BoundedSemaphore(value=4)
    threads = []
    for i in inputs:
        # acquire paces thread creation...
        pool_sema.acquire()
        t = Thread(target=execute_fib, args=(i,))
        threads.append(t)
        t.start()
        # ...but releasing right after start() means the bound throttles
        # creation only; it does not cap how many threads stay alive
        pool_sema.release()
    return threads
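
Because of that immediate release, the code above never caps how many threads run at once. A variant in which each worker keeps its permit until it finishes does enforce the bound (execute_fib is assumed from the surrounding module):

def execute_fib_bounded(sema, n):
    try:
        execute_fib(n)
    finally:
        sema.release()  # hand the permit back only when the work is done

def execute_semaphored_threads_bounded():
    pool_sema = BoundedSemaphore(value=4)
    threads = []
    for i in range(800, 1000):
        pool_sema.acquire()  # blocks while four workers are still running
        t = Thread(target=execute_fib_bounded, args=(pool_sema, i))
        threads.append(t)
        t.start()
    return threads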
Example no. 24
 def plot_all(plot_ids, df, start_date, end_date):
     threads = []
     df = df.sort_values(by=BankSchema.SCHEMA_BANK_DATE.name)
     semaphor_pool = BoundedSemaphore(value=Visuals.PLOT_MAX_THREADS)
     logging.info(f'Beginning plotting using {Visuals.PLOT_MAX_THREADS} threads.')
     for plt_id in plot_ids:
         semaphor_pool.acquire()
         t = Thread(target=ImageRegistry._plot, args=(plt_id, start_date, end_date, df))
         threads.append(t)
         t.start()
         semaphor_pool.release()  # released immediately; see the note under Example no. 23
     # wait for all of them to finish
     for x in threads:
         x.join()
Example no. 25
class WorkerThread(Thread):

    def __init__(self, name):
        Thread.__init__(self)
        self.__pauseSemaphore = BoundedSemaphore(1)
        self.__logger = logging.getLogger(name)
        self.__is_terminating = False
        self.__pauseSemaphore.acquire(blocking=False)
        self.interval = 0.0
        self.name = name
        self.start()


    def run(self):
        self.init()
        while True:
            # if interval is greater than zero, sleep for that number of seconds before attempting
            # to continue the WorkerThread
            if self.interval > 0.0:
                time.sleep(self.interval)
            # Acquire and release __pauseSemaphore before calling loop
            # This will block the thread if pause() has been called
            self.__pauseSemaphore.acquire()
            self.__pauseSemaphore.release()
            if (self.__is_terminating):
                #self.log("Terminating")
                return
            self.loop()

    def pause(self):
        self.log("pausing")
        self.__pauseSemaphore.acquire(blocking=False)

    def resume(self):
        #self.log("resuming")
        try:
            self.__pauseSemaphore.release()
        except ValueError:
            pass

    def singleRun(self):
        # self.log("run single iteration")
        self.__pauseSemaphore.acquire(blocking=False)
        self.loop()

    def terminate(self):
        #self.log("terminate() called")
        self.__is_terminating = True
        self.resume()
        #self.join()

    def log(self, message):
        self.__logger.info(message)

    def init(self):
        pass

    def loop(self):
        pass
Example no. 26
class Flight:
    def __init__(self, available_seat):
        self.available_seat=available_seat
        self.l=BoundedSemaphore(2) # two threads may hold this one at a time
        print(self.l)
        print(self.l._value)
    def reserve(self, need_seat):
        self.l.acquire()
        print('Available Seats:', self.available_seat)
        if(self.available_seat>=need_seat):
            name=current_thread().name
            print(f'{need_seat} seat(s) allotted to {name}')
            self.available_seat-=need_seat
        else:
            print('Sorry! All seats have been allotted')
        self.l.release()
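
A quick sketch of exercising the class. Note that with two permits, two threads can read and decrement available_seat at the same time, so the counter itself is not protected; a BoundedSemaphore(1) would make reserve() a true critical section.

from threading import Thread, current_thread

flight = Flight(10)
threads = [Thread(target=flight.reserve, args=(2,), name="user-%d" % i)
           for i in range(6)]
for t in threads:
    t.start()
for t in threads:
    t.join()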
Example no. 27
def __get__hist__data__control(ticker, sem: BoundedSemaphore, njobs: Countdown, timeout: float,
                               sleep_time: float, logger: data_logging.Logger):
    timer = CountdownTime(sleep_time)
    res = sem.acquire(timeout=timeout)
    ret = None
    if not res:
        logger.log(data_logging.ERROR, f'Error::Timeout For Ticker {ticker}')
        ret = DataTimeoutError(timeout, f'requesting historical data for ticker = {ticker}')
    else:
        try:
            ret = __get_hist_data__(ticker)
        except MissingDataException as missing:
            logger.log(data_logging.ERROR, f'Error::Missing data in response for Ticker {ticker}')

            ret = missing

    # sleep and cleanup
    njobs.dec_count()
    if not njobs.finished() and not isinstance(ret, DataFetchException):
        time_left = timer.time_left_sec()
        if time_left > 0:
            logger.log(data_logging.DEBUG, f'sleeping: {time_left} seconds.')
            time.sleep(time_left)
    sem.release()
    if isinstance(ret, DataFetchException):
        raise ret
    return ret
Example no. 28
class BoundedIterator(collections.abc.Iterator):  # collections.Iterator was removed in Python 3.10
    """Limits the number of values to yield until yielded values are
    acknowledged.
    """
    def __init__(self, bound, it: Iterable[T]):
        self._it = iter(it)

        self._sem = BoundedSemaphore(bound)

    def __iter__(self) -> Iterator[T]:
        return self

    def __next__(self) -> T:
        return self.next()

    def next(self, timeout=None) -> T:
        """Returns the next value from the iterable.

        This method is not thread-safe.

        :raises TimeoutError: if timeout is given and no value is acknowledged in the mean time.
        """
        if not self._sem.acquire(timeout=timeout):
            raise TimeoutError('Too many values un-acknowledged.')

        return next(self._it)

    def processed(self):
        """Acknowledges one value allowing another one to be yielded.

        This method is thread-safe.
        """
        self._sem.release()
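
A usage sketch: with a bound of 2, a third next() blocks until the consumer acknowledges a value with processed().

it = BoundedIterator(2, range(10))
first = next(it)   # 0
second = next(it)  # 1
# a plain next(it) would now block; acknowledge one value first:
it.processed()
third = it.next(timeout=1.0)  # 2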
Example no. 29
class BaseApi:
    """docstring for BaseApi"""
    def __init__(self, base_url, rate, interval):
        super(BaseApi, self).__init__()
        self.base_url = base_url
        self.interval = interval
        self.api_slots = BoundedSemaphore(value=rate)
        self.waiting_requests = Queue()

    def send_requests(self):
        self.api_slots.acquire()
        request_info = self.waiting_requests.get()
        response = request_info.method(self.base_url + request_info.url,
                                       params=request_info.params)
        # spawn callback thread?
        Timer(self.interval, self.api_slots.release).start()
Example no. 30
class MultipleQueueThread:
    def __init__(self, *, target, max_thread4queue):
        self._max_thread4queue = max_thread4queue
        self._semaphore = BoundedSemaphore(max_thread4queue)
        self._queue = Queue()
        self._stop_event = Event()
        self._target = target
        for i in range(max_thread4queue):
            self._semaphore.acquire()
            Thread(target=self._run).start()
        log_info('Start MultipleQueueThread')

    def _run(self):
        while not self._stop_event.is_set():
            try:
                args, kwargs = self._queue.get()
                run_fun(target=self._target, args=args, kwargs=kwargs)
            except TypeError:
                pass
        self._semaphore.release()

    def stop(self, *, force=False):
        if not force:
            while not self._queue.empty():
                pass

        self._stop_event.set()
        for i in range(self._max_thread4queue):
            self._queue.put(None)
        while self._semaphore._value != 0:  # busy-wait on the private counter until every worker exits
            pass
        log_info('Stop MultipleQueueThread')

    def put(self, *, args=None, kwargs=None):
        if self._stop_event.is_set():
            raise Exception('Already stopped')
        # tt = time.time()
        self._queue.put((
            args,
            kwargs,
        ))
        # log_info(f'Put tume: {time.time() - tt}')

    def is_stopped(self):
        return self._stop_event.is_set()
Example no. 31
class Device():
    def __init__(self, name: str, number: int = 1):
        self.name = name
        assert number > 0, Exception(
            "Remember Devices are wrappers for BoundedSemaphores. You cannot have less than 1"
        )
        assert type(number) == int, Exception(
            "Device number is quantity, must be integer not %s" % type(number))
        self.__sem__ = BoundedSemaphore(number)

    def __repr__(self):
        # _value is the semaphore's internal count of free permits
        # (the original referenced a nonexistent self.__lock__)
        return "name: {}, free: {}".format(self.name, self.__sem__._value)

    def get(self):
        self.__sem__.acquire()

    def release(self):
        self.__sem__.release()
Example no. 32
class IDCardFactory(Factory):
    def __init__(self):
        self.owner = []
        self.seqNum = 1
        self.semaphore = BoundedSemaphore(1)

    def createProduct(self, owner):
        self.semaphore.acquire()
        card = IDCard(self.seqNum, owner)
        self.seqNum += 1
        self.semaphore.release()
        return card

    def registerProduct(self, product):
        self.owner.append(product.getOwner())

    def getOwners(self):
        return self.owner
Example no. 33
class MyThread(threading.Thread):

	def __init__(self, site):
		self.site = site
		threading.Thread.__init__(self)
		self.semaphore = BoundedSemaphore(value=MAXCONN)
		self.t = Tweetya()

	def run(self):
		link = self.t.parse(self.site)
		self.semaphore.acquire()
		urls = self.t.getlinks()
		for i in link:
			if not (i in urls):
				self.t.setlink(i)
				short = self.t.short(i)
				title = self.t.gettitle(short)
				self.t.auth(str(title)+' '+str(short))
		self.semaphore.release()
Example no. 35
class Pool:
    def __init__(self, count):
        self.count = count
        self.pool = [
            self._connect('worker - {}'.format(x)) for x in range(count)
        ]
        self.sema = BoundedSemaphore(count)  # semaphore bounded at count

    def _connect(self, name):
        return Conn(name)

    def get(self):  # take an element from the pool
        self.sema.acquire()  # the value decreases by 1, blocking at zero
        return self.pool.pop()

    def return_conn(self, conn: Conn):  # return an element to the pool
        self.pool.append(conn)
        self.sema.release()  # the value increases by 1, bounded by count
Example no. 36
class ThreadPool:
    """ Pool of threads consuming tasks from a queue """
    def __init__(self, num_threads):
        self._results_queue = Queue()
        self._exceptions_queue = Queue()
        self._tasks_queue = Queue()
        self._sem = BoundedSemaphore(num_threads)
        self._num_threads = num_threads

    def add_task(self, func, *args, **kargs):
        """
        Add a task to the queue. Calling this function can block
        until workers have a room for processing new tasks. Blocking
        the caller also prevents the latter from allocating a lot of
        memory while workers are still busy running their assigned tasks.
        """
        self._sem.acquire()
        cleanup_func = self._sem.release
        self._tasks_queue.put((func, args, kargs, cleanup_func))

    def start_parallel(self):
        """ Prepare threads to run tasks"""
        for _ in range(self._num_threads):
            Worker(
                self._tasks_queue,
                self._results_queue,
                self._exceptions_queue,
            )

    def result(self):
        """ Stop threads and return the result of all called tasks """
        # Send None to all threads to cleanly stop them
        for _ in range(self._num_threads):
            self._tasks_queue.put(None)
        # Wait for completion of all the tasks in the queue
        self._tasks_queue.join()
        # Check if one of the thread raised an exception, if yes
        # raise it here in the function
        if not self._exceptions_queue.empty():
            raise self._exceptions_queue.get()
        return self._results_queue
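
A sketch of driving the pool. Worker is assumed (from the surrounding module) to be a thread that pops (func, args, kargs, cleanup_func) tuples off the tasks queue, runs func, and calls cleanup_func so the permit returns:

pool = ThreadPool(4)
pool.start_parallel()
for n in range(20):
    pool.add_task(print, "task", n)  # blocks once 4 tasks are unfinished
results = pool.result()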
Example no. 37
class rpiFIFOClass(deque):
	"""
	Implements the a Deque with BoundedSemaphore.
	Used as a FIFO buffer for the image file names (including the full path).
	Stores also the name of the current sub-folder.
	"""
	def __init__(self, *args):
		super(rpiFIFOClass,self).__init__(*args)
		self.FIFOSema  = BoundedSemaphore()
		self.crtSubDir = '/'
		self.camID     = ''
		
	def acquireSemaphore(self):
		self.FIFOSema.acquire()
		
	def releaseSemaphore(self):
		self.FIFOSema.release()		

	def __del__(self):
#		self.FIFOSema.release()	
		self.crtSubDir = ''
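
A sketch of guarding deque operations with the embedded semaphore (the file path is made up):

fifo = rpiFIFOClass()
fifo.acquireSemaphore()
try:
    fifo.append('/home/pi/photos/img0001.jpg')
    fifo.crtSubDir = '/photos'
finally:
    fifo.releaseSemaphore()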
Example no. 38
class Results:
    def __init__(self):
        self.mutex = BoundedSemaphore(value=1)
        self.__issues = []

    def __len__(self):
        return len(self.__issues)

    # ------------------------------------------------
    # data functions
    # ------------------------------------------------
    def add_result(self, result):
        self.mutex.acquire()
        self.__issues.append(result)
        self.mutex.release()

    def get_result(self, index):
        self.mutex.acquire()
        item = self.__issues[index]
        self.mutex.release()

        return item
Example no. 39
class semaphore(object):
    """ Class encapsulating a semaphore to limit
    number of resources  """

    def __init__(self, *args):
        self.sem = BoundedSemaphore(args[0])

    def __call__(self, f):
        def semfunc(*args, **kwargs):
            # Python 3 syntax; the original used Python 2 print statements
            # and "except Exception, e"
            try:
                print('Trying to acquire sem =>', currentThread())
                self.sem.acquire()
                print('Acquired sem =>', currentThread())
                return f(*args, **kwargs)
            finally:
                self.sem.release()
                print('Released sem =>', currentThread())

        return semfunc
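
Since __call__ returns a wrapping function, the class works as a decorator; a minimal sketch that limits a function to three concurrent callers:

@semaphore(3)
def limited_work(n):
    return n * 2  # at most three threads run this body at once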
Example no. 40
class BlockingRequest():
    
    def __init__(self):
        self.semaphore = BoundedSemaphore(1)
        self.exception = None
        self.response = None
        self.semaphore.acquire(True)
    
    def response_callback(self, response):
        self.response = response
        self.semaphore.release()
    
    
    def error_callback(self, exception):
        self.exception = exception
        self.semaphore.release()
    
    def await_response(self):
        '''Return the response, or raise the stored exception.'''
        self.semaphore.acquire(True)        
        if self.exception :
            raise self.exception
        return self.response
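
A self-contained sketch of the intended flow, simulating an asynchronous producer with a Timer:

from threading import Timer

req = BlockingRequest()
Timer(1.0, req.response_callback, args=("hello",)).start()
print(req.await_response())  # blocks until the callback releases the semaphore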
Example no. 41
class BuildDispatcher(Singleton):

    def _singleton_init(self):
        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        self.builders = dict()
        self.builders_lock = BoundedSemaphore()
        NotificationCenter().add_observer(self, 'handle_state_change', 'image.status')

    def handle_state_change(self, notification):
        status = notification.user_info['new_status']
        if(status in ('COMPLETED', 'FAILED', 'DELETED', 'DELETEFAILED')):
            self.builders_lock.acquire()
            image_id = notification.sender.identifier
            if(image_id in self.builders):
                del self.builders[image_id]
                self.log.debug('Removed builder from BuildDispatcher on notification from image %s: %s' % (image_id, status))
            self.builders_lock.release()

    def builder_for_base_image(self, template, parameters=None):
        builder = Builder()
        builder.build_image_from_template(template, parameters=parameters)
        self.builders_lock.acquire()
        try:
            self.builders[builder.base_image.identifier] = builder
        finally:
            self.builders_lock.release()
        return builder

    def builder_for_target_image(self, target, image_id=None, template=None, parameters=None):
        builder = Builder()
        builder.customize_image_for_target(target, image_id, template, parameters)
        self.builders_lock.acquire()
        try:
            self.builders[builder.target_image.identifier] = builder
        finally:
            self.builders_lock.release()
        return builder

    def builder_for_provider_image(self, provider, credentials, target, image_id=None, template=None, parameters=None):
        builder = Builder()
        builder.create_image_on_provider(provider, credentials, target, image_id, template, parameters)
        self.builders_lock.acquire()
        try:
            self.builders[builder.provider_image.identifier] = builder
        finally:
            self.builders_lock.release()
        return builder
Example no. 42
class Store(object):
    def __init__(self, item_number, person_capacity):
        if type(item_number) is not int:
            raise Exception("item_number is not an integer")
        if type(person_capacity) is not int:
            raise Exception("person_capacity is not an integer")
        
        self.item_number = item_number
        self.person_capacity = person_capacity
        self.sema = BoundedSemaphore(value=self.person_capacity)

    def enter(self):
        self.sema.acquire()

    def buy(self):
        sleep(randint(5, 10))
        if self.item_number:
            purchase = True
            self.item_number -= 1
        else:
            purchase = False

        self.sema.release()
        return purchase
Example no. 43
def map_task(source, func, thread_limit=10):
  '''Run func in up to thread_limit threads, with data from
  source arg passed into it.

  The arg source must be iterable. map_task() will call next()
  each time a free thread is available.

  The function will block until all of the tasks are completed.
  '''
  assert thread_limit > 0
  e = BoundedSemaphore(thread_limit)
  task_list = []
  for i in range(0, thread_limit):
    task_list.append(task(func, e))
  iterer = iter(source)
  data = None
  while 1:
    try:
      if data is None:
        data = next(iterer)
      t = get_free_task(task_list)
      if t:
        t.data = data
        t.start()
        data = None
      else:
#        print >> sys.stderr, 'waiting for e'
        e.acquire()
        e.release()
    except StopIteration:
    # iteration is stopped
#      print >>sys.stderr, 'terminating'
      for a_task in task_list:
#        print >>sys.stderr, 'terminating ' + str(a_task)
        a_task.terminate_and_join()
      return
Example no. 44
class VDOM_semaphore(object):

    def __init__(self, counter=1):
        self.__semaphore = BoundedSemaphore(counter)

    def lock(self):
        return self.__semaphore.acquire()

    def unlock(self):
        self.__semaphore.release()

    def __enter__(self):
        self.lock()
        return self

    def __exit__(self, extype, exvalue, traceback):
        self.unlock()
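
Thanks to __enter__/__exit__ the critical section reads naturally; a minimal sketch with two permits:

sem = VDOM_semaphore(2)
with sem:
    pass  # at most two threads execute this block at a time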
Example no. 45
class IMAPServer:
    """Initializes all variables from an IMAPRepository() instance

    Various functions, such as acquireconnection() return an IMAP4
    object on which we can operate.

    Public instance variables are: self.:
     delim The server's folder delimiter. Only valid after acquireconnection()
     """
    GSS_STATE_STEP = 0
    GSS_STATE_WRAP = 1
    def __init__(self, repos):
        self.ui = getglobalui()
        self.repos = repos
        self.config = repos.getconfig()

        self.preauth_tunnel = repos.getpreauthtunnel()
        self.transport_tunnel = repos.gettransporttunnel()
        if self.preauth_tunnel and self.transport_tunnel:
            raise OfflineImapError('%s: '% repos + \
              'you must enable precisely one '
              'type of tunnel (preauth or transport), '
              'not both', OfflineImapError.ERROR.REPO)
        self.tunnel = \
          self.preauth_tunnel if self.preauth_tunnel \
          else self.transport_tunnel

        self.username = \
          None if self.preauth_tunnel else repos.getuser()
        self.user_identity = repos.get_remote_identity()
        self.authmechs = repos.get_auth_mechanisms()
        self.password = None
        self.passworderror = None
        self.goodpassword = None

        self.usessl = repos.getssl()
        self.hostname = \
          None if self.preauth_tunnel else repos.gethost()
        self.port = repos.getport()
        if self.port is None:
            self.port = 993 if self.usessl else 143
        self.sslclientcert = repos.getsslclientcert()
        self.sslclientkey = repos.getsslclientkey()
        self.sslcacertfile = repos.getsslcacertfile()
        if self.sslcacertfile is None:
            self.__verifycert = None # disable cert verification
        self.fingerprint = repos.get_ssl_fingerprint()
        self.sslversion = repos.getsslversion()

        self.delim = None
        self.root = None
        self.maxconnections = repos.getmaxconnections()
        self.availableconnections = []
        self.assignedconnections = []
        self.lastowner = {}
        self.semaphore = BoundedSemaphore(self.maxconnections)
        self.connectionlock = Lock()
        self.reference = repos.getreference()
        self.idlefolders = repos.getidlefolders()
        self.gss_step = self.GSS_STATE_STEP
        self.gss_vc = None
        self.gssapi = False

    def __getpassword(self):
        """Returns the server password or None"""
        if self.goodpassword is not None: # use cached good one first
            return self.goodpassword

        if self.password is not None and self.passworderror is None:
            return self.password # non-failed preconfigured one

        # get 1) configured password first 2) fall back to asking via UI
        self.password = self.repos.getpassword() or \
                        self.ui.getpass(self.repos.getname(), self.config,
                                        self.passworderror)
        self.passworderror = None
        return self.password

    # XXX: is this function used anywhere?
    def getroot(self):
        """Returns this server's folder root. Can only be called after one
        or more calls to acquireconnection."""

        return self.root


    def releaseconnection(self, connection, drop_conn=False):
        """Releases a connection, returning it to the pool.

        :param drop_conn: If True, the connection will be released and
           not be reused. This can be used to indicate broken connections."""

        if connection is None: return #noop on bad connection
        self.connectionlock.acquire()
        self.assignedconnections.remove(connection)
        # Don't reuse broken connections
        if connection.Terminate or drop_conn:
            connection.logout()
        else:
            self.availableconnections.append(connection)
        self.connectionlock.release()
        self.semaphore.release()

    def __md5handler(self, response):
        challenge = response.strip()
        self.ui.debug('imap', '__md5handler: got challenge %s'% challenge)

        passwd = self.__getpassword()
        retval = self.username + ' ' + hmac.new(passwd, challenge).hexdigest()
        self.ui.debug('imap', '__md5handler: returning %s'% retval)
        return retval

    def __loginauth(self, imapobj):
        """ Basic authentication via LOGIN command."""

        self.ui.debug('imap', 'Attempting IMAP LOGIN authentication')
        imapobj.login(self.username, self.__getpassword())


    def __plainhandler(self, response):
        """Implements SASL PLAIN authentication, RFC 4616,
          http://tools.ietf.org/html/rfc4616"""

        authc = self.username
        passwd = self.__getpassword()
        authz = ''
        if self.user_identity != None:
            authz = self.user_identity
        NULL = u'\x00'
        retval = NULL.join((authz, authc, passwd)).encode('utf-8')
        self.ui.debug('imap', '__plainhandler: returning %s' % retval)
        return retval


    # XXX: describe function
    def __gssauth(self, response):
        data = base64.b64encode(response)
        try:
            if self.gss_step == self.GSS_STATE_STEP:
                if not self.gss_vc:
                    rc, self.gss_vc = kerberos.authGSSClientInit(
                        'imap@' + self.hostname)
                    response = kerberos.authGSSClientResponse(self.gss_vc)
                rc = kerberos.authGSSClientStep(self.gss_vc, data)
                if rc != kerberos.AUTH_GSS_CONTINUE:
                    self.gss_step = self.GSS_STATE_WRAP
            elif self.gss_step == self.GSS_STATE_WRAP:
                rc = kerberos.authGSSClientUnwrap(self.gss_vc, data)
                response = kerberos.authGSSClientResponse(self.gss_vc)
                rc = kerberos.authGSSClientWrap(
                    self.gss_vc, response, self.username)
            response = kerberos.authGSSClientResponse(self.gss_vc)
        except kerberos.GSSError as err:
            # Kerberos errored out on us, respond with None to cancel the
            # authentication
            self.ui.debug('imap', '%s: %s'% (err[0][0], err[1][0]))
            return None

        if not response:
            response = ''
        return base64.b64decode(response)


    def __start_tls(self, imapobj):
        if 'STARTTLS' in imapobj.capabilities and not self.usessl:
            self.ui.debug('imap', 'Using STARTTLS connection')
            try:
                imapobj.starttls()
            except imapobj.error as e:
                raise OfflineImapError("Failed to start "
                  "TLS connection: %s" % str(e),
                  OfflineImapError.ERROR.REPO, None, exc_info()[2])


    ## All __authn_* procedures are helpers that do authentication.
    ## They are class methods that take one parameter, IMAP object.
    ##
    ## Each function should return True if authentication was
    ## successful and False if authentication wasn't even tried
    ## for some reason (but not when IMAP has no such authentication
    ## capability, calling code checks that).
    ##
    ## Functions can also raise exceptions; two types are special
    ## and will be handled by the calling code:
    ##
    ## - imapobj.error means that there was some error that
    ##   comes from imaplib2;
    ##
    ## - OfflineImapError means that function detected some
    ##   problem by itself.

    def __authn_gssapi(self, imapobj):
        if not have_gss:
            return False

        self.connectionlock.acquire()
        try:
            imapobj.authenticate('GSSAPI', self.__gssauth)
            return True
        except imapobj.error as e:
            self.gssapi = False
            raise
        else:
            self.gssapi = True
            kerberos.authGSSClientClean(self.gss_vc)
            self.gss_vc = None
            self.gss_step = self.GSS_STATE_STEP
        finally:
            self.connectionlock.release()

    def __authn_cram_md5(self, imapobj):
        imapobj.authenticate('CRAM-MD5', self.__md5handler)
        return True

    def __authn_plain(self, imapobj):
        imapobj.authenticate('PLAIN', self.__plainhandler)
        return True

    def __authn_login(self, imapobj):
        # Use LOGIN command, unless LOGINDISABLED is advertized
        # (per RFC 2595)
        if 'LOGINDISABLED' in imapobj.capabilities:
            raise OfflineImapError("IMAP LOGIN is "
              "disabled by server.  Need to use SSL?",
               OfflineImapError.ERROR.REPO)
        else:
            self.__loginauth(imapobj)
            return True


    def __authn_helper(self, imapobj):
        """Authentication machinery for self.acquireconnection().

        Raises OfflineImapError() of type ERROR.REPO when
        there are either fatal problems or no authentications
        succeeded.

        If any authentication method succeeds, routine should exit:
        warnings for failed methods are to be produced in the
        respective except blocks."""

        # Authentication routines, hash keyed by method name
        # with value that is a tuple with
        # - authentication function,
        # - tryTLS flag,
        # - check IMAP capability flag.
        auth_methods = {
          "GSSAPI": (self.__authn_gssapi, False, True),
          "CRAM-MD5": (self.__authn_cram_md5, True, True),
          "PLAIN": (self.__authn_plain, True, True),
          "LOGIN": (self.__authn_login, True, False),
        }
        # Stack stores pairs of (method name, exception)
        exc_stack = []
        tried_to_authn = False
        tried_tls = False
        mechs = list(self.authmechs)  # copy: reordering below must not
                                      # mutate the repository's own list

        # GSSAPI must be tried first: we will probably go TLS after it
        # and GSSAPI mustn't be tunneled over TLS.
        if "GSSAPI" in mechs:
            mechs.remove("GSSAPI")
            mechs.insert(0, "GSSAPI")

        for m in mechs:
            if m not in auth_methods:
                raise Exception("Bad authentication method %s, "
                  "please, file OfflineIMAP bug" % m)

            func, tryTLS, check_cap = auth_methods[m]

            # TLS must be initiated before checking capabilities:
            # they could have been changed after STARTTLS.
            if tryTLS and not tried_tls:
                tried_tls = True
                self.__start_tls(imapobj)

            if check_cap:
                cap = "AUTH=" + m
                if cap not in imapobj.capabilities:
                    continue

            tried_to_authn = True
            self.ui.debug('imap', u'Attempting '
              '%s authentication'% m)
            try:
                if func(imapobj):
                    return
            except (imapobj.error, OfflineImapError) as e:
                self.ui.warn('%s authentication failed: %s'% (m, e))
                exc_stack.append((m, e))

        if len(exc_stack):
            msg = "\n\t".join(map(
              lambda x: ": ".join((x[0], str(x[1]))),
              exc_stack
            ))
            raise OfflineImapError("All authentication types "
              "failed:\n\t%s" % msg, OfflineImapError.ERROR.REPO)

        if not tried_to_authn:
            methods = ", ".join(map(
              lambda x: x[5:], filter(lambda x: x[0:5] == "AUTH=",
               imapobj.capabilities)
            ))
            raise OfflineImapError(u"Repository %s: no supported "
              "authentication mechanisms found; configured %s, "
              "server advertises %s"% (self.repos,
              ", ".join(self.authmechs), methods),
              OfflineImapError.ERROR.REPO)


    # XXX: move above, closer to releaseconnection()
    def acquireconnection(self):
        """Fetches a connection from the pool, making sure to create a new one
        if needed, to obey the maximum connection limits, etc.
        Opens a connection to the server and returns an appropriate
        object."""

        self.semaphore.acquire()
        self.connectionlock.acquire()
        curThread = currentThread()
        imapobj = None

        if len(self.availableconnections): # One is available.
            # Try to find one that previously belonged to this thread
            # as an optimization.  Start from the back since that's where
            # they're pushed on.
            imapobj = None
            for i in range(len(self.availableconnections) - 1, -1, -1):
                tryobj = self.availableconnections[i]
                if self.lastowner[tryobj] == curThread.ident:
                    imapobj = tryobj
                    del(self.availableconnections[i])
                    break
            if not imapobj:
                imapobj = self.availableconnections[0]
                del(self.availableconnections[0])
            self.assignedconnections.append(imapobj)
            self.lastowner[imapobj] = curThread.ident
            self.connectionlock.release()
            return imapobj

        self.connectionlock.release()   # Release until need to modify data

        # Must be careful here that if we fail we should bail out gracefully
        # and release locks / threads so that the next attempt can try...
        success = 0
        try:
            while not success:
                # Generate a new connection.
                if self.tunnel:
                    self.ui.connecting('tunnel', self.tunnel)
                    imapobj = imaplibutil.IMAP4_Tunnel(self.tunnel,
                                                       timeout=socket.getdefaulttimeout())
                    success = 1
                elif self.usessl:
                    self.ui.connecting(self.hostname, self.port)
                    imapobj = imaplibutil.WrappedIMAP4_SSL(self.hostname,
                                                           self.port,
                                                           self.sslclientkey,
                                                           self.sslclientcert,
                                                           self.sslcacertfile,
                                                           self.__verifycert,
                                                           self.sslversion,
                                                           timeout=socket.getdefaulttimeout(),
                                                           fingerprint=self.fingerprint
                                                           )
                else:
                    self.ui.connecting(self.hostname, self.port)
                    imapobj = imaplibutil.WrappedIMAP4(self.hostname, self.port,
                                                       timeout=socket.getdefaulttimeout())

                if not self.preauth_tunnel:
                    try:
                        self.__authn_helper(imapobj)
                        self.goodpassword = self.password
                        success = 1
                    except OfflineImapError as e:
                        self.passworderror = str(e)
                        raise

            # Enable compression
            if self.repos.getconfboolean('usecompression', 0):
                imapobj.enable_compression()

            # update capabilities after login, e.g. gmail serves different ones
            typ, dat = imapobj.capability()
            if dat != [None]:
                imapobj.capabilities = tuple(dat[-1].upper().split())

            if self.delim == None:
                listres = imapobj.list(self.reference, '""')[1]
                if listres == [None] or listres == None:
                    # Some buggy IMAP servers do not respond well to LIST "" ""
                    # Work around them.
                    listres = imapobj.list(self.reference, '"*"')[1]
                if listres == [None] or listres == None:
                    # No Folders were returned. This occurs, e.g. if the
                    # 'reference' prefix does not exist on the mail
                    # server. Raise exception.
                    err = "Server '%s' returned no folders in '%s'"% \
                        (self.repos.getname(), self.reference)
                    self.ui.warn(err)
                    raise Exception(err)
                self.delim, self.root = \
                            imaputil.imapsplit(listres[0])[1:]
                self.delim = imaputil.dequote(self.delim)
                self.root = imaputil.dequote(self.root)

            with self.connectionlock:
                self.assignedconnections.append(imapobj)
                self.lastowner[imapobj] = curThread.ident
            return imapobj
        except Exception as e:
            """If we are here then we did not succeed in getting a
            connection - we should clean up and then re-raise the
            error..."""

            self.semaphore.release()

            severity = OfflineImapError.ERROR.REPO
            if type(e) == gaierror:
                #DNS related errors. Abort Repo sync
                #TODO: special error msg for e.errno == 2 "Name or service not known"?
                reason = "Could not resolve name '%s' for repository "\
                         "'%s'. Make sure you have configured the ser"\
                         "ver name correctly and that you are online."%\
                         (self.hostname, self.repos)
                raise OfflineImapError(reason, severity), None, exc_info()[2]

            elif isinstance(e, SSLError) and e.errno == errno.EPERM:
                # SSL unknown protocol error
                # happens e.g. when connecting via SSL to a non-SSL service
                if self.port != 993:
                    reason = "Could not connect via SSL to host '%s' and non-s"\
                        "tandard ssl port %d configured. Make sure you connect"\
                        " to the correct port." % (self.hostname, self.port)
                else:
                    reason = "Unknown SSL protocol connecting to host '%s' for "\
                         "repository '%s'. OpenSSL responded:\n%s"\
                         % (self.hostname, self.repos, e)
                raise OfflineImapError(reason, severity), None, exc_info()[2]

            elif isinstance(e, socket.error) and e.args[0] == errno.ECONNREFUSED:
                # "Connection refused", can be a non-existing port, or an unauthorized
                # webproxy (open WLAN?)
                reason = "Connection to host '%s:%d' for repository '%s' was "\
                    "refused. Make sure you have the right host and port "\
                    "configured and that you are actually able to access the "\
                    "network." % (self.hostname, self.port, self.repos)
                raise OfflineImapError(reason, severity), None, exc_info()[2]
            # Could not acquire connection to the remote;
            # socket.error(last_error) raised
            if str(e)[:24] == "can't open socket; error":
                raise OfflineImapError("Could not connect to remote server '%s' "\
                    "for repository '%s'. Remote does not answer."
                    % (self.hostname, self.repos),
                    OfflineImapError.ERROR.REPO), None, exc_info()[2]
            else:
                # re-raise all other errors
                raise
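
    # Typical calling pattern for the pool above (a sketch; "server" and
    # "folder" are placeholder names, not part of this code):
    #
    #   imapobj = server.acquireconnection()
    #   try:
    #       imapobj.select(folder)
    #   finally:
    #       server.releaseconnection(imapobj)
    #
    # releaseconnection() hands the semaphore slot back, so every
    # successful acquireconnection() must be paired with one.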

    def connectionwait(self):
        """Waits until there is a connection available.

        Note that between the time that a connection becomes available and the
        time it is requested, another thread may have grabbed it.  This function
        is mainly present as a way to avoid spawning thousands of threads to
        copy messages, then have them all wait for 3 available connections.
        It's OK if we have maxconnections + 1 or 2 threads, which is what this
        will help us do."""

        self.semaphore.acquire()
        self.semaphore.release()

    def close(self):
        # Make sure I own all the semaphores.  Let the threads finish
        # their stuff.  This is a blocking method.
        with self.connectionlock:
            # first, wait till all connections had been released.
            # TODO: won't work IMHO, as releaseconnection() also
            # requires the connectionlock, leading to a potential
            # deadlock! Audit & check!
            threadutil.semaphorereset(self.semaphore, self.maxconnections)
            for imapobj in self.assignedconnections + self.availableconnections:
                imapobj.logout()
            self.assignedconnections = []
            self.availableconnections = []
            self.lastowner = {}
            # reset kerberos state
            self.gss_step = self.GSS_STATE_STEP
            self.gss_vc = None
            self.gssapi = False

    def keepalive(self, timeout, event):
        """Sends a NOOP to each connection recorded.

        It will wait a maximum of timeout seconds between doing this, and will
        continue to do so until the Event object as passed is true.  This method
        is expected to be invoked in a separate thread, which should be join()'d
        after the event is set."""

        self.ui.debug('imap', 'keepalive thread started')
        while not event.isSet():
            self.connectionlock.acquire()
            numconnections = len(self.assignedconnections) + \
                             len(self.availableconnections)
            self.connectionlock.release()

            threads = []
            for i in range(numconnections):
                self.ui.debug('imap', 'keepalive: processing connection %d of %d'% (i, numconnections))
                if len(self.idlefolders) > i:
                    # IDLE thread
                    idler = IdleThread(self, self.idlefolders[i])
                else:
                    # NOOP thread
                    idler = IdleThread(self)
                idler.start()
                threads.append(idler)

            self.ui.debug('imap', 'keepalive: waiting for timeout')
            event.wait(timeout)
            self.ui.debug('imap', 'keepalive: after wait')

            for idler in threads:
                # Make sure all the commands have completed.
                idler.stop()
                idler.join()
            self.ui.debug('imap', 'keepalive: all threads joined')
        self.ui.debug('imap', 'keepalive: event is set; exiting')
        return

    def __verifycert(self, cert, hostname):
        """Verify that cert (in socket.getpeercert() format) matches hostname.

        CRLs are not handled.
        Returns error message if any problems are found and None on success."""

        errstr = "CA Cert verifying failed: "
        if not cert:
            return ('%s no certificate received'% errstr)
        dnsname = hostname.lower()
        certnames = []

        # cert expired?
        notafter = cert.get('notAfter')
        if notafter:
            if time.time() >= cert_time_to_seconds(notafter):
                return '%s certificate expired %s'% (errstr, notafter)

        # First read commonName
        for s in cert.get('subject', []):
            key, value = s[0]
            if key == 'commonName':
                certnames.append(value.lower())
        if len(certnames) == 0:
            return ('%s no commonName found in certificate'% errstr)

        # Then read subjectAltName
        for key, value in cert.get('subjectAltName', []):
            if key == 'DNS':
                certnames.append(value.lower())

        # And finally try to match hostname with one of these names
        for certname in certnames:
            if (certname == dnsname or
                '.' in dnsname and certname == '*.' + dnsname.split('.', 1)[1]):
                return None

        return ('%s no matching domain name found in certificate'% errstr)
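
# Standalone sketch of the wildcard rule implemented in __verifycert
# above (hostnames are made up): a '*.' certificate name covers exactly
# one extra DNS label.
#
#   def matches(certname, dnsname):
#       return (certname == dnsname or
#               '.' in dnsname and
#               certname == '*.' + dnsname.split('.', 1)[1])
#
#   matches('*.example.com', 'mail.example.com')   # True
#   matches('*.example.com', 'example.com')        # False
#   matches('*.example.com', 'a.b.example.com')    # False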
Example no. 46
0
class SocketIO(Retry, Block):

    """ A block for communicating with a socket.io server.

    Properties:
        host (str): location of the socket.io server.
        port (int): socket.io server port.
        room (str): socket.io room.
        content (Expression): Content to send to socket.io room.
        listen (bool): Whether or not the block should listen to messages
            FROM the SocketIo room.

    """
    version = VersionProperty('2.0.0')
    host = StringProperty(title='SocketIo Host', default="127.0.0.1")
    port = IntProperty(title='Port', default=443)
    room = StringProperty(title='Socket.io Room', default="default")
    content = Property(
        title='Content', default="{{ json.dumps($to_dict(), default=str) }}",
        visible=False)
    listen = BoolProperty(title="Listen to SocketIo Room", default=False)
    connect_timeout = TimeDeltaProperty(
        title="Connect timeout",
        default={"seconds": 10},
        visible=False)
    start_without_server = BoolProperty(title="Allow Service Start On Failed "
                                              "Connection", default=False)

    def __init__(self):
        super().__init__()
        self._sid = ""
        self._hb_interval = -1  # Heartbeat interval
        self._hb_timeout = -1  # Heartbeat timeout
        self._transports = ""  # Valid transports
        self._client = None
        self._client_ready = False
        # This bounded semaphore will ensure that only one thread can be
        # connecting to the client at a time
        self._connection_semaphore = BoundedSemaphore(1)
        self._socket_url_protocol = "http"
        self._socket_url_base = ""
        self._stopping = False
        self._disconnect_thread = None

    def configure(self, context):
        super().configure(context)
        self._build_socket_url_base()
        # Connect to the socket before starting the block
        # This connection won't happen with a retry, so if the socket
        # server is not running, the connection will fail. In this case,
        # if the user has specified that the service should start anyways,
        # attempt to reconnect based off of the given retry strategy.

        try:
            self._connect_to_socket()
        except:
            if self.start_without_server():
                self.logger.info('Could not connect to web socket. Service '
                                 'will be started and this block will attempt '
                                 'to reconnect using given retry strategy.')
                self._disconnect_thread = spawn(self.handle_disconnect)
            else:
                raise

    def stop(self):
        """ Stop the block by closing the client.

        """
        self._stopping = True
        self.logger.debug("Shutting down socket.io client")

        if self._disconnect_thread:
            self._disconnect_thread.join()

        self._close_client()
        super().stop()

    def handle_disconnect(self):
        """ What to do when the client reports a problem """
        # Don't need to reconnect if we are stopping, the close was expected
        if self._stopping:
            return

        try:
            self.logger.info("Attempting to reconnect to the socket")
            self.execute_with_retry(self.reconnect_client)
        except:
            self.logger.exception("Failed to reconnect - giving up")

            status_signal = BlockStatusSignal(
                RunnerStatus.error, 'Out of retries.')
            self.notify_management_signal(status_signal)

    def reconnect_client(self):
        # Only allow one connection at a time by wrapping this call in a
        # bounded semaphore
        self.logger.debug("Acquiring connection semaphore")
        if not self._connection_semaphore.acquire(blocking=False):
            self.logger.warning("Already reconnecting, ignoring request")
            return
        self.logger.debug("Connection semaphore acquired")
        try:
            self._close_client()
            self._connect_to_socket()
        finally:
            self.logger.debug("Releasing connection semaphore")
            self._connection_semaphore.release()

    def handle_data(self, data):
        """Handle data coming from the web socket

        data will be a dictionary, containing an event and data
        that was sent, in the form of a python dictionary.
        """
        if data.get('event', '') != 'recvData':
            # We don't care about this event, it's not data
            return
        try:
            sig = Signal(data['data'])
            self.notify_signals([sig])
        except:
            self.logger.warning("Could not parse socket data", exc_info=True)

    def _connect_to_socket(self):
        connected = Event()
        self._do_handshake()

        url = self._get_ws_url()
        self.logger.info("Connecting to %s" % url)
        self._create_client(url, connected)
        self.logger.info("Connected to socket successfully")

        # Give the client some time to report that it's connected,
        # don't return from this method until that happens
        if not connected.wait(self.connect_timeout().total_seconds()):
            self.logger.warning("Connect response not received in time")
            self._close_client()
            raise Exception("Did not connect in time")
        else:
            self._client_ready = True

    def process_signals(self, signals):
        """ Send content to the socket.io room. """

        # Don't do any processing or sending if the block is stopping.
        # The connection may be closed and we don't want to re-open
        if self._stopping:
            return

        if not self._client or not self._client_ready:
            self.logger.warning(
                "Tried to send to a non-existent or "
                "terminated web socket, dropping signals")
            return

        for signal in signals:
            try:
                message = self.content(signal)
                self._client.sender.send_event('pub', message)
            except:
                self.logger.exception("Could not send message")

    def _close_client(self):
        """ Safely close the client and remove the reference """
        try:
            # The client isn't ready if we're closing
            self._client_ready = False
            # Try to close the client if it's open
            if self._client:
                self._client.close()
        except:
            # If we couldn't close, it's fine. Either the client wasn't
            # opened or it didn't want to respond. That's what we get for
            # being nice and cleaning up our connection
            self.logger.info("Error closing client", exc_info=True)
        finally:
            self._client = None

    def _create_client(self, url, connected_event):
        """ Create a WS client object.

        This will close any existing clients and re-create a client
        object.

        By the time this function returns, the client is connected and
        ready to send data.
        """
        # We will only want to handle incoming data if the block
        # has been configured to do so
        if self.listen():
            data_callback = self.handle_data
        else:
            data_callback = None

        self._client = SocketIOWebSocketClient(
            url=url,
            room=self.room(),
            connect_event=connected_event,
            heartbeat_interval=self._hb_interval,
            heartbeat_timeout=self._hb_timeout,
            data_callback=data_callback,
            disconnect_callback=self.handle_disconnect,
            logger=self.logger)

        self._client.connect()

    def _build_socket_url_base(self):
        host = self.host()
        # Default to http protocol
        # See if they included an http or https in front of the host,
        host_matched = re.match('^(https?)://(.*)$', host)
        if host_matched:
            self._socket_url_protocol = host_matched.group(1)
            host = host_matched.group(2)

        self._socket_url_base = "{}:{}/socket.io/".format(host, self.port())
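        # For example (made-up host): host = "https://sio.example.com"
        # with port 443 yields _socket_url_protocol = "https" and
        # _socket_url_base = "sio.example.com:443/socket.io/".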

    def _do_handshake(self):
        """ Perform the socket io handshake.

        This function will set the proper variables like heartbeat timeout
        and the sid. It will also make sure that websockets is a valid
        transport for this socket.io server.
        """
        handshake_url = self._get_handshake_url()
        self.logger.debug("Making handshake request to {}".format(
            handshake_url))

        handshake = requests.get(handshake_url)

        if handshake.status_code != 200:
            raise Exception("Could not complete handshake: %s" %
                            handshake.text)

        self._parse_handshake_response(handshake.text)

        self.logger.debug("Handshake successful, sid=%s" % self._sid)

        # Make sure the server reports that they can handle websockets
        if 'websocket' not in self._transports:
            raise Exception("Websocket is not a valid transport for server")

    def _get_handshake_url(self):
        """ Get the URL to perform the initial handshake request to """
        return "{}://{}?transport=polling".format(
            self._socket_url_protocol, self._socket_url_base)

    def _parse_handshake_response(self, resp_text):
        """ Parse a socket.io v1 handshake response.

        Expected response should look like:
            \0xxxx {"sid":"xxx", "upgrades":["websocket","polling",..],
            pingInterval:xxxx, pingTimeout:xxxx}
        """
        self.logger.debug("Parsing handshake response: {}".format(resp_text))
        matches = re.search('({.*})', resp_text)

        resp = json.loads(matches.group(1))

        self._sid = resp['sid']
        self._hb_interval = int(resp['pingInterval']) / 1000
        self._hb_timeout = int(resp['pingTimeout']) / 1000
        self._transports = resp['upgrades']

    def _get_ws_url(self):
        """ Get the websocket URL to communciate with """
        return "ws://{}?transport=websocket&sid={}".format(
            self._socket_url_base, self._sid)
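
# A minimal sketch of the socket.io v1 handshake parsing performed by
# _parse_handshake_response above (the payload is made up):
#
#   import json, re
#   resp_text = ('96:0{"sid":"abc123","upgrades":["websocket"],'
#                '"pingInterval":25000,"pingTimeout":60000}')
#   resp = json.loads(re.search('({.*})', resp_text).group(1))
#   assert resp['sid'] == 'abc123'
#   assert int(resp['pingInterval']) / 1000 == 25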
Example no. 47
0
class IMAPServer:
    """Initializes all variables from an IMAPRepository() instance

    Various functions, such as acquireconnection() return an IMAP4
    object on which we can operate.

    Public instance variables are: self.:
     delim The server's folder delimiter. Only valid after acquireconnection()
     """
    GSS_STATE_STEP = 0
    GSS_STATE_WRAP = 1
    def __init__(self, repos):
        self.ui = getglobalui()
        self.repos = repos
        self.config = repos.getconfig()
        self.tunnel = repos.getpreauthtunnel()
        self.usessl = repos.getssl()
        self.username = None if self.tunnel else repos.getuser()
        self.password = None
        self.passworderror = None
        self.goodpassword = None
        self.hostname = None if self.tunnel else repos.gethost()
        self.port = repos.getport()
        if self.port == None:
            self.port = 993 if self.usessl else 143
        self.sslclientcert = repos.getsslclientcert()
        self.sslclientkey = repos.getsslclientkey()
        self.sslcacertfile = repos.getsslcacertfile()
        if self.sslcacertfile is None:
            self.verifycert = None # disable cert verification
        self.delim = None
        self.root = None
        self.maxconnections = repos.getmaxconnections()
        self.availableconnections = []
        self.assignedconnections = []
        self.lastowner = {}
        self.semaphore = BoundedSemaphore(self.maxconnections)
        self.connectionlock = Lock()
        self.reference = repos.getreference()
        self.idlefolders = repos.getidlefolders()
        self.gss_step = self.GSS_STATE_STEP
        self.gss_vc = None
        self.gssapi = False

    def getpassword(self):
        """Returns the server password or None"""
        if self.goodpassword != None: # use cached good one first
            return self.goodpassword

        if self.password != None and self.passworderror == None:
            return self.password # non-failed preconfigured one

        # get 1) configured password first 2) fall back to asking via UI
        self.password = self.repos.getpassword() or \
                        self.ui.getpass(self.repos.getname(), self.config,
                                        self.passworderror)
        self.passworderror = None
        return self.password

    def getroot(self):
        """Returns this server's folder root.  Can only be called after one
        or more calls to acquireconnection."""
        return self.root


    def releaseconnection(self, connection, drop_conn=False):
        """Releases a connection, returning it to the pool.

        :param drop_conn: If True, the connection will be released and
           not be reused. This can be used to indicate broken connections."""
        if connection is None: return #noop on bad connection
        self.connectionlock.acquire()
        self.assignedconnections.remove(connection)
        # Don't reuse broken connections
        if connection.Terminate or drop_conn:
            connection.logout()
        else:
            self.availableconnections.append(connection)
        self.connectionlock.release()
        self.semaphore.release()

    def md5handler(self, response):
        challenge = response.strip()
        self.ui.debug('imap', 'md5handler: got challenge %s' % challenge)

        passwd = self.getpassword()
        retval = self.username + ' ' + hmac.new(passwd, challenge).hexdigest()
        self.ui.debug('imap', 'md5handler: returning %s' % retval)
        return retval

    def plainauth(self, imapobj):
        self.ui.debug('imap', 'Attempting plain authentication')
        imapobj.login(self.username, self.getpassword())

    def gssauth(self, response):
        data = base64.b64encode(response)
        try:
            if self.gss_step == self.GSS_STATE_STEP:
                if not self.gss_vc:
                    rc, self.gss_vc = kerberos.authGSSClientInit('imap@' + 
                                                                 self.hostname)
                    response = kerberos.authGSSClientResponse(self.gss_vc)
                rc = kerberos.authGSSClientStep(self.gss_vc, data)
                if rc != kerberos.AUTH_GSS_CONTINUE:
                    self.gss_step = self.GSS_STATE_WRAP
            elif self.gss_step == self.GSS_STATE_WRAP:
                rc = kerberos.authGSSClientUnwrap(self.gss_vc, data)
                response = kerberos.authGSSClientResponse(self.gss_vc)
                rc = kerberos.authGSSClientWrap(self.gss_vc, response,
                                                self.username)
            response = kerberos.authGSSClientResponse(self.gss_vc)
        except kerberos.GSSError as err:
            # Kerberos errored out on us, respond with None to cancel the
            # authentication
            self.ui.debug('imap', '%s: %s' % (err[0][0], err[1][0]))
            return None

        if not response:
            response = ''
        return base64.b64decode(response)

    def acquireconnection(self):
        """Fetches a connection from the pool, making sure to create a new one
        if needed, to obey the maximum connection limits, etc.
        Opens a connection to the server and returns an appropriate
        object."""

        self.semaphore.acquire()
        self.connectionlock.acquire()
        curThread = currentThread()
        imapobj = None

        if len(self.availableconnections): # One is available.
            # Try to find one that previously belonged to this thread
            # as an optimization.  Start from the back since that's where
            # they're pushed on.
            imapobj = None
            for i in range(len(self.availableconnections) - 1, -1, -1):
                tryobj = self.availableconnections[i]
                if self.lastowner[tryobj] == curThread.ident:
                    imapobj = tryobj
                    del(self.availableconnections[i])
                    break
            if not imapobj:
                imapobj = self.availableconnections[0]
                del(self.availableconnections[0])
            self.assignedconnections.append(imapobj)
            self.lastowner[imapobj] = curThread.ident
            self.connectionlock.release()
            return imapobj
        
        self.connectionlock.release()   # Release until need to modify data

        """ Must be careful here that if we fail we should bail out gracefully
        and release locks / threads so that the next attempt can try...
        """
        success = 0
        try:
            while not success:
                # Generate a new connection.
                if self.tunnel:
                    self.ui.connecting('tunnel', self.tunnel)
                    imapobj = imaplibutil.IMAP4_Tunnel(self.tunnel,
                                                       timeout=socket.getdefaulttimeout())
                    success = 1
                elif self.usessl:
                    self.ui.connecting(self.hostname, self.port)
                    fingerprint = self.repos.get_ssl_fingerprint()
                    imapobj = imaplibutil.WrappedIMAP4_SSL(self.hostname,
                                                           self.port,
                                                           self.sslclientkey,
                                                           self.sslclientcert,
                                                           self.sslcacertfile,
                                                           self.verifycert,
                                                           timeout=socket.getdefaulttimeout(),
                                                           fingerprint=fingerprint
                                                           )
                else:
                    self.ui.connecting(self.hostname, self.port)
                    imapobj = imaplibutil.WrappedIMAP4(self.hostname, self.port,
                                                       timeout=socket.getdefaulttimeout())

                if not self.tunnel:
                    try:
                        # Try GSSAPI and continue if it fails
                        if 'AUTH=GSSAPI' in imapobj.capabilities and have_gss:
                            self.connectionlock.acquire()
                            self.ui.debug('imap',
                                'Attempting GSSAPI authentication')
                            try:
                                imapobj.authenticate('GSSAPI', self.gssauth)
                            except imapobj.error as val:
                                self.gssapi = False
                                self.ui.debug('imap',
                                    'GSSAPI Authentication failed')
                            else:
                                self.gssapi = True
                                kerberos.authGSSClientClean(self.gss_vc)
                                self.gss_vc = None
                                self.gss_step = self.GSS_STATE_STEP
                                #if we do self.password = None then the next attempt cannot try...
                                #self.password = None
                            self.connectionlock.release()

                        if not self.gssapi:
                            if 'STARTTLS' in imapobj.capabilities and not\
                                    self.usessl:
                                self.ui.debug('imap',
                                              'Using STARTTLS connection')
                                imapobj.starttls()

                            if 'AUTH=CRAM-MD5' in imapobj.capabilities:
                                self.ui.debug('imap',
                                           'Attempting CRAM-MD5 authentication')
                                try:
                                    imapobj.authenticate('CRAM-MD5',
                                                         self.md5handler)
                                except imapobj.error as val:
                                    self.plainauth(imapobj)
                            else:
                                # Use plaintext login, unless
                                # LOGINDISABLED (RFC2595)
                                if 'LOGINDISABLED' in imapobj.capabilities:
                                    raise OfflineImapError("Plaintext login "
                                       "disabled by server. Need to use SSL?",
                                        OfflineImapError.ERROR.REPO)
                                self.plainauth(imapobj)
                        # Would bail by here if there was a failure.
                        success = 1
                        self.goodpassword = self.password
                    except imapobj.error as val:
                        self.passworderror = str(val)
                        raise

            # update capabilities after login, e.g. gmail serves different ones
            typ, dat = imapobj.capability()
            if dat != [None]:
                imapobj.capabilities = tuple(dat[-1].upper().split())

            if self.delim == None:
                listres = imapobj.list(self.reference, '""')[1]
                if listres == [None] or listres == None:
                    # Some buggy IMAP servers do not respond well to LIST "" ""
                    # Work around them.
                    listres = imapobj.list(self.reference, '"*"')[1]
                if listres == [None] or listres == None:
                    # No Folders were returned. This occurs, e.g. if the
                    # 'reference' prefix does not exist on the mail
                    # server. Raise exception.
                    err = "Server '%s' returned no folders in '%s'" % \
                        (self.repos.getname(), self.reference)
                    self.ui.warn(err)
                    raise Exception(err)
                self.delim, self.root = \
                            imaputil.imapsplit(listres[0])[1:]
                self.delim = imaputil.dequote(self.delim)
                self.root = imaputil.dequote(self.root)

            self.connectionlock.acquire()
            self.assignedconnections.append(imapobj)
            self.lastowner[imapobj] = curThread.ident
            self.connectionlock.release()
            return imapobj
        except Exception as e:
            """If we are here then we did not succeed in getting a
            connection - we should clean up and then re-raise the
            error..."""
            self.semaphore.release()

            if self.connectionlock.locked():
                self.connectionlock.release()

            severity = OfflineImapError.ERROR.REPO
            if type(e) == gaierror:
                #DNS related errors. Abort Repo sync
                #TODO: special error msg for e.errno == 2 "Name or service not known"?
                reason = "Could not resolve name '%s' for repository "\
                         "'%s'. Make sure you have configured the ser"\
                         "ver name correctly and that you are online."%\
                         (self.hostname, self.repos)
                raise OfflineImapError(reason, severity)

            elif isinstance(e, SSLError) and e.errno == 1:
                # SSL unknown protocol error
                # happens e.g. when connecting via SSL to a non-SSL service
                if self.port != 993:
                    reason = "Could not connect via SSL to host '%s' and non-s"\
                        "tandard ssl port %d configured. Make sure you connect"\
                        " to the correct port." % (self.hostname, self.port)
                else:
                    reason = "Unknown SSL protocol connecting to host '%s' for"\
                         "repository '%s'. OpenSSL responded:\n%s"\
                         % (self.hostname, self.repos, e)
                raise OfflineImapError(reason, severity)

            elif isinstance(e, socket.error) and e.args[0] == errno.ECONNREFUSED:
                # "Connection refused", can be a non-existing port, or an unauthorized
                # webproxy (open WLAN?)
                reason = "Connection to host '%s:%d' for repository '%s' was "\
                    "refused. Make sure you have the right host and port "\
                    "configured and that you are actually able to access the "\
                    "network." % (self.hostname, self.port, self.reposname)
                raise OfflineImapError(reason, severity)
            # Could not acquire connection to the remote;
            # socket.error(last_error) raised
            if str(e)[:24] == "can't open socket; error":
                raise OfflineImapError("Could not connect to remote server '%s' "\
                    "for repository '%s'. Remote does not answer."
                    % (self.hostname, self.repos),
                    OfflineImapError.ERROR.REPO)
            else:
                # re-raise all other errors
                raise

    def connectionwait(self):
        """Waits until there is a connection available.  Note that between
        the time that a connection becomes available and the time it is
        requested, another thread may have grabbed it.  This function is
        mainly present as a way to avoid spawning thousands of threads
        to copy messages, then have them all wait for 3 available connections.
        It's OK if we have maxconnections + 1 or 2 threads, which is what
        this will help us do."""
        self.semaphore.acquire()
        self.semaphore.release()

    def close(self):
        # Make sure I own all the semaphores.  Let the threads finish
        # their stuff.  This is a blocking method.
        with self.connectionlock:
            # first, wait till all connections had been released.
            # TODO: won't work IMHO, as releaseconnection() also
            # requires the connectionlock, leading to a potential
            # deadlock! Audit & check!
            threadutil.semaphorereset(self.semaphore, self.maxconnections)
            for imapobj in self.assignedconnections + self.availableconnections:
                imapobj.logout()
            self.assignedconnections = []
            self.availableconnections = []
            self.lastowner = {}
            # reset kerberos state
            self.gss_step = self.GSS_STATE_STEP
            self.gss_vc = None
            self.gssapi = False

    def keepalive(self, timeout, event):
        """Sends a NOOP to each connection recorded.   It will wait a maximum
        of timeout seconds between doing this, and will continue to do so
        until the Event object as passed is true.  This method is expected
        to be invoked in a separate thread, which should be join()'d after
        the event is set."""
        self.ui.debug('imap', 'keepalive thread started')
        while not event.isSet():
            self.connectionlock.acquire()
            numconnections = len(self.assignedconnections) + \
                             len(self.availableconnections)
            self.connectionlock.release()

            threads = []
            for i in range(numconnections):
                self.ui.debug('imap', 'keepalive: processing connection %d of %d' % (i, numconnections))
                if len(self.idlefolders) > i:
                    # IDLE thread
                    idler = IdleThread(self, self.idlefolders[i])
                else:
                    # NOOP thread
                    idler = IdleThread(self)
                idler.start()
                threads.append(idler)

            self.ui.debug('imap', 'keepalive: waiting for timeout')
            event.wait(timeout)
            self.ui.debug('imap', 'keepalive: after wait')

            for idler in threads:
                # Make sure all the commands have completed.
                idler.stop()
                idler.join()
            self.ui.debug('imap', 'keepalive: all threads joined')
        self.ui.debug('imap', 'keepalive: event is set; exiting')
        return

    def verifycert(self, cert, hostname):
        '''Verify that cert (in socket.getpeercert() format) matches hostname.
        CRLs are not handled.

        Returns error message if any problems are found and None on success.
        '''
        errstr = "CA Cert verifying failed: "
        if not cert:
            return ('%s no certificate received' % errstr)
        dnsname = hostname.lower()
        certnames = []

        # cert expired?
        notafter = cert.get('notAfter') 
        if notafter:
            if time.time() >= cert_time_to_seconds(notafter):
                return '%s certificate expired %s' % (errstr, notafter)

        # First read commonName
        for s in cert.get('subject', []):
            key, value = s[0]
            if key == 'commonName':
                certnames.append(value.lower())
        if len(certnames) == 0:
            return ('%s no commonName found in certificate' % errstr)

        # Then read subjectAltName
        for key, value in cert.get('subjectAltName', []):
            if key == 'DNS':
                certnames.append(value.lower())

        # And finally try to match hostname with one of these names
        for certname in certnames:
            if (certname == dnsname or
                '.' in dnsname and certname == '*.' + dnsname.split('.', 1)[1]):
                return None

        return ('%s no matching domain name found in certificate' % errstr)
# Note: a BoundedSemaphore created inside each call is private to that
# call and synchronizes nothing; the lock must be shared by all callers,
# so create it once at module level.
counterSemaphore = BoundedSemaphore()

def growSortC():
	with counterSemaphore:
		global_var.counter4 += 1

def growSortT(tSum):
	with counterSemaphore:
		global_var.timer4 += tSum
Example no. 50
0
class IMAPServer(object):
    """Initializes all variables from an IMAPRepository() instance

    Various functions, such as acquireconnection() return an IMAP4
    object on which we can operate.

    Public instance variables are: self.:
     delim The server's folder delimiter. Only valid after acquireconnection()
    """

    def __init__(self, repos):
        """:repos: a IMAPRepository instance."""

        self.ui = getglobalui()
        self.repos = repos
        self.config = repos.getconfig()

        self.preauth_tunnel = repos.getpreauthtunnel()
        self.transport_tunnel = repos.gettransporttunnel()
        if self.preauth_tunnel and self.transport_tunnel:
            raise OfflineImapError('%s: '% repos +
                'you must enable precisely one '
                'type of tunnel (preauth or transport), '
                'not both', OfflineImapError.ERROR.REPO)
        self.tunnel = \
            self.preauth_tunnel if self.preauth_tunnel \
            else self.transport_tunnel

        self.username = \
            None if self.preauth_tunnel else repos.getuser()
        self.user_identity = repos.get_remote_identity()
        self.authmechs = repos.get_auth_mechanisms()
        self.password = None
        self.passworderror = None
        self.goodpassword = None

        self.usessl = repos.getssl()
        self.useipv6 = repos.getipv6()
        if self.useipv6 is True:
            self.af = socket.AF_INET6
        elif self.useipv6 is False:
            self.af = socket.AF_INET
        else:
            self.af = socket.AF_UNSPEC
        self.hostname = None if self.transport_tunnel or self.preauth_tunnel else repos.gethost()
        self.port = repos.getport()
        if self.port is None:
            self.port = 993 if self.usessl else 143
        self.sslclientcert = repos.getsslclientcert()
        self.sslclientkey = repos.getsslclientkey()
        self.sslcacertfile = repos.getsslcacertfile()
        if self.sslcacertfile is None:
            self.__verifycert = None # Disable cert verification.
                                     # This way of working sucks hard...
        self.fingerprint = repos.get_ssl_fingerprint()
        self.tlslevel = repos.gettlslevel()
        self.sslversion = repos.getsslversion()
        self.starttls = repos.getstarttls()

        if self.usessl \
           and self.tlslevel != "tls_compat" \
           and self.sslversion is None:
            raise Exception("When 'tls_level' is not 'tls_compat' "
                "the 'ssl_version' must be set explicitly.")

        self.oauth2_refresh_token = repos.getoauth2_refresh_token()
        self.oauth2_access_token = repos.getoauth2_access_token()
        self.oauth2_client_id = repos.getoauth2_client_id()
        self.oauth2_client_secret = repos.getoauth2_client_secret()
        self.oauth2_request_url = repos.getoauth2_request_url()
        self.oauth2_access_token_expires_at = None

        self.delim = None
        self.root = None
        self.maxconnections = repos.getmaxconnections()
        self.availableconnections = []
        self.assignedconnections = []
        self.lastowner = {}
        self.semaphore = BoundedSemaphore(self.maxconnections)
        self.connectionlock = Lock()
        self.reference = repos.getreference()
        self.idlefolders = repos.getidlefolders()
        self.gss_vc = None
        self.gssapi = False

        # In order to support proxy connection, we have to override the
        # default socket instance with our own socksified socket instance.
        # We add this option to bypass the GFW in China.
        self.proxied_socket = self._get_proxy('proxy', socket.socket)

        # Turns out that the GFW in China is no longer blocking imap.gmail.com
        # However accounts.google.com (for oauth2) definitely is.  Therefore
        # it is not strictly necessary to use a proxy for *both* IMAP *and*
        # oauth2, so a new option is added: authproxy.

        # Set proxy for use in authentication (only) if desired.
        # If not set, is same as proxy option (compatible with current configs)
        # To use a proxied_socket but not an authproxied_socket
        # set authproxy = '' in config
        self.authproxied_socket = self._get_proxy('authproxy',
                                                  self.proxied_socket)

    def _get_proxy(self, proxysection, dfltsocket):
        _account_section = 'Account ' + self.repos.account.name
        if not self.config.has_option(_account_section, proxysection):
            return dfltsocket
        proxy = self.config.get(_account_section, proxysection)
        if proxy == '':
            # explicitly set no proxy (overrides default return of dfltsocket)
            return socket.socket

        # Powered by PySocks.
        try:
            import socks
            proxy_type, host, port = proxy.split(":")
            port = int(port)
            socks.setdefaultproxy(getattr(socks, proxy_type), host, port)
            return socks.socksocket
        except ImportError:
            self.ui.warn("PySocks not installed, ignoring proxy option.")
        except (AttributeError, ValueError) as e:
            self.ui.warn("Bad proxy option %s for account %s: %s "
                "Ignoring %s option."%
                (proxy, self.repos.account.name, e, proxysection))
        return dfltsocket
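
    # Example of a value this parser accepts (made-up endpoint):
    #
    #   proxy = SOCKS5:127.0.0.1:1080
    #
    # which resolves to socks.setdefaultproxy(socks.SOCKS5, '127.0.0.1',
    # 1080) through the getattr() lookup above.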

    def __getpassword(self):
        """Returns the server password or None"""

        if self.goodpassword != None: # use cached good one first
            return self.goodpassword

        if self.password != None and self.passworderror == None:
            return self.password # non-failed preconfigured one

        # get 1) configured password first 2) fall back to asking via UI
        self.password = self.repos.getpassword() or \
            self.ui.getpass(self.username, self.config, self.passworderror)
        self.passworderror = None
        return self.password

    def __md5handler(self, response):
        challenge = response.strip()
        self.ui.debug('imap', '__md5handler: got challenge %s'% challenge)

        passwd = self.__getpassword()
        retval = self.username + ' ' + hmac.new(passwd, challenge).hexdigest()
        self.ui.debug('imap', '__md5handler: returning %s'% retval)
        return retval
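
    # Shape of the CRAM-MD5 reply built above (made-up credentials):
    # the username, one space, then the hex HMAC-MD5 of the server
    # challenge keyed with the password, e.g.
    #
    #   'jane ' + hmac.new(b'secret', challenge).hexdigest()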

    def __loginauth(self, imapobj):
        """ Basic authentication via LOGIN command."""

        self.ui.debug('imap', 'Attempting IMAP LOGIN authentication')
        imapobj.login(self.username, self.__getpassword())

    def __plainhandler(self, response):
        """Implements SASL PLAIN authentication, RFC 4616,
          http://tools.ietf.org/html/rfc4616"""

        authc = self.username
        if not authc:
            raise OfflineImapError("No username provided for '%s'"
                                    % self.repos.getname(),
                                   OfflineImapError.ERROR.REPO)

        passwd = self.__getpassword()
        authz = b''
        if self.user_identity != None:
            authz = self.user_identity
        # At this point all authz, authc and passwd are expected bytes encoded
        # in UTF-8.
        NULL = b'\x00'
        retval = NULL.join((authz, authc, passwd))
        logsafe_retval = NULL.join((authz, authc, b'(passwd hidden for log)'))
        self.ui.debug('imap', '__plainhandler: returning %s'% logsafe_retval)
        return retval
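
    # The SASL PLAIN initial response built above is three fields joined
    # by NUL bytes (example values are made up):
    #
    #   b'admin\x00jane\x00secret'    # authzid NUL authcid NUL passwd
    #
    # With no user_identity the authzid is empty: b'\x00jane\x00secret'.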

    def __xoauth2handler(self, response):
        now = datetime.datetime.now()
        if self.oauth2_access_token_expires_at \
                and self.oauth2_access_token_expires_at < now:
            self.oauth2_access_token = None
            self.ui.debug('imap', 'xoauth2handler: oauth2_access_token expired')

        if self.oauth2_access_token is None:
            if self.oauth2_request_url is None:
                raise OfflineImapError("No remote oauth2_request_url for "
                    "repository '%s' specified."%
                    self, OfflineImapError.ERROR.REPO)

            # Generate new access token.
            params = {}
            params['client_id'] = self.oauth2_client_id
            params['client_secret'] = self.oauth2_client_secret
            params['refresh_token'] = self.oauth2_refresh_token
            params['grant_type'] = 'refresh_token'

            self.ui.debug('imap', 'xoauth2handler: url "%s"'%
                self.oauth2_request_url)
            self.ui.debug('imap', 'xoauth2handler: params "%s"'% params)

            original_socket = socket.socket
            socket.socket = self.authproxied_socket
            try:
                response = urllib.urlopen(
                    self.oauth2_request_url, urllib.urlencode(params)).read()
            except Exception as e:
                try:
                    msg = "%s (configuration is: %s)"% (e, str(params))
                except Exception as eparams:
                    msg = "%s [cannot display configuration: %s]"% (e, eparams)
                six.reraise(type(e), type(e)(msg), exc_info()[2])
            finally:
                socket.socket = original_socket

            resp = json.loads(response)
            self.ui.debug('imap', 'xoauth2handler: response "%s"'% resp)
            if u'error' in resp:
                raise OfflineImapError("xoauth2handler got: %s"% resp,
                    OfflineImapError.ERROR.REPO)
            self.oauth2_access_token = resp['access_token']
            if u'expires_in' in resp:
                self.oauth2_access_token_expires_at = now + datetime.timedelta(
                    seconds=resp['expires_in']/2
                )

        self.ui.debug('imap', 'xoauth2handler: access_token "%s expires %s"'% (
            self.oauth2_access_token, self.oauth2_access_token_expires_at))
        auth_string = 'user=%s\1auth=Bearer %s\1\1'% (
            self.username, self.oauth2_access_token)
        #auth_string = base64.b64encode(auth_string)
        self.ui.debug('imap', 'xoauth2handler: returning "%s"'% auth_string)
        return auth_string
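
    # The XOAUTH2 initial response built above has this shape (made-up
    # values):
    #
    #   'user=jane@example.com\x01auth=Bearer ya29.a0Af...\x01\x01'
    #
    # imaplib2 base64-encodes whatever the handler returns, which is
    # presumably why the explicit b64encode above is commented out.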

    # Perform the next step handling a GSSAPI connection.
    # Client sends first, so token will be ignored if there is no context.
    def __gsshandler(self, token):
        if token == "":
            token = None
        try:
            if not self.gss_vc:
                name = gssapi.Name('imap@' + self.hostname,
                                   gssapi.NameType.hostbased_service)
                self.gss_vc = gssapi.SecurityContext(usage="initiate",
                                                     name=name)

            if not self.gss_vc.complete:
                response = self.gss_vc.step(token)
                return response if response else ""
            elif token is None:
                # uh... context is complete, so there's no negotiation we can
                # do.  But we also don't have a token, so we can't send any
                # kind of response.  Empirically, some (but not all) servers
                # seem to put us in this state, and seem fine with getting no
                # GSSAPI content in response, so give it to them.
                return ""

            # Don't bother checking qop because we're over a TLS channel
            # already.  But hey, if some server started encrypting tomorrow,
            # we'd be ready since krb5 always requests integrity and
            # confidentiality support.
            response = self.gss_vc.unwrap(token)

            # This is a behavior we got from pykerberos.  First byte is one,
            # first four bytes are preserved (pykerberos calls this a length).
            # Any additional bytes are username.
            reply = []
            reply[0:4] = response.message[0:4]
            reply[0] = '\x01'
            if self.username:
                reply[5:] = self.username
            reply = ''.join(reply)

            response = self.gss_vc.wrap(reply, response.encrypted)
            return response.message if response.message else ""
        except gssapi.exceptions.GSSError as err:
            # GSSAPI errored out on us; respond with None to cancel the
            # authentication
            self.ui.debug('imap', err.gen_message())
            return None
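
    # A hedged sketch of the full client-side negotiation that the handler
    # above performs one callback at a time (server_tokens is hypothetical):
    #
    #     name = gssapi.Name('imap@' + hostname,
    #                        gssapi.NameType.hostbased_service)
    #     ctx = gssapi.SecurityContext(usage='initiate', name=name)
    #     out = ctx.step(None)          # client sends the first token
    #     for tok in server_tokens:
    #         if ctx.complete:
    #             break
    #         out = ctx.step(tok)       # feed each server reply back in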

    def __start_tls(self, imapobj):
        if 'STARTTLS' in imapobj.capabilities and not self.usessl:
            self.ui.debug('imap', 'Using STARTTLS connection')
            try:
                imapobj.starttls()
            except imapobj.error as e:
                raise OfflineImapError("Failed to start "
                    "TLS connection: %s"% str(e),
                    OfflineImapError.ERROR.REPO, None, exc_info()[2])

    ## All __authn_* procedures are helpers that do authentication.
    ## They are class methods that take one parameter, IMAP object.
    ##
    ## Each function should return True if authentication was
    ## successful and False if authentication wasn't even tried
    ## for some reason (but not when IMAP has no such authentication
    ## capability, calling code checks that).
    ##
    ## Functions can also raise exceptions; two types are special
    ## and will be handled by the calling code:
    ##
    ## - imapobj.error means that there was some error that
    ##   comes from imaplib2;
    ##
    ## - OfflineImapError means that function detected some
    ##   problem by itself.

    def __authn_gssapi(self, imapobj):
        if not have_gss:
            return False

        self.connectionlock.acquire()
        try:
            imapobj.authenticate('GSSAPI', self.__gsshandler)
            # Record success before returning: an "else" clause after
            # "return True" would be dead code, since the return exits
            # the try block before the else could run.
            self.gssapi = True
            self.gss_vc = None
            return True
        except imapobj.error:
            self.gssapi = False
            raise
        finally:
            self.connectionlock.release()

    def __authn_cram_md5(self, imapobj):
        imapobj.authenticate('CRAM-MD5', self.__md5handler)
        return True

    def __authn_plain(self, imapobj):
        imapobj.authenticate('PLAIN', self.__plainhandler)
        return True

    def __authn_xoauth2(self, imapobj):
        if self.oauth2_refresh_token is None \
                and self.oauth2_access_token is None:
            return False

        imapobj.authenticate('XOAUTH2', self.__xoauth2handler)
        return True

    def __authn_login(self, imapobj):
        # Use LOGIN command, unless LOGINDISABLED is advertised
        # (per RFC 2595)
        if 'LOGINDISABLED' in imapobj.capabilities:
            raise OfflineImapError("IMAP LOGIN is "
                "disabled by server.  Need to use SSL?",
                OfflineImapError.ERROR.REPO)
        else:
            self.__loginauth(imapobj)
            return True

    def __authn_helper(self, imapobj):
        """Authentication machinery for self.acquireconnection().

        Raises OfflineImapError() of type ERROR.REPO when
        there are either fatal problems or no authentications
        succeeded.

        If any authentication method succeeds, routine should exit:
        warnings for failed methods are to be produced in the
        respective except blocks."""

        # Stack stores pairs of (method name, exception)
        exc_stack = []
        tried_to_authn = False
        tried_tls = False
        # Authentication routines, hash keyed by method name
        # with value that is a tuple with
        # - authentication function,
        # - tryTLS flag,
        # - check IMAP capability flag.
        auth_methods = {
          "GSSAPI": (self.__authn_gssapi, False, True),
          "XOAUTH2": (self.__authn_xoauth2, True, True),
          "CRAM-MD5": (self.__authn_cram_md5, True, True),
          "PLAIN": (self.__authn_plain, True, True),
          "LOGIN": (self.__authn_login, True, False),
        }

        # GSSAPI is tried first by default: we will probably go TLS after it and
        # GSSAPI mustn't be tunneled over TLS.
        for m in self.authmechs:
            if m not in auth_methods:
                raise Exception("Bad authentication method %s, "
                  "please, file OfflineIMAP bug" % m)

            func, tryTLS, check_cap = auth_methods[m]

            # TLS must be initiated before checking capabilities:
            # they could have been changed after STARTTLS.
            if tryTLS and self.starttls and not tried_tls:
                tried_tls = True
                self.__start_tls(imapobj)

            if check_cap:
                cap = "AUTH=" + m
                if cap not in imapobj.capabilities:
                    continue

            tried_to_authn = True
            self.ui.debug('imap', u'Attempting '
              '%s authentication'% m)
            try:
                if func(imapobj):
                    return
            except (imapobj.error, OfflineImapError) as e:
                self.ui.warn('%s authentication failed: %s'% (m, e))
                exc_stack.append((m, e))

        if len(exc_stack):
            msg = "\n\t".join([": ".join((x[0], str(x[1]))) for x in exc_stack])
            raise OfflineImapError("All authentication types "
              "failed:\n\t%s"% msg, OfflineImapError.ERROR.REPO)

        if not tried_to_authn:
            methods = ", ".join([x[5:] for x in
                [x for x in imapobj.capabilities if x[0:5] == "AUTH="]])
            raise OfflineImapError(u"Repository %s: no supported "
              "authentication mechanisms found; configured %s, "
              "server advertises %s"% (self.repos,
              ", ".join(self.authmechs), methods),
              OfflineImapError.ERROR.REPO)

    def __verifycert(self, cert, hostname):
        """Verify that cert (in socket.getpeercert() format) matches hostname.

        CRLs are not handled.
        Returns error message if any problems are found and None on success."""

        errstr = "CA Cert verifying failed: "
        if not cert:
            return ('%s no certificate received'% errstr)
        dnsname = hostname.lower()
        certnames = []

        # cert expired?
        notafter = cert.get('notAfter')
        if notafter:
            if time.time() >= cert_time_to_seconds(notafter):
                return '%s certificate expired %s'% (errstr, notafter)

        # First read commonName
        for s in cert.get('subject', []):
            key, value = s[0]
            if key == 'commonName':
                certnames.append(value.lower())
        if len(certnames) == 0:
            return ('%s no commonName found in certificate'% errstr)

        # Then read subjectAltName
        for key, value in cert.get('subjectAltName', []):
            if key == 'DNS':
                certnames.append(value.lower())

        # And finally try to match hostname with one of these names
        for certname in certnames:
            if (certname == dnsname or
                '.' in dnsname and certname == '*.' + dnsname.split('.', 1)[1]):
                return None

        return ('%s no matching domain name found in certificate'% errstr)
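
    # Worked examples of the matching rule above (illustrative only):
    #   certname 'mail.example.com' matches hostname 'mail.example.com'
    #   certname '*.example.com'    matches hostname 'mail.example.com'
    #   certname '*.example.com'    does NOT match 'sub.mail.example.com'
    #     (the wildcard covers exactly one leading label)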

    def acquireconnection(self):
        """Fetches a connection from the pool, making sure to create a new one
        if needed, to obey the maximum connection limits, etc.
        Opens a connection to the server and returns an appropriate
        object."""

        self.semaphore.acquire()
        self.connectionlock.acquire()
        curThread = currentThread()
        imapobj = None

        if len(self.availableconnections): # One is available.
            # Try to find one that previously belonged to this thread
            # as an optimization.  Start from the back, since that's where
            # released connections are appended.
            for i in range(len(self.availableconnections) - 1, -1, -1):
                tryobj = self.availableconnections[i]
                if self.lastowner[tryobj] == curThread.ident:
                    imapobj = tryobj
                    del(self.availableconnections[i])
                    break
            if not imapobj:
                imapobj = self.availableconnections[0]
                del(self.availableconnections[0])
            self.assignedconnections.append(imapobj)
            self.lastowner[imapobj] = curThread.ident
            self.connectionlock.release()
            return imapobj

        self.connectionlock.release()   # Release until need to modify data

        # Must be careful here that if we fail we should bail out gracefully
        # and release locks / threads so that the next attempt can try...
        success = False
        try:
            while success is not True:
                # Generate a new connection.
                if self.tunnel:
                    self.ui.connecting(
                        self.repos.getname(), 'tunnel', self.tunnel)
                    imapobj = imaplibutil.IMAP4_Tunnel(
                        self.tunnel,
                        timeout=socket.getdefaulttimeout(),
                        use_socket=self.proxied_socket,
                        )
                    success = True
                elif self.usessl:
                    self.ui.connecting(
                        self.repos.getname(), self.hostname, self.port)
                    self.ui.debug('imap', "%s: level '%s', version '%s'"%
                        (self.repos.getname(), self.tlslevel, self.sslversion))
                    imapobj = imaplibutil.WrappedIMAP4_SSL(
                        host=self.hostname,
                        port=self.port,
                        keyfile=self.sslclientkey,
                        certfile=self.sslclientcert,
                        ca_certs=self.sslcacertfile,
                        cert_verify_cb=self.__verifycert,
                        ssl_version=self.sslversion,
                        timeout=socket.getdefaulttimeout(),
                        fingerprint=self.fingerprint,
                        use_socket=self.proxied_socket,
                        tls_level=self.tlslevel,
                        af=self.af,
                        )
                else:
                    self.ui.connecting(
                        self.repos.getname(), self.hostname, self.port)
                    imapobj = imaplibutil.WrappedIMAP4(
                        self.hostname, self.port,
                        timeout=socket.getdefaulttimeout(),
                        use_socket=self.proxied_socket,
                        af=self.af,
                        )

                if not self.preauth_tunnel:
                    try:
                        self.__authn_helper(imapobj)
                        self.goodpassword = self.password
                        success = True
                    except OfflineImapError as e:
                        self.passworderror = str(e)
                        raise

            # Enable compression
            if self.repos.getconfboolean('usecompression', 0):
                imapobj.enable_compression()

            # update capabilities after login, e.g. gmail serves different ones
            typ, dat = imapobj.capability()
            if dat != [None]:
                imapobj.capabilities = tuple(dat[-1].upper().split())

            if self.delim is None:
                listres = imapobj.list(self.reference, '""')[1]
                if listres == [None] or listres is None:
                    # Some buggy IMAP servers do not respond well to LIST "" ""
                    # Work around them.
                    listres = imapobj.list(self.reference, '"*"')[1]
                if listres == [None] or listres is None:
                    # No Folders were returned. This occurs, e.g. if the
                    # 'reference' prefix does not exist on the mail
                    # server. Raise exception.
                    err = "Server '%s' returned no folders in '%s'"% \
                        (self.repos.getname(), self.reference)
                    self.ui.warn(err)
                    raise Exception(err)
                self.delim, self.root = \
                     imaputil.imapsplit(listres[0])[1:]
                self.delim = imaputil.dequote(self.delim)
                self.root = imaputil.dequote(self.root)

            with self.connectionlock:
                self.assignedconnections.append(imapobj)
                self.lastowner[imapobj] = curThread.ident
            return imapobj
        except Exception as e:
            """If we are here then we did not succeed in getting a
            connection - we should clean up and then re-raise the
            error..."""

            self.semaphore.release()

            severity = OfflineImapError.ERROR.REPO
            if isinstance(e, gaierror):
                #DNS related errors. Abort Repo sync
                #TODO: special error msg for e.errno == 2 "Name or service not known"?
                reason = "Could not resolve name '%s' for repository "\
                         "'%s'. Make sure you have configured the ser"\
                         "ver name correctly and that you are online."%\
                         (self.hostname, self.repos)
                six.reraise(OfflineImapError,
                            OfflineImapError(reason, severity),
                            exc_info()[2])

            elif isinstance(e, SSLError) and e.errno == errno.EPERM:
                # SSL unknown protocol error
                # happens e.g. when connecting via SSL to a non-SSL service
                if self.port != 993:
                    reason = "Could not connect via SSL to host '%s' and non-s"\
                        "tandard ssl port %d configured. Make sure you connect"\
                        " to the correct port. Got: %s"% (
                            self.hostname, self.port, e)
                else:
                    reason = "Unknown SSL protocol connecting to host '%s' for "\
                         "repository '%s'. OpenSSL responded:\n%s"\
                         % (self.hostname, self.repos, e)
                six.reraise(OfflineImapError,
                            OfflineImapError(reason, severity),
                            exc_info()[2])

            elif isinstance(e, socket.error) and e.args[0] == errno.ECONNREFUSED:
                # "Connection refused", can be a non-existing port, or an unauthorized
                # webproxy (open WLAN?)
                reason = "Connection to host '%s:%d' for repository '%s' was "\
                    "refused. Make sure you have the right host and port "\
                    "configured and that you are actually able to access the "\
                    "network."% (self.hostname, self.port, self.repos)
                six.reraise(OfflineImapError,
                            OfflineImapError(reason, severity),
                            exc_info()[2])
            # Could not acquire connection to the remote;
            # socket.error(last_error) raised
            elif str(e)[:24] == "can't open socket; error":
                six.reraise(OfflineImapError,
                            OfflineImapError(
                                "Could not connect to remote server '%s' "
                                "for repository '%s'. Remote does not answer."%
                                (self.hostname, self.repos),
                                OfflineImapError.ERROR.REPO),
                            exc_info()[2])
            else:
                # re-raise all other errors
                raise

    def connectionwait(self):
        """Waits until there is a connection available.

        Note that between the time that a connection becomes available and the
        time it is requested, another thread may have grabbed it.  This function
        is mainly present as a way to avoid spawning thousands of threads to
        copy messages, then have them all wait for 3 available connections.
        It's OK if we have maxconnections + 1 or 2 threads, which is what this
        will help us do."""

        self.semaphore.acquire() # Blocking until maxconnections has free slots.
        self.semaphore.release()
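
        # Note the idiom: acquire() blocks until a permit is free and the
        # immediate release() hands it straight back, so the caller waits
        # for capacity without actually holding a connection slot.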

    def close(self):
        # Make sure I own all the semaphores.  Let the threads finish
        # their stuff.  This is a blocking method.
        with self.connectionlock:
            # first, wait till all connections had been released.
            # TODO: won't work IMHO, as releaseconnection() also
            # requires the connectionlock, leading to a potential
            # deadlock! Audit & check!
            threadutil.semaphorereset(self.semaphore, self.maxconnections)
            for imapobj in self.assignedconnections + self.availableconnections:
                imapobj.logout()
            self.assignedconnections = []
            self.availableconnections = []
            self.lastowner = {}
            # reset GSSAPI state
            self.gss_vc = None
            self.gssapi = False
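
    # threadutil.semaphorereset() is presumably a helper that acquires all
    # maxconnections permits (blocking until every connection has been
    # released) and then releases them again, e.g.:
    #
    #     def semaphorereset(semaphore, maxcount):
    #         for _ in range(maxcount):
    #             semaphore.acquire()
    #         for _ in range(maxcount):
    #             semaphore.release()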

    def keepalive(self, timeout, event):
        """Sends a NOOP to each connection recorded.

        It will wait a maximum of timeout seconds between doing this, and will
        continue to do so until the Event object as passed is true.  This method
        is expected to be invoked in a separate thread, which should be join()'d
        after the event is set."""

        self.ui.debug('imap', 'keepalive thread started')
        while not event.isSet():
            self.connectionlock.acquire()
            numconnections = len(self.assignedconnections) + \
                             len(self.availableconnections)
            self.connectionlock.release()

            threads = []
            for i in range(numconnections):
                self.ui.debug('imap', 'keepalive: processing connection %d of %d'%
                                      (i + 1, numconnections))
                if len(self.idlefolders) > i:
                    # IDLE thread
                    idler = IdleThread(self, self.idlefolders[i])
                else:
                    # NOOP thread
                    idler = IdleThread(self)
                idler.start()
                threads.append(idler)

            self.ui.debug('imap', 'keepalive: waiting for timeout')
            event.wait(timeout)
            self.ui.debug('imap', 'keepalive: after wait')

            for idler in threads:
                # Make sure all the commands have completed.
                idler.stop()
                idler.join()
            self.ui.debug('imap', 'keepalive: all threads joined')
        self.ui.debug('imap', 'keepalive: event is set; exiting')
        return


    def releaseconnection(self, connection, drop_conn=False):
        """Releases a connection, returning it to the pool.

        :param drop_conn: If True, the connection will be released and
           not be reused. This can be used to indicate broken connections."""

        if connection is None:
            return # Noop on bad connection.

        self.connectionlock.acquire()
        self.assignedconnections.remove(connection)
        # Don't reuse broken connections
        if connection.Terminate or drop_conn:
            connection.logout()
        else:
            self.availableconnections.append(connection)
        self.connectionlock.release()
        self.semaphore.release()
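
Taken together, acquireconnection(), releaseconnection() and connectionwait() form a semaphore-bounded resource pool. Below is a minimal standalone sketch of that pattern, with hypothetical names and none of the IMAP specifics; as in releaseconnection(), a broken item is dropped rather than returned to the free list.

from threading import BoundedSemaphore, Lock

class PooledResource(object):
    """Minimal sketch of a semaphore-bounded pool (hypothetical names)."""

    def __init__(self, factory, maxitems):
        self.factory = factory              # callable creating a new resource
        self.semaphore = BoundedSemaphore(maxitems)
        self.lock = Lock()
        self.available = []

    def acquire(self):
        self.semaphore.acquire()            # block while maxitems are in use
        with self.lock:
            if self.available:
                return self.available.pop()
        try:
            return self.factory()
        except Exception:
            self.semaphore.release()        # don't leak the permit on failure
            raise

    def release(self, item, drop=False):
        if not drop:
            with self.lock:
                self.available.append(item)
        self.semaphore.release()
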
# Share one module-level semaphore between all callers; a semaphore created
# inside the function would never be contended, leaving the increment unprotected.
counter3_semaphore = BoundedSemaphore()

def growMulC():
	counter3_semaphore.acquire()
	try:
		global_var.counter3 += 1
	finally:
		counter3_semaphore.release()
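
A quick demonstration of the shared-semaphore counter under contention; global_var is assumed to be a module exposing a counter3 attribute initialized to 0.

from threading import Thread

threads = [Thread(target=growMulC) for _ in range(100)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(global_var.counter3)   # 100: each increment ran under the semaphore
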
Esempio n. 52
0
class Downloader(object):
    NUMTHREAD_DEFAULT = 4

    # Limit maximum number of threads
    # Semaphore to enforce upper limit
    # Restrict result queue size to prevent out-of-memory
    def __init__(self, maxConcurrentRequests = NUMTHREAD_DEFAULT):
        self.numThreads = maxConcurrentRequests
        self.resourcePool = BoundedSemaphore(self.numThreads)
        self.resQueue = Queue(100000)

    # Dispatches a separate thread to download file.
    # Downloads the HTML file at the given URL and writes to file with same name as URL.
    # Blocks if there are more than the specified threshold concurrent requests running.
    def download(self, url):
        print datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ": Trying to get thread"
        self.resourcePool.acquire()	# Blocking by default
        print datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ": Successfully acquired thread"
        Thread(target=self.privateDownload, args=(url,)).start()

    # Returns a tuple (url, htmlString, RTT)
    # Blocks until there is a result
    def getResult(self):
        return self.resQueue.get(True)

    # Private
    def privateDownload(self, url):
        if url is None:
            return

        # Truncate at the first CR or LF, which also strips a trailing CRLF/LF
        for sep in ("\r", "\n"):
            idx = url.find(sep)
            if idx != -1:
                url = url[:idx]

        # Form HTTP request
        req = HttpRequests.get(url, {"Connection": "close", "User-Agent": "game-price-crawler", "Accept": "text/html"})

        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        # Use TLS socket if is HTTPS
        try:
            if req.isSecureProtocol():
                s = ssl.wrap_socket(s)
                s.connect((req.getHeader("Host"), 443))
                s.do_handshake()
            else:
                s.connect((req.getHeader("Host"), 80))

            print datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ": %s: Sending request" % (url)
            start = time.time()

            # Send request
            s.send(str(req))

            # Wrap the socket in a buffered file object for line-based reads
            fp = s.makefile('r', 2048)
            res = ""
            line = fp.readline()
            end = time.time()
            print end-start

            # Read only the response header
            while line!="\r\n" and line!="":
                res+=line
                line = fp.readline()
            res+=line

            # Convert header string into object for easy access of header fields
            headers = Downloader.convertToHeaderObject(res)
            print datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ": %s: %d %s" % (url, headers["statusCode"], headers["statusMessage"])

            # Status OK, proceed to download
            if headers["statusCode"]==200:
                res = fp.read()
                try:
                    self.resQueue.put((url, res, int((end-start)*1000)), True, 0.5)
                except:
                    print datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ": Result queue full, dropping "+url

            # A redirect response, follow the redirect link
            elif headers.get("Location")!=None:
                print datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ": %s: Redirecting to %s" % (url, headers["Location"])
                fp.close()
                s.close()
                self.privateDownload(headers["Location"])
                return
            fp.close()
        except Exception as e:
            print str(e)
            print datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ": Unable to fetch resource %s" % (url)

        # Close socket and allow other threads to be spawned
        s.close()
        self.resourcePool.release()
        print datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ": Thread released by " + url

    # s must be a properly formatted HTTP response header string,
    # each line ending with CRLF and the header ending with CRLF after the last line.
    @staticmethod
    def convertToHeaderObject(s):
        obj = {}
        sidx = s.find(" ")+1
        eidx = s.find(" ", sidx)
        obj['statusCode'] = int(s[sidx:eidx])
        sidx = eidx+1
        eidx = s.find("\r\n", sidx)
        obj['statusMessage'] = s[sidx:eidx]
        sidx = eidx+2
        eidx = s.find("\r\n", sidx)
        while eidx!=sidx:
            l = s[sidx:eidx]
            header = l[:l.find(":")]
            value = l[l.find(": ")+2:]
            obj[header] = value
            sidx = eidx+2
            eidx = s.find("\r\n", sidx)
        return obj
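
A hedged usage sketch for the Downloader above; the URLs are placeholders, and the result loop assumes both requests come back with status 200 (only successful downloads are queued).

d = Downloader(maxConcurrentRequests=4)
urls = ["http://example.com/", "http://example.org/"]
for u in urls:
    d.download(u)                    # blocks once 4 requests are in flight
for _ in urls:
    url, html, rtt = d.getResult()   # (url, htmlString, RTT in ms)
    print("%s: %d bytes in %d ms" % (url, len(html), rtt))
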
Esempio n. 53
0
class repl_buffer:
    def __init__ ( self, output_pipe ):

        self.output   = output_pipe
        self.filename = ''
        self.buffer   = ''
        self.sema     = BoundedSemaphore()
                            # Semaphore to synchronize access to the global display queue

    def setfile( self, filename ):
        """Set output filename. Greet user if this is the first time.
        """
        self.sema.acquire()
        oldname = self.filename
        self.filename = filename
        if oldname == '':
            try:
                # Delete old file created at a previous run
                os.remove( self.filename )
            except:
                # OK, at least we tried
                pass
            self.write_nolock( newline + ';;; Slimv client is connected to REPL on port ' + str(PORT) + '.' + newline, True )
            user = None
            if mswindows:
                user = os.getenv('USERNAME')
            else:
                user = os.getenv('USER')
            if not user:
                self.write_nolock( ';;; This could be the start of a beautiful program.' + newline + newline, True )
            else:
                self.write_nolock( ';;; ' + user + ', this could be the start of a beautiful program.' + newline + newline, True )
        self.sema.release()

    def writebegin( self ):
        """Begin the writing process. The process is protected by a semaphore.
        """
        self.sema.acquire()

    def writeend( self ):
        """Finish the writing process. Release semaphore
        """
        self.sema.release()

    def write_nolock( self, text, fileonly=False ):
        """Write text into the global display queue buffer.
           The writing process is not protected.
        """
        if not fileonly:
            try:
                # Write all lines to the display
                os.write( self.output.fileno(), text )
            except:
                pass

        if self.filename != '':
            tries = 4
            while tries > 0:
                try:
                    file = open( self.filename, 'at' )
                    try:
                        #file.write( text )
                        if self.buffer != '':
                            # There is output pending
                            os.write(file.fileno(), self.buffer )
                            self.buffer = ''
                        os.write(file.fileno(), text )
                    finally:
                        file.close()
                    tries = 0
                except IOError:
                    tries = tries - 1
                    if tries == 0:
                        traceback.print_exc()
                    time.sleep(0.05)
                except:
                    tries = tries - 1
                    time.sleep(0.05)
        elif len( self.buffer ) < 2000:
            # No filename supplied; collect output into a buffer until a filename is given.
            # We collect only a limited number of bytes; beyond that, most likely
            # no filename will be given at all.
            self.buffer = self.buffer + text

    def write( self, text, fileonly=False ):
        """Write text into the global display queue buffer.
           The writing process is protected by a semaphore.
        """
        self.writebegin()
        self.write_nolock( text, fileonly )
        self.writeend()
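
writebegin()/writeend() leave the semaphore held if a write between them raises. A minimal sketch (not part of the original class) wrapping the same discipline in a context manager so the release always happens:

from contextlib import contextmanager

@contextmanager
def writing(buf):
    buf.writebegin()
    try:
        yield buf
    finally:
        buf.writeend()

# with writing(repl) as b:      # 'repl' is a repl_buffer instance
#     b.write_nolock('first\n')
#     b.write_nolock('second\n')
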
Esempio n. 54
0
class FilePersistentImageManager(PersistentImageManager):
    """ TODO: Docstring for PersistentImageManager  """

    storage_path = prop("_storage_path")

    def __init__(self, storage_path=STORAGE_PATH):
        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        if not os.path.exists(storage_path):
            self.log.debug("Creating directory (%s) for persistent storage" % (storage_path))
            os.makedirs(storage_path)
            os.chmod(storage_path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
        elif not os.path.isdir(storage_path):
            raise ImageFactoryException("Storage location (%s) already exists and is not a directory - cannot init persistence" % (storage_path))
        else:
            # TODO: verify that we can write to this location
            pass
        self.storage_path = storage_path
        self.metadata_lock = BoundedSemaphore()


    def _image_from_metadata(self, metadata):
        # Given metadata retrieved from the backing store, return a PersistentImage type object
        # with us as the persistent_manager.

        image_module = __import__(metadata['type'], globals(), locals(), [metadata['type']], -1)
        image_class = getattr(image_module, metadata['type'])
        image = image_class(metadata['identifier'])

        # We don't actually want a 'type' property in the resulting PersistentImage object
        del metadata['type']

        for key in image.metadata().union(metadata.keys()):
            setattr(image, key, metadata.get(key))

        #set ourselves as the manager
        image.persistent_manager = self

        return image


    def _metadata_from_file(self, metadatafile):
        self.metadata_lock.acquire()
        try:
            mdf = open(metadatafile, 'r')
            metadata = json.load(mdf)
            mdf.close()
        finally:
            self.metadata_lock.release()
        return metadata


    def image_with_id(self, image_id):
        """
        TODO: Docstring for image_with_id

        @param image_id TODO 

        @return TODO
        """
        metadatafile = self.storage_path + '/' + image_id + METADATA_EXT
        try:
            metadata = self._metadata_from_file(metadatafile)
        except Exception as e:
            self.log.debug('Exception caught: %s' % e)
            return None

        return self._image_from_metadata(metadata)


    def images_from_query(self, query):
        images = [ ]
        for storefileshortname in os.listdir(self.storage_path):
            storefilename = self.storage_path + '/' + storefileshortname
            if re.search(METADATA_EXT, storefilename):
                try:
                    metadata = self._metadata_from_file(storefilename)
                    match = True
                    for querykey in query:
                        if metadata[querykey] != query[querykey]:
                            match = False
                            break
                    if match:
                        images.append(self._image_from_metadata(metadata))
                except:
                    self.log.warn("Could not extract image metadata from file (%s)" % (storefilename))

        return images              


    def add_image(self, image):
        """
        TODO: Docstring for add_image

        @param image TODO 

        @return TODO
        """
        image.persistent_manager = self
        basename = self.storage_path + '/' + str(image.identifier)
        metadata_path = basename + METADATA_EXT
        body_path = basename + BODY_EXT
        image.data = body_path
        try:
            if not os.path.isfile(metadata_path):
                open(metadata_path, 'w').close()
                self.log.debug('Created file %s' % metadata_path)
            if not os.path.isfile(body_path):
                open(body_path, 'w').close()
                self.log.debug('Created file %s' % body_path)
        except IOError as e:
            self.log.debug('Exception caught: %s' % e)

        self.save_image(image)

    def save_image(self, image):
        """
        TODO: Docstring for save_image

        @param image TODO

        @return TODO
        """
        image_id = str(image.identifier)
        metadata_path = self.storage_path + '/' + image_id + METADATA_EXT
        if not os.path.isfile(metadata_path):
            raise ImageFactoryException('Image %s not managed, use "add_image()" first.' % image_id)
        try:
            meta = {'type': type(image).__name__}
            for mdprop in image.metadata():
                meta[mdprop] = getattr(image, mdprop, None)
 
            self.metadata_lock.acquire()
            try:
                mdf = open(metadata_path, 'w')
                json.dump(meta, mdf)
                mdf.close()
            finally:
                self.metadata_lock.release()

            self.log.debug("Saved metadata for image (%s): %s" % (image_id, meta))
        except Exception as e:
            self.log.debug('Exception caught: %s' % e)
            raise ImageFactoryException('Unable to save image metadata: %s' % e)

    def delete_image_with_id(self, image_id):
        """
        TODO: Docstring for delete_image_with_id

        @param image_id TODO 

        @return TODO
        """
        basename = self.storage_path + '/' + image_id
        metadata_path = basename + METADATA_EXT
        body_path = basename + BODY_EXT
        try:
            os.remove(metadata_path)
            os.remove(body_path)
        except Exception as e:
            self.log.warn('Unable to delete file: %s' % e)
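
A hedged usage sketch for the manager above; the storage path, image id and query values are illustrative, and STORAGE_PATH, METADATA_EXT and BODY_EXT come from the surrounding module.

mgr = FilePersistentImageManager(storage_path='/tmp/imagefactory')

image = mgr.image_with_id('1234-abcd')       # None if no metadata file exists
for match in mgr.images_from_query({'status': 'COMPLETE'}):
    print(match.identifier)
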
Esempio n. 55
0
class SimpleStorageManager(StorageManager):

    def __init__(self, storage_driver, table_info_repo, concurrent_tasks=1000,
                 batch_chunk_size=25, schema_operation_timeout=300):
        self._storage_driver = storage_driver
        self._table_info_repo = table_info_repo
        self._batch_chunk_size = batch_chunk_size
        self._schema_operation_timeout = schema_operation_timeout
        self.__task_executor = ThreadPoolExecutor(concurrent_tasks)
        self.__task_semaphore = BoundedSemaphore(concurrent_tasks)

    def _do_create_table(self, context, table_info):
        try:
            table_info.internal_name = self._storage_driver.create_table(
                context, table_info
            )
            table_info.status = TableMeta.TABLE_STATUS_ACTIVE
            self._table_info_repo.update(
                context, table_info, ["status", "internal_name"]
            )
        except BackendInteractionException as ex:
            notifier.notify(context, notifier.EVENT_TYPE_TABLE_CREATE_ERROR,
                            ex.message, priority=notifier.PRIORITY_ERROR)
            raise

        notifier.notify(context, notifier.EVENT_TYPE_TABLE_CREATE_END,
                        table_info.schema)

    def create_table(self, context, table_name, table_schema):
        notifier.notify(context, notifier.EVENT_TYPE_TABLE_CREATE_START,
                        table_schema)

        table_info = TableInfo(table_name, table_schema,
                               TableMeta.TABLE_STATUS_CREATING)
        try:
            self._table_info_repo.save(context, table_info)
        except TableAlreadyExistsException as e:
            notifier.notify(context, notifier.EVENT_TYPE_TABLE_CREATE_ERROR,
                            e.message, priority=notifier.PRIORITY_ERROR)
            raise

        self._do_create_table(context, table_info)

        return TableMeta(table_info.schema, table_info.status)

    def _do_delete_table(self, context, table_info):
        self._storage_driver.delete_table(context, table_info)

        self._table_info_repo.delete(context, table_info.name)

        notifier.notify(context, notifier.EVENT_TYPE_TABLE_DELETE_END,
                        table_info.name)

    def delete_table(self, context, table_name):
        notifier.notify(context, notifier.EVENT_TYPE_TABLE_DELETE_START,
                        table_name)
        try:
            table_info = self._table_info_repo.get(context,
                                                   table_name,
                                                   ['status'])
        except TableNotExistsException as e:
            notifier.notify(context, notifier.EVENT_TYPE_TABLE_DELETE_ERROR,
                            e.message, priority=notifier.PRIORITY_ERROR)
            raise

        if table_info.status == TableMeta.TABLE_STATUS_DELETING:
            # table is already being deleted, just return immediately
            notifier.notify(context, notifier.EVENT_TYPE_TABLE_DELETE_END,
                            table_name)
            return TableMeta(table_info.schema, table_info.status)
        elif table_info.status != TableMeta.TABLE_STATUS_ACTIVE:
            e = ResourceInUseException()
            notifier.notify(context, notifier.EVENT_TYPE_TABLE_DELETE_ERROR,
                            table_name + ' ' + e.message,
                            priority=notifier.PRIORITY_ERROR)
            raise e

        table_info.status = TableMeta.TABLE_STATUS_DELETING

        self._table_info_repo.update(context, table_info, ["status"])

        self._do_delete_table(context, table_info)

        return TableMeta(table_info.schema, table_info.status)

    def describe_table(self, context, table_name):
        table_info = self._table_info_repo.get(
            context, table_name, ['status', 'last_updated'])
        notifier.notify(context, notifier.EVENT_TYPE_TABLE_DESCRIBE,
                        table_name, priority=notifier.PRIORITY_DEBUG)

        timedelta = datetime.now() - table_info.last_updated

        if timedelta.total_seconds() > self._schema_operation_timeout:
            if table_info.status == TableMeta.TABLE_STATUS_CREATING:
                table_info.status = TableMeta.TABLE_STATUS_CREATE_FAILED
                self._table_info_repo.update(context, table_info, ['status'])
                LOG.debug(
                    "Table '{}' creation timed out."
                    " Setting status to {}".format(
                        table_info.name, TableMeta.TABLE_STATUS_CREATE_FAILED)
                )
                notifier.notify(
                    context.to_dict(),
                    notifier.EVENT_TYPE_TABLE_CREATE_ERROR,
                    dict(
                        table_name=table_name,
                        message='Operation timed out'
                    )
                )

            if table_info.status == TableMeta.TABLE_STATUS_DELETING:
                table_info.status = TableMeta.TABLE_STATUS_DELETE_FAILED
                self._table_info_repo.update(context, table_info, ['status'])
                LOG.debug(
                    "Table '{}' deletion timed out."
                    " Setting status to {}".format(
                        table_info.name, TableMeta.TABLE_STATUS_DELETE_FAILED)
                )
                notifier.notify(
                    context.to_dict(),
                    notifier.EVENT_TYPE_TABLE_DELETE_ERROR,
                    dict(
                        table_name=table_name,
                        message='Operation timed out'
                    )
                )

        return TableMeta(table_info.schema, table_info.status)

    def list_tables(self, context, exclusive_start_table_name=None,
                    limit=None):
        tnames = self._table_info_repo.get_tenant_table_names(
            context, exclusive_start_table_name, limit
        )
        notifier.notify(
            context, notifier.EVENT_TYPE_TABLE_LIST,
            dict(
                exclusive_start_table_name=exclusive_start_table_name,
                limit=limit
            ),
            priority=notifier.PRIORITY_DEBUG
        )

        return tnames

    def _execute_async(self, func, *args, **kwargs):
        weak_self = weakref.proxy(self)

        def callback(future):
            weak_self.__task_semaphore.release()

        self.__task_semaphore.acquire()
        future = self.__task_executor.submit(func, *args, **kwargs)
        future.add_done_callback(callback)
        return future
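
    # weakref.proxy above keeps the done-callback from holding a strong
    # reference to self: otherwise every pending future would keep the
    # manager alive until it completed. Minimal illustration of the idea
    # (hypothetical class):
    #
    #     class Owner(object):
    #         def submit(self, executor, fn):
    #             weak_self = weakref.proxy(self)
    #             fut = executor.submit(fn)
    #             fut.add_done_callback(lambda f: weak_self.on_done(f))
    #             return fut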

    @staticmethod
    def _validate_table_is_active(table_info):
        if table_info.status != TableMeta.TABLE_STATUS_ACTIVE:
            raise ValidationError(
                _("Can't execute request: "
                  "Table '%(table_name)s' status '%(table_status)s' "
                  "isn't %(expected_status)s"),
                table_name=table_info.name, table_status=table_info.status,
                expected_status=TableMeta.TABLE_STATUS_ACTIVE
            )

    @staticmethod
    def _validate_table_schema(table_info, attribute_map, keys_only=True,
                               index_name=None):
        schema_key_attributes = table_info.schema.key_attributes
        schema_attribute_type_map = table_info.schema.attribute_type_map

        key_attribute_names_to_find = set(schema_key_attributes)
        if index_name is not None:
            key_attribute_names_to_find.add(
                table_info.schema.index_def_map[index_name].alt_range_key_attr
            )

        if keys_only and (
                len(key_attribute_names_to_find) != len(attribute_map)):
            raise ValidationError(
                _("Specified key: %(key_attributes)s doesn't match expected "
                  "key attributes set: %(expected_key_attributes)s"),
                key_attributes=attribute_map,
                expected_key_attributes=key_attribute_names_to_find
            )

        for attr_name, typed_attr_value in attribute_map.iteritems():
            schema_attr_type = schema_attribute_type_map.get(attr_name, None)
            if schema_attr_type is None:
                continue
            key_attribute_names_to_find.discard(attr_name)

            if schema_attr_type != typed_attr_value.attr_type:
                raise ValidationError(
                    _("Attribute: '%(attr_name)s' of type: '%(attr_type)s' "
                      "doesn't match table schema expected attribute type: "
                      "'%(expected_attr_type)s'"),
                    attr_name=attr_name,
                    attr_type=typed_attr_value.attr_type.type,
                    expected_attr_type=schema_attr_type.type
                )

        if key_attribute_names_to_find:
            raise ValidationError(
                _("Couldn't find expected key attributes: "
                  "'%(expected_key_attributes)s'"),
                expected_key_attributes=key_attribute_names_to_find
            )

    def put_item(self, context, table_name, attribute_map, return_values=None,
                 if_not_exist=False, expected_condition_map=None):
        table_info = self._table_info_repo.get(context, table_name)
        self._validate_table_is_active(table_info)
        self._validate_table_schema(table_info, attribute_map,
                                    keys_only=False)

        with self.__task_semaphore:
            result = self._storage_driver.put_item(
                context, table_info, attribute_map, return_values,
                if_not_exist, expected_condition_map
            )
        notifier.notify(
            context, notifier.EVENT_TYPE_DATA_PUTITEM,
            dict(
                table_name=table_name,
                attribute_map=attribute_map,
                return_values=return_values,
                if_not_exist=if_not_exist,
                expected_condition_map=expected_condition_map
            ),
            priority=notifier.PRIORITY_DEBUG
        )

        return result

    def _put_item_async(self, context, table_info, attribute_map,
                        return_values=None, if_not_exist=False,
                        expected_condition_map=None):
        payload = dict(
            table_name=table_info.name,
            attribute_map=attribute_map,
            return_values=return_values,
            if_not_exist=if_not_exist,
            expected_condition_map=expected_condition_map
        )
        notifier.notify(context, notifier.EVENT_TYPE_DATA_PUTITEM_START,
                        payload)

        put_future = self._execute_async(
            self._storage_driver.put_item,
            context, table_info, attribute_map, return_values,
            if_not_exist, expected_condition_map
        )

        def callback(future):
            if not future.exception():
                notifier.notify(
                    context, notifier.EVENT_TYPE_DATA_PUTITEM_END,
                    payload
                )
            else:
                # Presumably the put path wants the matching PUTITEM error
                # event here; the exception itself is the payload, as in
                # the delete-item callback later in this class.
                notifier.notify(
                    context, notifier.EVENT_TYPE_DATA_PUTITEM_ERROR,
                    future.exception(), priority=notifier.PRIORITY_ERROR
                )

        put_future.add_done_callback(callback)
        return put_future

    def put_item_async(self, context, table_name, attribute_map, return_values,
                       if_not_exist=False, expected_condition_map=None):
        table_info = self._table_info_repo.get(context, table_name)
        self._validate_table_is_active(table_info)
        self._validate_table_schema(table_info, attribute_map, keys_only=False)

        return self._put_item_async(
            context, table_info, attribute_map, return_values,
            if_not_exist, expected_condition_map
        )

    def delete_item(self, context, table_name, key_attribute_map,
                    expected_condition_map=None):
        table_info = self._table_info_repo.get(context, table_name)
        self._validate_table_is_active(table_info)
        self._validate_table_schema(table_info, key_attribute_map)

        with self.__task_semaphore:
            result = self._storage_driver.delete_item(
                context, table_info, key_attribute_map, expected_condition_map
            )
        notifier.notify(
            context, notifier.EVENT_TYPE_DATA_DELETEITEM,
            dict(
                table_name=table_name,
                key_attribute_map=key_attribute_map,
                expected_condition_map=expected_condition_map
            ),
            priority=notifier.PRIORITY_DEBUG
        )

        return result

    def _delete_item_async(self, context, table_info, key_attribute_map,
                           expected_condition_map=None):
        payload = dict(
            table_name=table_info.name,
            key_attribute_map=key_attribute_map,
            expected_condition_map=expected_condition_map
        )
        notifier.notify(context, notifier.EVENT_TYPE_DATA_DELETEITEM_START,
                        payload)

        del_future = self._execute_async(
            self._storage_driver.delete_item,
            context, table_info, key_attribute_map, expected_condition_map
        )

        def callback(future):
            if not future.exception():
                notifier.notify(
                    context, notifier.EVENT_TYPE_DATA_DELETEITEM_END,
                    payload
                )
            else:
                notifier.notify(
                    context, notifier.EVENT_TYPE_DATA_DELETEITEM_ERROR,
                    future.exception(), priority=notifier.PRIORITY_ERROR
                )

        del_future.add_done_callback(callback)
        return del_future

    def delete_item_async(self, context, table_name, key_attribute_map,
                          expected_condition_map=None):
        table_info = self._table_info_repo.get(context, table_name)
        self._validate_table_is_active(table_info)
        self._validate_table_schema(table_info, key_attribute_map)

        return self._delete_item_async(context, table_info, key_attribute_map,
                                       expected_condition_map)

    def execute_write_batch(self, context, write_request_map):
        notifier.notify(context, notifier.EVENT_TYPE_DATA_BATCHWRITE_START,
                        write_request_map)
        write_request_list_to_send = []
        for table_name, write_request_list in write_request_map.iteritems():
            table_info = self._table_info_repo.get(context, table_name)
            for req in write_request_list:
                self._validate_table_is_active(table_info)

                if req.is_put:
                    self._validate_table_schema(table_info, req.attribute_map,
                                                keys_only=False)
                else:
                    self._validate_table_schema(table_info, req.attribute_map)

                write_request_list_to_send.append(
                    (table_info, req)
                )

        future_result_list = []
        for i in xrange(0, len(write_request_list_to_send),
                        self._batch_chunk_size):
            req_list = (
                write_request_list_to_send[i:i+self._batch_chunk_size]
            )

            future_result_list.append(
                self._batch_write_async(context, req_list)
            )

        unprocessed_items = {}
        for future_result in future_result_list:
            unprocessed_request_list = future_result.result()
            for (table_info, write_request) in unprocessed_request_list:
                table_name = table_info.name
                tables_unprocessed_items = (
                    unprocessed_items.get(table_name, None)
                )
                if tables_unprocessed_items is None:
                    tables_unprocessed_items = []
                    unprocessed_items[
                        table_name
                    ] = tables_unprocessed_items

                tables_unprocessed_items.append(write_request)

        notifier.notify(
            context, notifier.EVENT_TYPE_DATA_BATCHWRITE_END,
            dict(
                write_request_map=write_request_map,
                unprocessed_items=unprocessed_items
            )
        )

        return unprocessed_items
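
    # The xrange loop above slices the request list into fixed-size batches;
    # the same chunking in isolation:
    #
    #     def chunks(seq, size):
    #         for i in xrange(0, len(seq), size):
    #             yield seq[i:i + size]
    #
    #     list(chunks(range(7), 3))  # -> [[0, 1, 2], [3, 4, 5], [6]]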

    def _batch_write_async(self, context, write_request_list):
        future_result = Future()

        batch_future = self._execute_async(
            self._storage_driver.batch_write,
            context, write_request_list
        )

        def callback(res):
            try:
                res.result()
                unprocessed_items = ()
            except NotImplementedError:
                unprocessed_items = self._batch_write_in_emulation_mode(
                    context, write_request_list
                )
            except Exception:
                LOG.exception("Can't process batch write request")
                unprocessed_items = write_request_list
            future_result.set_result(unprocessed_items)

        batch_future.add_done_callback(callback)

        return future_result

    def _batch_write_in_emulation_mode(self, context, write_request_list):
        request_count = len(write_request_list)
        done_count = [0]
        done_event = Event()
        unprocessed_items = []
        for write_request in write_request_list:
            table_info, req = write_request
            if req.is_put:
                future_result = self._put_item_async(
                    context, table_info, req.attribute_map)
            elif req.is_delete:
                future_result = self._delete_item_async(
                    context, table_info, req.attribute_map
                )

            def make_callback():
                _write_request = write_request

                def callback(res):
                    try:
                        res.result()
                    except Exception:
                        unprocessed_items.append(_write_request)
                        LOG.exception("Can't process WriteItemRequest")
                    done_count[0] += 1
                    if done_count[0] >= request_count:
                        done_event.set()
                return callback

            future_result.add_done_callback(make_callback())

        done_event.wait()
        return unprocessed_items
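
    # make_callback() exists to pin write_request at its current value:
    # closures capture variables, not values, so a plain callback defined
    # in the loop would see only the last write_request. Illustration:
    #
    #     fns = [lambda: i for i in range(3)]
    #     [f() for f in fns]            # -> [2, 2, 2]
    #     fns = [lambda i=i: i for i in range(3)]
    #     [f() for f in fns]            # -> [0, 1, 2]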

    def execute_get_batch(self, context, read_request_list):
        assert read_request_list

        items = []
        unprocessed_items = []

        request_count = len(read_request_list)
        done_count = [0]

        done_event = Event()

        prepared_batch = []

        for req in read_request_list:
            def make_request_executor():
                _req = req

                _table_name = _req.table_name
                _key_attribute_map = _req.key_attribute_map

                _table_info = self._table_info_repo.get(context, _table_name)
                self._validate_table_is_active(_table_info)
                self._validate_table_schema(_table_info, _key_attribute_map)

                _attributes_to_get = _req.attributes_to_get

                def callback(res):
                    try:
                        items.append((_table_name, res.result()))
                    except Exception:
                        unprocessed_items.append(_req)
                        LOG.exception("Can't process GetItemRequest")
                    done_count[0] += 1
                    if done_count[0] >= request_count:
                        done_event.set()

                def executor():
                    future_result = self._get_item_async(
                        context, _table_info,
                        _key_attribute_map.get(
                            _table_info.schema.hash_key_name
                        ),
                        _key_attribute_map.get(
                            _table_info.schema.range_key_name
                        ),
                        _attributes_to_get, consistent=_req.consistent
                    )
                    future_result.add_done_callback(callback)
                return executor
            prepared_batch.append(make_request_executor())

        notifier.notify(context, notifier.EVENT_TYPE_DATA_BATCHREAD_START,
                        read_request_list)

        for request_executor in prepared_batch:
            request_executor()

        done_event.wait()

        notifier.notify(
            context, notifier.EVENT_TYPE_DATA_BATCHREAD_END,
            dict(
                read_request_list=read_request_list,
                unprocessed_items=unprocessed_items
            )
        )

        return items, unprocessed_items
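
    # done_count is a one-element list because these closures need to mutate
    # shared state: Python 2 has no "nonlocal", so a closure cannot rebind an
    # outer local, but it can mutate a container it closes over. The Event is
    # then set exactly once, when the last callback finishes.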

    def update_item(self, context, table_name, key_attribute_map,
                    attribute_action_map, expected_condition_map=None):
        table_info = self._table_info_repo.get(context, table_name)
        self._validate_table_is_active(table_info)
        self._validate_table_schema(table_info, key_attribute_map)

        with self.__task_semaphore:
            result = self._storage_driver.update_item(
                context, table_info, key_attribute_map, attribute_action_map,
                expected_condition_map
            )
        notifier.notify(
            context, notifier.EVENT_TYPE_DATA_UPDATEITEM,
            dict(
                table_name=table_name,
                key_attribute_map=key_attribute_map,
                attribute_action_map=attribute_action_map,
                expected_condition_map=expected_condition_map
            ),
            priority=notifier.PRIORITY_DEBUG
        )

        return result

    def select_item(self, context, table_name, indexed_condition_map,
                    select_type, index_name=None, limit=None,
                    exclusive_start_key=None, consistent=True,
                    order_type=None):
        table_info = self._table_info_repo.get(context, table_name)
        self._validate_table_is_active(table_info)

        schema_attribute_type_map = table_info.schema.attribute_type_map

        hash_key_name = table_info.schema.hash_key_name
        range_key_name = table_info.schema.range_key_name

        if index_name is not None:
            index_def = table_info.schema.index_def_map.get(index_name)
            if index_def is None:
                raise ValidationError(
                    _("Index '%(index_name)s' doesn't exist for table "
                      "'%(table_name)s'"),
                    index_name=index_name, table_name=table_name)
            range_key_name_to_query = index_def.alt_range_key_attr
        else:
            range_key_name_to_query = range_key_name

        if exclusive_start_key is not None:
            self._validate_table_schema(
                table_info, exclusive_start_key, index_name=index_name
            )

        indexed_condition_map_copy = indexed_condition_map.copy()

        hash_key_condition_list = indexed_condition_map_copy.pop(hash_key_name,
                                                                 None)
        range_key_to_query_condition_list = indexed_condition_map_copy.pop(
            range_key_name_to_query, None
        )

        indexed_condition_schema_valid = False
        if len(indexed_condition_map_copy) == 0 and hash_key_condition_list:
            hash_key_type = schema_attribute_type_map[hash_key_name]
            for hash_key_condition in hash_key_condition_list:
                for hash_key_condition_arg in hash_key_condition.args:
                    if hash_key_condition_arg.attr_type != hash_key_type:
                        break
                else:
                    continue
                break
            else:
                if range_key_to_query_condition_list:
                    range_key_to_query_type = schema_attribute_type_map[
                        range_key_name_to_query
                    ]
                    for range_key_to_query_condition in (
                            range_key_to_query_condition_list):
                        for range_key_to_query_condition_arg in (
                                range_key_to_query_condition.args):
                            if (range_key_to_query_condition_arg.attr_type !=
                                    range_key_to_query_type):
                                break
                        else:
                            continue
                        break
                    else:
                        indexed_condition_schema_valid = True
                else:
                    indexed_condition_schema_valid = True

        if not indexed_condition_schema_valid:
            raise ValidationError(
                _("Specified query conditions %(indexed_condition_map)s "
                  "doesn't match table schema: %(table_schema)s"),
                indexed_condition_map=indexed_condition_map,
                table_schema=table_info.schema
            )

        if (len(hash_key_condition_list) != 1 or
                hash_key_condition_list[0].type !=
                IndexedCondition.CONDITION_TYPE_EQUAL):
            raise ValidationError(
                _("Only equality condition is allowed for HASH key attribute "
                  "'%(hash_key_name)s'"),
                hash_key_name=hash_key_name,
            )

        with self.__task_semaphore:
            result = self._storage_driver.select_item(
                context, table_info, hash_key_condition_list,
                range_key_to_query_condition_list, select_type,
                index_name, limit, exclusive_start_key, consistent, order_type
            )
        notifier.notify(
            context, notifier.EVENT_TYPE_DATA_SELECTITEM,
            dict(
                table_name=table_name,
                indexed_condition_map=indexed_condition_map,
                select_type=select_type,
                index_name=index_name,
                limit=limit,
                exclusive_start_key=exclusive_start_key,
                consistent=consistent,
                order_type=order_type
            ),
            priority=notifier.PRIORITY_DEBUG
        )

        return result

    def _get_item_async(self, context, table_info, hash_key, range_key,
                        attributes_to_get, consistent=True):
        payload = dict(table_name=table_info.name,
                       hash_key=hash_key,
                       range_key=range_key,
                       attributes_to_get=attributes_to_get,
                       consistent=consistent)
        notifier.notify(context, notifier.EVENT_TYPE_DATA_SELECTITEM_START,
                        payload)
        select_type = (
            SelectType.all() if attributes_to_get is None else
            SelectType.specific_attributes(attributes_to_get)
        )
        hash_key_condition_list = [IndexedCondition.eq(hash_key)]
        range_key_condition_list = (
            None if range_key is None else [IndexedCondition.eq(range_key)]
        )

        result = self._execute_async(
            self._storage_driver.select_item,
            context, table_info, hash_key_condition_list,
            range_key_condition_list, select_type, consistent=consistent
        )
        notifier.notify(context, notifier.EVENT_TYPE_DATA_SELECTITEM_END,
                        payload)
        return result

    def scan(self, context, table_name, condition_map, attributes_to_get=None,
             limit=None, exclusive_start_key=None,
             consistent=False):
        table_info = self._table_info_repo.get(context, table_name)
        self._validate_table_is_active(table_info)

        if exclusive_start_key is not None:
            self._validate_table_schema(table_info, exclusive_start_key)

        payload = dict(table_name=table_name,
                       condition_map=condition_map,
                       attributes_to_get=attributes_to_get,
                       limit=limit,
                       exclusive_start_key=exclusive_start_key,
                       consistent=consistent)
        notifier.notify(context, notifier.EVENT_TYPE_DATA_SCAN_START,
                        payload)

        with self.__task_semaphore:
            result = self._storage_driver.scan(
                context, table_info, condition_map, attributes_to_get,
                limit, exclusive_start_key, consistent
            )
        notifier.notify(context, notifier.EVENT_TYPE_DATA_SCAN_END,
                        payload)

        return result

    def health_check(self):
        return self._storage_driver.health_check()
Esempio n. 56
0
def _upload_blob_chunks(blob_service, container_name, blob_name,
                        blob_size, block_size, stream, max_connections,
                        progress_callback, validate_content, lease_id, uploader_class, 
                        maxsize_condition=None, if_match=None, timeout=None,
                        content_encryption_key=None, initialization_vector=None):

    encryptor, padder = _get_blob_encryptor_and_padder(content_encryption_key, initialization_vector,
                                                       uploader_class is not _PageBlobChunkUploader)

    uploader = uploader_class(
        blob_service,
        container_name,
        blob_name,
        blob_size,
        block_size,
        stream,
        max_connections > 1,
        progress_callback,
        validate_content,
        lease_id,
        timeout,
        encryptor,
        padder
    )

    uploader.maxsize_condition = maxsize_condition

    # ETag matching does not work with parallelism, as a ranged upload may
    # start before the previous one finishes and provides an ETag.
    uploader.if_match = if_match if not max_connections > 1 else None

    if progress_callback is not None:
        progress_callback(0, blob_size)

    if max_connections > 1:
        import concurrent.futures
        from threading import BoundedSemaphore

        '''
        Bound the chunking so that at most 'max_connections' work items are
        buffered and submitted to the executor at a time. Without this bound
        the executor queue would keep accepting submitted work items and end
        up buffering every block of the stream in memory. The "+ 1" ensures
        the next chunk is already buffered and ready as soon as a worker
        thread becomes available.
        '''
        chunk_throttler = BoundedSemaphore(max_connections + 1)

        executor = concurrent.futures.ThreadPoolExecutor(max_connections)
        futures = []
        running_futures = []

        # Check for exceptions and fail fast.
        for chunk in uploader.get_chunk_streams():
            # Iterate over a snapshot: removing items from a list while
            # iterating over it would skip elements.
            for f in list(running_futures):
                if f.done():
                    if f.exception():
                        raise f.exception()
                    else:
                        running_futures.remove(f)

            chunk_throttler.acquire()
            future = executor.submit(uploader.process_chunk, chunk)

            # Release the throttle when the chunk completes; add_done_callback
            # fires even if the future is already done.
            future.add_done_callback(lambda x: chunk_throttler.release())
            futures.append(future)
            running_futures.append(future)

        # result() will wait until completion and also raise any exceptions that may have been set.
        range_ids = [f.result() for f in futures]
    else:
        range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]

    return range_ids
Esempio n. 57
0
class ThreadedLogger(BufferedLogger):
    """ A logger which actively runs in a thread.
        
        As with the `BufferedLogger` object, messages are not instantly saved
        to files. The difference here being that a thread is used to automate
        calls to `push`.
        
        With this, application developers shouldn't have to worry about whether
        or not all messages have been written to files.
    """
    
    control = None
    stop_loop = False
    
    def __init__(self, *args, **kwargs):
        super(ThreadedLogger, self).__init__(*args, **kwargs)
        self.level_lock = BoundedSemaphore()
        self.stop_loop = False
    
    def get_level(self):
        """ Return the current logging level. This blocks when threaded. """
        self.level_lock.acquire()
        l = self.level
        self.level_lock.release()
        return l
    
    def set_level(self, level):
        """ Set the current logging level. This blocks when threaded. """
        self.level_lock.acquire()
        self.level = level
        self.level_lock.release()
    
    def start(self):
        """ Start logging things in a thread. """
        self.stop_loop = False
        self.control = Thread(target=self.main)
        self.control.start()
    
    def stop(self):
        """ Tell the logger to stop running in a thread. """
        if self.control is None:
            return
        
        self.stop_loop = True
    
    def join(self, *args, **kwargs):
        """ Shortcut for `Thread.join`. """
        if self.control is None or self.stop_loop is False:
            return
        
        self.control.join(*args, **kwargs)
    
    def is_running(self):
        """ Determine whether or not the logger is running in a thread. """
        if self.control is None:
            return False
        
        return self.control.is_alive()
    
    def main(self):
        """ Main loop for the logger. """
        while True:
            time.sleep(2)
            self.push()
            if self.stop_loop:
                break
        
        self.stop_loop = False
        self.control = None
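
A minimal usage sketch for ThreadedLogger, assuming the inherited
BufferedLogger provides a log() method and a no-argument constructor
(neither is shown in this excerpt):

logger = ThreadedLogger()
logger.start()                      # spawn the background flush thread
logger.log("application started")   # buffered; pushed within ~2 seconds
logger.stop()                       # let the loop exit after its next push
logger.join()                       # wait for the final flush to complete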
Esempio n. 58
0
class Logger(Thread):
    
    def __init__(self, dAmn, logging=True, debug=False):
        super(Logger, self).__init__()
        self.dAmn = dAmn
        self.logging = logging
        self.mute_channels = []
        self.debug = debug
        self.running = False
        self._run = False
        # Create locks
        self.qlock = BoundedSemaphore()
        #self.mlock = BoundedSemaphore()
        self.llock = BoundedSemaphore()
        # Create queues
        self.wqueue = [] # Output queue
        #self.mqueue = [] # Muter queue
        self.lqueue = [] # Logger queue. Just in case.
        # Just in case.
        self.subbing = False
        self.subthread = None
    
    def run(self):
        self.running = True
        self._run = True
        while self._run:
            # Muter processing
            #self.mlock.acquire()
            #if len(self.mqueue) != 0:
            #    self.flush_muters()
            #self.mlock.release()
            # Writer processing
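            # Swap the queue out under the lock so write() callers are never
            # blocked while messages are flushed to disk.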
            self.qlock.acquire()
            q = self.wqueue
            self.wqueue = []
            self.qlock.release()
            if len(q) != 0:
                self.flush(q)
            time.sleep(.5)
        #self.flush_muters()
        self.qlock.acquire()
        q = self.wqueue
        self.wqueue = []
        self.qlock.release()
        self.flush(q)
        if self.subbing:
            log, self.logging = self.logging, False
            self.writeout(time.time(), '~Global', '** Waiting for logs to finish writing...', False)
            while self.subbing:
                time.sleep(.4)
        self.running = False
    
    def stop(self):
        self._run = False
    
    def write(self, ts, ns, msg, showns=True, mute=False, pkt=None):
        if not showns and ns != '~Global' and self.dAmn.format_ns(ns) in self.dAmn.channel.keys():
            if msg.startswith('** Got'):
                if self.dAmn.channel[self.dAmn.format_ns(ns)].member == {}:
                    mute = True
        if not self.running:
            self.writeout(ts, ns, msg, showns, mute, pkt)
            return
        self.qlock.acquire()
        self.wqueue.append((ts, ns, msg, showns, mute, pkt))
        self.qlock.release()
    
    def flush(self, queue):
        if len(queue) > 60:
            self.writeout(time.time(), '~Global',
                '>> Received a ridiculous amount of data!', False)
            self.writeout(time.time(), '~Global',
                '>> Skipping all data in the queue.', False)
            if not self.logging:
                return
            self.writeout(time.time(), '~Global',
                '>> This data will still appear in the logs.', False)
            self.start_threaded_flush(queue)
            # The flush thread now owns the queue; draining it here as well
            # would duplicate messages and race with the flush thread.
            return
        while len(queue) != 0:
            tups = queue.pop(0)
            self.writeout(*tups)
    
    def start_threaded_flush(self, queue):
        self.lqueue = queue
        self.subbing = True
        self.subthread = Thread(target=self.threaded_flush)
        self.subthread.start()
    
    def threaded_flush(self):
        while True:
            self.llock.acquire()
            q, self.lqueue = self.lqueue[:50], self.lqueue[50:]
            self.llock.release()
            for item in q:
                self.save_log(*item)
            self.llock.acquire()
            waiting = len(self.lqueue)
            self.llock.release()
            if waiting == 0:
                self.subbing = False
                return
            time.sleep(.8)
    
    def writeout(self, ts, ns, msg, showns=True, mute=False, pkt=None):
        if self.logging:
            self.save_msg(ts, ns, msg, showns, mute, pkt)
        self.pnt_message(ts, ns, '{0} {1}'.format(('{0}|'.format(ns) if showns else ''), msg), showns, mute, pkt)
        
    def pnt_message(self, ts, ns, message, showns=True, mute=False, pkt=None):
        try:
            if (mute or ns.lower() in self.dAmn.mute_channels) and not self.debug:
                return
            sys.stdout.write('{0}{1}\n'.format(self.clock(ts), message))
            if self.debug:
                self.save_msg(ts, '~Debug', message, showns, mute, pkt)
        except UnicodeError:
            sys.stdout.write('{0} >> Received an unprintable message!\n'.format(self.clock(ts)))
        sys.stdout.flush()
        
    def clock(self, ts):
        return '{0}|'.format(time.strftime('%H:%M:%S', time.localtime(ts)))
        
    def save_msg(self, ts, ns, msg, showns=True, mute=False, pkt=None):
        if not self.subbing:
            self.save_log(ts, ns, msg, showns, mute, pkt)
            return
        self.llock.acquire()
        self.lqueue.append((ts, ns, msg, showns, mute, pkt))
        self.llock.release()
        
    def save_log(self, ts, ns, msg, showns=True, mute=False, pkt=None):
        if not os.path.exists('./storage'): os.mkdir('./storage', 0o755)
        if not os.path.exists('./storage/logs'): os.mkdir('./storage/logs', 0o755)
        if not os.path.exists('./storage/logs/' + ns): os.mkdir('./storage/logs/' + ns, 0o755)
        log_path = './storage/logs/{0}/{1}.txt'.format(
            ns, time.strftime('%Y-%m-%d', time.localtime(ts)))
        # "with" guarantees the file is closed even if the write raises.
        with open(log_path, 'a') as log_file:
            try:
                log_file.write('{0} {1}{2}'.format(self.clock(ts), msg.lstrip(), "\n"))
            except UnicodeEncodeError:
                log_file.write('{0} >> Unprintable message received in {1}!\n'.format(self.clock(ts), ns))
Esempio n. 59
0
from threading import BoundedSemaphore, Thread
from urllib.request import urlopen


def download(url, sem):
    try:
        r = urlopen(url)
        r.read()
    finally:
        # Always release, even on failure, so the main loop can keep
        # spawning new downloads.
        sem.release()


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Format HTML.")
    parser.add_argument("url", type=str, help="url")

    args = parser.parse_args()

    sem = BoundedSemaphore(100)
    count = 0
    while True:
        count += 1
        # Blocks once 100 downloads are in flight.
        sem.acquire()
        t = Thread(target=download, args=(args.url, sem))
        t.daemon = True
        t.start()
        if not count % 200:
            print(count)
Esempio n. 60
0
class Dispatcher(asyncore.dispatcher):
    """
The dNG server infrastructure allows an application to provide active
listeners, threaded connection establishment and to queue a defined amount
of requests transparently.

:author:     direct Netware Group et al.
:copyright:  (C) direct Netware Group - All rights reserved
:package:    pas
:subpackage: server
:since:      v0.2.00
:license:    https://www.direct-netware.de/redirect?licenses;mpl2
             Mozilla Public License, v. 2.0
    """

    # pylint: disable=unused-argument

    def __init__(self, listener_socket, active_handler, threads_active = 5, queue_handler = None, threads_queued = 10, thread_stopping_hook = None):
        """
Constructor __init__(Dispatcher)

:param listener_socket: Listener socket
:param active_handler: Thread to be used for activated connections
:param threads_active: Allowed simultaneous threads
:param queue_handler: Thread to be used for queued connections
:param threads_queued: Allowed queued threads
:param thread_stopping_hook: Thread stopping hook definition

:since: v0.2.00
        """

        asyncore.dispatcher.__init__(self, sock = listener_socket)

        self.active = False
        """
Listener state
        """
        self.active_handler = (active_handler if (issubclass(active_handler, Handler)) else None)
        """
Active queue handler
        """
        self.actives = None
        """
Active counter
        """
        self.actives_list = [ ]
        """
Active queue
        """
        self.listener_handle_connections = (listener_socket.type & socket.SOCK_STREAM == socket.SOCK_STREAM)
        """
True if the listener socket is connection-oriented (SOCK_STREAM)
        """
        self.listener_socket = listener_socket
        """
Listener socket
        """
        self.listener_startup_timeout = 45
        """
Listener startup timeout
        """
        self.local = None
        """
Local data handle
        """
        self._lock = InstanceLock()
        """
Thread safety lock
        """
        self.log_handler = NamedLoader.get_singleton("dNG.data.logging.LogHandler", False)
        """
The LogHandler is called whenever debug messages should be logged or errors
happened.
        """
        self.queue_handler = (queue_handler if (queue_handler is not None and issubclass(queue_handler, Handler)) else None)
        """
Passive queue handler
        """
        self.queue_max = threads_queued
        """
Passive queue maximum
        """
        self.stopping_hook = ("" if (thread_stopping_hook is None) else thread_stopping_hook)
        """
Stopping hook definition
        """
        self.thread = None
        """
Thread if started and active
        """
        self.waiting = 0
        """
Counter of connections passed to the passive queue handler
        """

        # Stream listeners may handle up to "threads_active" connections in
        # parallel; datagram listeners are serialized through a single slot.
        self.actives = BoundedSemaphore(threads_active if (self.listener_handle_connections) else 1)
    #

    def _active_activate(self, _socket):
        """
Run the active handler for the given socket.

:param _socket: Active socket resource

:since: v0.2.00
        """

        handler = self.active_handler()
        handler.set_instance_data(self, _socket)
        handler.start()

        if (self.log_handler is not None): self.log_handler.debug("{0!r} started a new thread '{1!r}'", self, handler, context = "pas_server")
    #

    def _active_queue(self, _socket):
        """
Puts a transport on the active queue or tries to temporarily save it on
the passive queue.

:param _socket: Active socket resource

:return: (bool) True if queued
:since:  v0.2.00
        """

        _return = False

        if (self.active):
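            # With no passive queue handler the semaphore acquire blocks
            # until an active slot frees up; otherwise a failed non-blocking
            # acquire hands the socket to the passive queue handler instead.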
            if (self.actives.acquire(self.queue_handler is None)):
                with self._lock:
                    if (self.active):
                        self.actives_list.append(_socket)
                        _return = True
                    else: self.actives.release()
                #
            else:
                handler = self.queue_handler()
                handler.set_instance_data(self, _socket)
                handler.start()

                self.waiting += 1
            #
        #

        return _return
    #

    def active_unqueue(self, _socket):
        """
Unqueue the given ID from the active queue.

:param _socket: Active socket resource

:since: v0.2.00
        """

        if (self._unqueue(self.actives_list, _socket)): self.actives.release()
    #

    def _active_unqueue_all(self):
        """
Unqueue all entries from the active queue (canceling running processes).

:since: v0.2.00
        """

        with self._lock:
            if (self.actives_list is not None):
                for _socket in self.actives_list:
                    if (self._unqueue(self.actives_list, _socket)): self.actives.release()
                #
            #
        #
    #

    def _ensure_thread_local(self):
        """
For thread safety some variables are defined per thread. This method makes
sure that these variables are defined.

:since: v0.2.00
        """

        if (self.local is None): self.local = local()
        if (not hasattr(self.local, "sockets")): self.local.sockets = { }
    #

    def handle_accept(self):
        """
python.org: Called on listening channels (passive openers) when a connection
can be established with a new remote endpoint that has issued a connect()
call for the local endpoint.

Deprecated since version 3.2.

:since: v0.2.00
        """

        # pylint: disable=broad-except

        if (self.active and self.listener_handle_connections):
            socket_data = None

            try: socket_data = self.accept()
            except Exception as handled_exception:
                if (self.log_handler is None): TracedException.print_current_stack_trace()
                else: self.log_handler.error(handled_exception, context = "pas_server")
            #

            if (socket_data is not None): self.handle_accepted(socket_data[0], socket_data[1])
        #
    #

    def handle_accepted(self, sock, addr):
        """
python.org: Called on listening channels (passive openers) when a connection
has been established with a new remote endpoint that has issued a connect()
call for the local endpoint.

:since: v0.2.00
        """

        # pylint: disable=broad-except

        if (self.active and self.listener_handle_connections):
            try:
                if (self._active_queue(sock)): self._active_activate(sock)
            except ShutdownException as handled_exception:
                exception = handled_exception.get_cause()

                if (exception is None and self.log_handler is not None): self.log_handler.error(handled_exception, context = "pas_server")
                else: handled_exception.print_stack_trace()
            except Exception as handled_exception:
                if (self.log_handler is None): TracedException.print_current_stack_trace()
                else: self.log_handler.error(handled_exception, context = "pas_server")
            #
        #
    #

    def handle_close(self):
        """
python.org: Called when the socket is closed.

:since: v0.2.00
        """

        if (self.active): self.stop()
    #

    def handle_connect(self):
        """
python.org: Called when the active opener's socket actually makes a
connection. Might send a "welcome" banner, or initiate a protocol
negotiation with the remote endpoint, for example.

:since: v0.2.00
        """

        if (self.active): self._start_listening()
    #

    def handle_error(self):
        """
python.org: Called when an exception is raised and not otherwise handled.

:since: v0.2.00
        """

        if (self.log_handler is None): TracedException.print_current_stack_trace()
        else: self.log_handler.error(traceback.format_exc(), context = "pas_server")
    #

    def handle_read(self):
        """
python.org: Called when the asynchronous loop detects that a "read()" call
on the channel's socket will succeed.

:since: v0.2.00
        """

        # pylint: disable=broad-except

        if ((not self.listener_handle_connections) and self.active):
            try:
                if (self._active_queue(self.listener_socket)): self._active_activate(self.listener_socket)
            except ShutdownException as handled_exception:
                exception = handled_exception.get_cause()

                if (exception is None and self.log_handler is not None): self.log_handler.error(handled_exception, context = "pas_server")
                else: handled_exception.print_stack_trace()
            except Exception as handled_exception:
                if (self.log_handler is None): TracedException.print_current_stack_trace()
                else: self.log_handler.error(handled_exception, context = "pas_server")
            #
        #
    #

    def handle_expt(self):
        """
python.org: Called when there is out of band (OOB) data for a socket
connection. This will almost never happen, as OOB is tenuously supported and
rarely used.

:since: v0.2.00
        """

        if (self.active): self._active_unqueue_all()
    #

    def _init(self):
        """
Initializes the dispatcher and stopping hook.

:since: v0.2.00
        """

        if (self.log_handler is not None): self.log_handler.debug("#echo(__FILEPATH__)# -{0!r}._init()- (#echo(__LINE__)#)", self, context = "pas_server")

        if (self.stopping_hook is not None):
            stopping_hook = ("dNG.pas.Status.onShutdown" if (self.stopping_hook == "") else self.stopping_hook)
            Hook.register_weakref(stopping_hook, self.thread_stop)
        #
    #

    def is_active(self):
        """
Returns the listener status.

:return: (bool) True if active and listening
:since:  v0.2.00
        """

        return self.active
    #

    def start(self):
        """
Starts the prepared dispatcher in a new thread.

:since: v0.2.00
        """

        if (not self.active):
            is_already_active = False

            with self._lock:
                # Thread safety
                is_already_active = self.active
                if (not is_already_active): self.active = True
            #

            if (not is_already_active):
                self._init()
                Thread(target = self.run).start()
            #
        #
    #

    def _start_listening(self):
        """
Try to start listening on the prepared socket. Uses the defined startup
timeout to wait for the socket to become available before throwing an
exception.

:since: v0.2.00
        """

        # pylint: disable=broad-except,raising-bad-type

        _exception = None
        timeout_time = (time.time() + self.listener_startup_timeout)

        while (time.time() < timeout_time):
            try:
                if (_exception is not None): time.sleep(0.2)
                _exception = None

                self.listen(self.queue_max)

                break
            except Exception as handled_exception: _exception = handled_exception
        #

        if (_exception is not None): raise _exception
    #

    def run(self):
        """
Run the main loop for this server instance.

:since: v0.2.00
        """

        # pylint: disable=broad-except

        if (self.log_handler is not None): self.log_handler.debug("#echo(__FILEPATH__)# -{0!r}.run()- (#echo(__LINE__)#)", self, context = "pas_server")

        self._ensure_thread_local()

        try:
            if (not self.active):
                with self._lock:
                    # Thread safety
                    if (not self.active):
                        self.active = True
                        self._init()
                    #
                #
            #

            if (self.listener_handle_connections): self._start_listening()

            self.add_channel(self.local.sockets)
            asyncore.loop(5, map = self.local.sockets)
        except ShutdownException as handled_exception:
            if (self.active):
                exception = handled_exception.get_cause()
                if (exception is not None and self.log_handler is not None): self.log_handler.error(exception, context = "pas_server")
            #
        except Exception as handled_exception:
            if (self.active):
                if (self.log_handler is None): TracedException.print_current_stack_trace()
                else: self.log_handler.error(handled_exception, context = "pas_server")
            #
        finally: self.stop()
    #

    def set_log_handler(self, log_handler):
        """
Sets the LogHandler.

:param log_handler: LogHandler to use

:since: v0.2.00
        """

        self.log_handler = log_handler
    #

    def stop(self):
        """
Stops the listener and unqueues all running sockets.

:since: v0.2.00
        """

        # pylint: disable=broad-except

        self._lock.acquire()

        if (self.active):
            if (self.log_handler is not None): self.log_handler.debug("#echo(__FILEPATH__)# -{0!r}.stop()- (#echo(__LINE__)#)", self, context = "pas_server")

            self.active = False

            if (self.stopping_hook is not None and len(self.stopping_hook) > 0): Hook.unregister(self.stopping_hook, self.thread_stop)
            self.stopping_hook = ""

            self._lock.release()

            try: self.close()
            except Exception: pass

            self._active_unqueue_all()
        else: self._lock.release()
    #

    def thread_stop(self, params = None, last_return = None):
        """
Stops the running server instance by a stopping hook call.

:param params: Parameter specified
:param last_return: The return value from the last hook called.

:return: (mixed) Return value
:since:  v0.2.00
        """

        self.stop()
        return last_return
    #

    def _unqueue(self, queue, _socket):
        """
Unqueues a previously active socket connection.

:param queue: Queue object
:param _socket: Active socket resource

:return: (bool) True on success
:since:  v0.2.00
        """

        # pylint: disable=broad-except

        _return = False

        self._lock.acquire()

        if (queue is not None and _socket in queue):
            queue.remove(_socket)
            self._lock.release()

            _return = True

            if (self.listener_handle_connections):
                try: _socket.close()
                except socket.error: pass
            #
        else: self._lock.release()

        return _return
    #

    def writable(self):
        """
python.org: Called each time around the asynchronous loop to determine
whether a channel's socket should be added to the list on which write events
can occur.

:return: (bool) Always False
:since:  v0.2.00
        """

        return False
    #

    @staticmethod
    def prepare_socket(listener_type, *listener_data):
        """
Prepare socket returns a bound socket for the given listener data.

:param listener_type: Listener type
:param listener_data: Listener data

:since: v0.2.00
        """

        _return = None

        if (listener_type == socket.AF_INET or listener_type == socket.AF_INET6):
            listener_data = ( Binary.str(listener_data[0]), listener_data[1] )

            _return = socket.socket(listener_type, socket.SOCK_STREAM)
            _return.setblocking(0)
            if (hasattr(socket, "SO_REUSEADDR")): _return.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            _return.bind(listener_data)
        elif (listener_type == socket.AF_UNIX):
            unixsocket_path_name = path.normpath(Binary.str(listener_data[0]))
            if (os.access(unixsocket_path_name, os.F_OK)): os.unlink(unixsocket_path_name)

            _return = socket.socket(listener_type, socket.SOCK_STREAM)
            if (hasattr(socket, "SO_REUSEADDR")): _return.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            _return.bind(unixsocket_path_name)

            socket_chmod = 0
            socket_chmod_value = int(Settings.get("pas_global_server_chmod_unix_sockets", "600"), 8)

            if ((1000 & socket_chmod_value) == 1000): socket_chmod |= stat.S_ISVTX
            if ((2000 & socket_chmod_value) == 2000): socket_chmod |= stat.S_ISGID
            if ((4000 & socket_chmod_value) == 4000): socket_chmod |= stat.S_ISUID
            if ((0o100 & socket_chmod_value) == 0o100): socket_chmod |= stat.S_IXUSR
            if ((0o200 & socket_chmod_value) == 0o200): socket_chmod |= stat.S_IWUSR
            if ((0o400 & socket_chmod_value) == 0o400): socket_chmod |= stat.S_IRUSR
            if ((0o010 & socket_chmod_value) == 0o010): socket_chmod |= stat.S_IXGRP
            if ((0o020 & socket_chmod_value) == 0o020): socket_chmod |= stat.S_IWGRP
            if ((0o040 & socket_chmod_value) == 0o040): socket_chmod |= stat.S_IRGRP
            if ((0o001 & socket_chmod_value) == 0o001): socket_chmod |= stat.S_IXOTH
            if ((0o002 & socket_chmod_value) == 0o002): socket_chmod |= stat.S_IWOTH
            if ((0o004 & socket_chmod_value) == 0o004): socket_chmod |= stat.S_IROTH

            os.chmod(unixsocket_path_name, socket_chmod)
        #

        return _return
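
A rough usage sketch for Dispatcher, assuming a Handler subclass named
ConnectionHandler (hypothetical; the Handler base class and the surrounding
dNG imports are not shown in this excerpt):

listener_socket = Dispatcher.prepare_socket(socket.AF_INET, "127.0.0.1", 8080)
server = Dispatcher(listener_socket, ConnectionHandler, threads_active = 5)
server.start()    # runs asyncore.loop() in a background thread
# ... serve traffic ...
server.stop()     # closes the listener and unqueues active sockets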