def test_stop_reaper_thread(self):
    """Verify that calling stop_reaper will terminate the reaper thread.

    Polls the reaper's ``running`` flag for up to ~10 seconds
    (1000 checks, 10 ms apart) before declaring failure.
    """
    pool = ConnectionPool(MessyConnector, backend='thread')
    assert pool._reaper.running
    pool.stop_reaper()

    attempts_left = 1000
    while attempts_left:
        if not pool._reaper.running:
            return
        time.sleep(0.01)
        attempts_left -= 1

    assert False, 'Reaper thread not terminated in time.'
def test_stop_reaper_thread(self):
        """Verify that calling stop_reaper will terminate the reaper thread.

        Polls the reaper's ``running`` flag for up to ~10 seconds
        (1000 checks, 10 ms apart) before declaring failure.
        """
        pool = ConnectionPool(MessyConnector, backend='thread')
        assert pool._reaper.running
        pool.stop_reaper()

        # xrange: this snippet targets Python 2.
        for i in xrange(1000):
            if not pool._reaper.running:
                return
            time.sleep(0.01)

        # Reaper never stopped within the polling window.
        assert False, 'Reaper thread not terminated in time.'
Example #3
0
 def setUp(self):
     # we set a pool to not keep connection opened during testing
     # (max_lifetime=.001 makes pooled connections expire almost
     # immediately, so nothing lingers between tests)
     from socketpool import ConnectionPool
     from restkit.conn import Connection
     self.client_options = dict(
         pool=ConnectionPool(factory=Connection, max_lifetime=.001))
     super(TestRestkit, self).setUp()
def auth_get_repo(user_password):
    """Clone every repository of the user owning the hard-coded token.

    :param user_password: ``'username+password'`` string.
        NOTE(review): with the basic-auth section commented out below this
        argument is currently unused at runtime -- confirm before removing.
    """
    pool = ConnectionPool(factory=Connection)

    # --- token bootstrap via basic auth (disabled) ----------------------
    # The original paste garbled this commented-out section; reconstructed
    # here as plain comments.  It shows how to trade username/password for
    # an OAuth token (see http://developer.github.com/v3/):
    #   username = user_password.split('+')[0]
    #   password = user_password.split('+')[1]
    #   auth = BasicAuth(username, password)
    #   authreqdata = {"scopes": ["public_repo"], "note": "admin script"}
    #   resource = Resource('https://api.github.com/authorizations',
    #                       pool=pool, filters=[auth])
    #   response = resource.post(
    #       headers={"Content-Type": "application/json"},
    #       payload=json.dumps(authreqdata))
    #   token = json.loads(response.body_string())['token']
    # --------------------------------------------------------------------

    # Once you have a token, you can pass that in the Authorization header,
    # cache it, and throw away the user/password.
    # SECURITY: a real credential must never be hard-coded; this leaked
    # token should be revoked and read from the environment instead.
    token = '94038d59a46c5ea1aa4f11626a83cde3e8794668'
    resource = Resource('https://api.github.com/user/repos', pool=pool)
    headers = {'Content-Type': 'application/json'}
    headers['Authorization'] = 'token %s' % token
    response = resource.get(headers=headers)
    repos = json.loads(response.body_string())
    for each in repos:
        git("clone", each['clone_url'])
 def __init__(self, user, token):
     # Pooled restkit connections shared by this client instance.
     self.pool = ConnectionPool(factory=Connection)
     self.token = token
     # NOTE(review): `user` is accepted but never stored or used here --
     # confirm whether callers rely on it before removing the parameter.
     self.headers = {
         'Content-Type': 'application/json',
         'Authorization': 'token %s' % self.token
     }
    def __init__(self, *args, **kw):
        super(Socks5Server, self).__init__(*args, **kw)
        # Gevent-backed pool of outbound TCP connections; entries expire
        # after max_lifetime=3 seconds.
        self.remote_pool = ConnectionPool(factory=TcpConnector,
                                          max_size=600,
                                          max_lifetime=3,
                                          backend="gevent")

        # Self-rescheduling stats loggers: each passes itself to
        # spawn_later so it runs again every 10 seconds.
        def log_tcp_pool_size(s):
            # NOTE(review): `size` is read as an attribute here, while the
            # other Socks5Server variant in this file calls it as a method
            # -- confirm which socketpool version is targeted.
            log("ConnPool size: %d" % self.remote_pool.size)
            spawn_later(10, s, s)

        def log_dns_pool_size(s):
            log("DNSPool size: %d" % len(self.HOSTCACHE))
            spawn_later(10, s, s)

        spawn_later(10, log_tcp_pool_size, log_tcp_pool_size)
        spawn_later(10, log_dns_pool_size, log_dns_pool_size)
Example #7
0
 def connect(self):
     """Create the connection pool to the BaoStock server.

     On success a TCP connection pool is stored on the global ``context``
     as ``default_socket_pool``; on any failure a Chinese "server
     connection failed, please try again later" message is printed.
     """
     try:
         socket_pool = ConnectionPool(factory=TcpConnector, max_size=1024,
                                      options={'host': cons.BAOSTOCK_SERVER_IP, 'port': cons.BAOSTOCK_SERVER_PORT})
         setattr(context, "default_socket_pool", socket_pool)
     except Exception:
         # Broad catch: any setup error is reported to the user and
         # swallowed (best-effort connect).
         print("服务器连接失败,请稍后再试。")
Example #8
0
def get_session(backend_name, **options):
    """Return the shared ConnectionPool for *backend_name*.

    Pools are cached in the module-global ``_default_session`` dict keyed
    by backend name; the pool is created on first request for a backend.

    :param backend_name: socketpool backend identifier (e.g. ``"thread"``).
    :param options: extra keyword arguments forwarded to ConnectionPool
        only when a new pool is created (ignored on cache hits).
    :return: the cached or newly created ConnectionPool.
    """
    global _default_session

    # The original duplicated the pool-creation branch; both paths reduce
    # to "ensure the dict exists, then create-if-missing".
    if not _default_session:
        _default_session = {}
    if backend_name not in _default_session:
        _default_session[backend_name] = ConnectionPool(factory=Connection,
                                                        backend=backend_name,
                                                        **options)
    return _default_session[backend_name]
Example #9
0
 def __init__(self, endpoint, name, **kwargs):
     """Wrap a restkit Resource for *endpoint*, published under *name*."""
     # rstrip is a no-op when there is no trailing slash, so no
     # endswith() guard is needed.
     endpoint = endpoint.rstrip('/')
     if 'pool' not in kwargs:
         kwargs['pool'] = ConnectionPool(factory=Connection)
     # JSON (de)serialization hooks, defaulting to bson's json_util.
     self.json_default = kwargs.get('json_default', json_util.default)
     self.json_object_hook = kwargs.get('json_object_hook',
                                        json_util.object_hook)
     self.resource = Resource(endpoint, **kwargs)
     self.name = name
Example #10
0
    def test_del(self):
        """Verify that garbage collection of the pool will release the reaper
        thread.

        Drops the only reference to the pool, then polls the reaper's
        ``running`` flag for up to ~10 seconds (1000 checks, 10 ms apart).
        """
        pool = ConnectionPool(MessyConnector, backend='thread')
        reaper = pool._reaper

        assert reaper.running

        # Remove reference.
        # (On CPython, refcounting collects the pool immediately; the
        # reaper should notice and shut down shortly after.)
        pool = None

        # xrange: this snippet targets Python 2.
        for i in xrange(1000):
            if not reaper.running:
                return
            time.sleep(0.01)

        assert False, 'Reaper thread not terminated in time.'
Example #11
0
 def __init__(self,
              url=GITHUB_URL,
              params=None,
              payload=None,
              headers=None,
              filters=None,
              access_token=None):
     """Build a GitHub API wrapper around a pooled restkit Resource.

     Falsy params/payload/headers/filters are replaced with fresh
     defaults; an access_token, when given, is added to the query params.
     """
     self.url = url
     self.params = params if params else dict()
     self.payload = payload if payload else dict()
     self.headers = headers if headers else {'Content-Type': 'application/json'}
     self.resource = Resource(
         url,
         pool=ConnectionPool(factory=Connection),
         filters=filters if filters else list(),
     )
     if access_token is not None:
         self.params["access_token"] = access_token
Example #12
0
#
# This file is part of restkit released under the MIT license.
# See the NOTICE for more information.

from six.moves.urllib import parse as urlparse

from webob import Request
from restkit.contrib.wsgi_proxy import HostProxy

import restkit
from restkit.conn import Connection
from socketpool import ConnectionPool

restkit.set_logging("debug")

# Shared thread-backed pool (max 10 connections) feeding the CouchDB proxy.
pool = ConnectionPool(factory=Connection, max_size=10, backend="thread")
proxy = HostProxy("http://127.0.0.1:5984", pool=pool)


def application(environ, start_response):
    """WSGI entry point: forward the request to the CouchDB HostProxy."""
    req = Request(environ)
    if 'RAW_URI' in req.environ:
        # gunicorn so we use real path non encoded
        u = urlparse.urlparse(req.environ['RAW_URI'])
        req.environ['PATH_INFO'] = u.path

    # do smth like adding oauth headers ..
    resp = req.get_response(proxy)

    # rewrite response
    # do auth ...

    # BUG FIX: the function previously ended here and returned None,
    # breaking the WSGI contract.  A webob Response is itself a WSGI
    # application; invoking it sends status/headers via start_response
    # and returns the body iterable.
    return resp(environ, start_response)
def make_auctions_app(global_conf,
                      redis_url='redis://localhost:9002/1',
                      redis_password='',
                      redis_database='',
                      sentinel_cluster_name='',
                      sentinels='',
                      external_couch_url='http://localhost:5000/auction',
                      internal_couch_url='http://localhost:9000/',
                      proxy_internal_couch_url='http://localhost:9000/',
                      auctions_db='database',
                      hash_secret_key='',
                      timezone='Europe/Kiev',
                      preferred_url_scheme='http',
                      debug=False,
                      auto_build=False,
                      event_source_connection_limit=1000,
                      limit_replications_progress=99,
                      limit_replications_func='any'):
    """
    PasteDeploy application factory for the auctions server.

    Sample configuration:

    [app:main]
    use = egg:openprocurement.auction#auctions_server
    redis_url = redis://:passwod@localhost:1111/0
    external_couch_url = http://localhost:1111/auction
    internal_couch_url = http://localhost:9011/
    auctions_db = auction
    timezone = Europe/Kiev
    """
    # Look up the server component registered via the zope-style
    # component registry.
    auctions_server = components.queryUtility(IAuctionsServer)
    # Gevent-backed pool used when proxying CouchDB requests.
    auctions_server.proxy_connection_pool = ConnectionPool(factory=Connection,
                                                           max_size=20,
                                                           backend='gevent')
    auctions_server.proxy_mappings = Memoizer({})
    auctions_server.event_sources_pool = deque([])
    auctions_server.config['PREFERRED_URL_SCHEME'] = preferred_url_scheme
    auctions_server.config['limit_replications_progress'] = float(
        limit_replications_progress)
    auctions_server.config['limit_replications_func'] = limit_replications_func

    # Redis / Sentinel settings; `sentinels` arrives as a JSON string
    # and is parsed with loads().
    auctions_server.config['REDIS'] = {
        'redis': redis_url,
        'redis_password': redis_password,
        'redis_database': redis_database,
        'sentinel_cluster_name': sentinel_cluster_name,
        'sentinel': loads(sentinels)
    }

    auctions_server.config['event_source_connection_limit'] = int(
        event_source_connection_limit)
    auctions_server.config['EXT_COUCH_DB'] = urljoin(external_couch_url,
                                                     auctions_db)
    # Read-only proxy routes for the auctions database (with and without
    # a trailing path), plus the "_secured" authenticated variants.
    auctions_server.add_url_rule('/' + auctions_db + '/<path:path>',
                                 'couch_server_proxy',
                                 couch_server_proxy,
                                 methods=['GET'])
    auctions_server.add_url_rule('/' + auctions_db + '/',
                                 'couch_server_proxy',
                                 couch_server_proxy,
                                 methods=['GET'],
                                 defaults={'path': ''})

    auctions_server.add_url_rule('/' + auctions_db + '_secured/<path:path>',
                                 'auth_couch_server_proxy',
                                 auth_couch_server_proxy,
                                 methods=['GET'])
    auctions_server.add_url_rule('/' + auctions_db + '_secured/',
                                 'auth_couch_server_proxy',
                                 auth_couch_server_proxy,
                                 methods=['GET'],
                                 defaults={'path': ''})

    auctions_server.config['INT_COUCH_URL'] = internal_couch_url
    auctions_server.config['PROXY_COUCH_URL'] = proxy_internal_couch_url
    auctions_server.config['COUCH_DB'] = auctions_db
    auctions_server.config['TIMEZONE'] = tz(timezone)

    # Connect to CouchDB and create the auctions database if missing.
    auctions_server.couch_server = Server(
        auctions_server.config.get('INT_COUCH_URL'),
        session=Session(retry_delays=range(10)))
    if auctions_server.config['COUCH_DB'] not in auctions_server.couch_server:
        auctions_server.couch_server.create(auctions_server.config['COUCH_DB'])

    auctions_server.db = auctions_server.\
        couch_server[auctions_server.config['COUCH_DB']]
    auctions_server.config['HASH_SECRET_KEY'] = hash_secret_key
    sync_design(auctions_server.db)
    # Load and apply every plugin registered under the package namespace.
    for entry_point in iter_entry_points(PKG_NAMESPACE):
        plugin = entry_point.load()
        plugin(components)
    return auctions_server
Example #14
0
 def test_size_on_isconnected_failure(self):
     """Pool stays empty and get() exhausts retries when connections fail.

     NOTE(review): relies on MessyConnector failing its connectivity
     check -- behaviour defined elsewhere in the test suite.
     """
     pool = ConnectionPool(MessyConnector)
     assert pool.size == 0
     pytest.raises(MaxTriesError, pool.get)
Example #15
0
import json
from restkit import Resource, BasicAuth, Connection, request
from socketpool import ConnectionPool
import getpass
 
pool = ConnectionPool(factory=Connection)
serverurl="https://api.github.com"
print 'Enter your username:'******'https://api.github.com/authorizations', pool=pool, filters=[auth])
response = resource.post(headers={ "Content-Type": "application/json" }, payload=json.dumps(authreqdata))
token = json.loads(response.body_string())['token']
"""
Once you have a token, you can pass that in the Authorization header
You can store this in a cache and throw away the user/password
This is just an example query.  See http://developer.github.com/v3/ 
for more about the url structure
"""
#token = '94038d59a46c5ea1aa4f11626a83cde3e8794668' 
resource = Resource('https://api.github.com/user/repos', pool=pool)
headers = {'Content-Type' : 'application/json' }
headers['Authorization'] = 'token %s' % token
response = resource.get(headers = headers)
repos = json.loads(response.body_string())
Example #16
0
def make_auctions_app(global_conf,
                      redis_url='redis://localhost:7777/0',
                      external_couch_url='http://localhost:5000/auction',
                      internal_couch_url='http://localhost:9000/',
                      proxy_internal_couch_url='http://localhost:9000/',
                      auctions_db='auctions',
                      hash_secret_key='',
                      timezone='Europe/Kiev',
                      preferred_url_scheme='http',
                      debug=False,
                      auto_build=False,
                      event_source_connection_limit=1000):
    """
    PasteDeploy application factory for the auctions server.

    Sample configuration:

    [app:main]
    use = egg:openprocurement.auction#auctions_server
    redis_url = redis://:passwod@localhost:1111/0
    external_couch_url = http://localhost:1111/auction
    internal_couch_url = http://localhost:9011/
    auctions_db = auction
    timezone = Europe/Kiev
    """
    # NOTE(review): `auctions_server` is used below but never defined or
    # imported in this snippet; the sibling variant earlier in the file
    # obtains it via components.queryUtility(IAuctionsServer) -- confirm
    # the missing lookup (as written this would raise NameError).
    auctions_server.proxy_connection_pool = ConnectionPool(factory=Connection,
                                                           max_size=20,
                                                           backend="gevent")
    auctions_server.proxy_mappings = Memoizer({})
    auctions_server.event_sources_pool = deque([])
    auctions_server.config['PREFERRED_URL_SCHEME'] = preferred_url_scheme
    auctions_server.config['REDIS_URL'] = redis_url
    auctions_server.config['event_source_connection_limit'] = int(
        event_source_connection_limit)
    auctions_server.config['EXT_COUCH_DB'] = urljoin(external_couch_url,
                                                     auctions_db)
    # Read-only proxy routes for the auctions database (with and without
    # a trailing path), plus the "_secured" authenticated variants.
    auctions_server.add_url_rule('/' + auctions_db + '/<path:path>',
                                 'couch_server_proxy',
                                 couch_server_proxy,
                                 methods=['GET'])
    auctions_server.add_url_rule('/' + auctions_db + '/',
                                 'couch_server_proxy',
                                 couch_server_proxy,
                                 methods=['GET'],
                                 defaults={'path': ''})

    auctions_server.add_url_rule('/' + auctions_db + '_secured/<path:path>',
                                 'auth_couch_server_proxy',
                                 auth_couch_server_proxy,
                                 methods=['GET'])
    auctions_server.add_url_rule('/' + auctions_db + '_secured/',
                                 'auth_couch_server_proxy',
                                 auth_couch_server_proxy,
                                 methods=['GET'],
                                 defaults={'path': ''})

    auctions_server.config['INT_COUCH_URL'] = internal_couch_url
    auctions_server.config['PROXY_COUCH_URL'] = proxy_internal_couch_url
    auctions_server.config['COUCH_DB'] = auctions_db
    auctions_server.config['TIMEZONE'] = tz(timezone)
    auctions_server.redis = Redis(auctions_server)
    # Connect to CouchDB and create the auctions database if missing.
    auctions_server.couch_server = Server(
        auctions_server.config.get('INT_COUCH_URL'),
        session=Session(retry_delays=range(10)))
    if auctions_server.config['COUCH_DB'] not in auctions_server.couch_server:
        auctions_server.couch_server.create(auctions_server.config['COUCH_DB'])

    auctions_server.db = auctions_server.couch_server[
        auctions_server.config['COUCH_DB']]
    auctions_server.config['HASH_SECRET_KEY'] = hash_secret_key
    sync_design(auctions_server.db)
    auctions_server.config['ASSETS_DEBUG'] = True if debug else False
    assets.auto_build = True if auto_build else False
    return auctions_server
Example #17
0
 def pool(self):
     """Build a CouchDB connection pool from the Flask app config."""
     cfg = self.app.config
     return ConnectionPool(
         Connection,
         max_size=int(cfg.get('COUCHDB_KEEPALIVE')),
         backend=cfg.get('COUCHDB_BACKEND'),
     )
class Socks5Server(StreamServer):
    """Gevent SOCKS5 proxy server (Python 2 flavour).

    Pools outbound TCP connections and caches DNS lookups for
    HOSTCACHETIME seconds.
    """

    # domain -> resolved IP; entries evicted after HOSTCACHETIME seconds
    HOSTCACHE = {}
    HOSTCACHETIME = 1800

    def __init__(self, *args, **kw):
        super(Socks5Server, self).__init__(*args, **kw)
        # Outbound connection pool; connections live up to 5 minutes.
        self.remote_pool = ConnectionPool(factory=TcpConnector,
                                          max_conn=None,
                                          max_size=600,
                                          max_lifetime=300,
                                          backend="gevent")

        # Self-rescheduling stats loggers: each passes itself to
        # spawn_later so it runs again every 10 seconds.
        def log_tcp_pool_size(s):
            log("ConnPool size: %d, alive: %d" %
                (self.remote_pool.size(), self.remote_pool.alive()))
            spawn_later(10, s, s)

        def log_dns_pool_size(s):
            log("DNSPool size: %d" % len(self.HOSTCACHE))
            spawn_later(10, s, s)

        spawn_later(10, log_tcp_pool_size, log_tcp_pool_size)
        spawn_later(10, log_dns_pool_size, log_dns_pool_size)

    def close(self):
        # Drop every pooled remote connection before shutting the server.
        self.remote_pool.release_all()
        super(Socks5Server, self).close()

    def handle(self, sock, address):
        """Serve one SOCKS5 client connection (TCP CONNECT only)."""
        rfile = sock.makefile('rb', -1)
        try:
            log('socks connection from ' + str(address))

            # 1. Version/method negotiation: drain the greeting and reply
            #    "version 5, no authentication required".
            sock.recv(262)
            sock.send(b"\x05\x00")

            # 2. Request header: VER CMD RSV ATYP.
            data = rfile.read(4)
            # NOTE(review): ord() on indexed bytes only works on Python 2
            # (str); on Python 3 data[1] is already an int.
            mode = ord(data[1])
            addrtype = ord(data[3])

            if addrtype == 1:  # IPv4
                addr = socket.inet_ntoa(rfile.read(4))
            elif addrtype == 3:  # Domain name
                domain = rfile.read(ord(sock.recv(1)[0]))
                addr = self.handle_dns(domain)
            # NOTE(review): ATYP 4 (IPv6) is unhandled -- `addr` would be
            # unbound below; confirm IPv6 is intentionally out of scope.

            port = struct.unpack('>H', rfile.read(2))

            if mode == 1:  # 1. Tcp connect
                try:
                    remote = self.remote_pool.get(host=addr, port=port[0])
                    # Success reply echoing the bound address and port.
                    reply = b"\x05\x00\x00\x01" + socket.inet_aton(addr) + \
                                struct.pack(">H", port[0])
                    sock.send(reply)
                    log('Begin data, %s:%s' % (addr, port[0]))
                    # 3. Transfering
                    l1 = spawn(self.handle_tcp, sock, remote)
                    l2 = spawn(self.handle_tcp, remote, sock)
                    gevent.joinall((l1, l2))
                    self.remote_pool.release_connection(remote)

                except socket.error:
                    log('Conn refused, %s:%s' % (addr, port[0]))
                    # Connection refused
                    reply = b'\x05\x05\x00\x01\x00\x00\x00\x00\x00\x00'
                    sock.send(reply)
                    raise

            else:
                reply = b"\x05\x07\x00\x01"  # Command not supported
                sock.send(reply)

        except socket.error:
            pass
        finally:
            log("Close handle")
            rfile.close()
            # sock._sock is the underlying real socket on Python 2 gevent.
            sock._sock.close()
            sock.close()

    def handle_dns(self, domain):
        """Resolve *domain*, serving repeats from HOSTCACHE."""

        if domain not in self.HOSTCACHE:
            log('Resolving ' + domain)
            addr = gethostbyname(domain)
            self.HOSTCACHE[domain] = addr
            # Evict the cache entry after HOSTCACHETIME seconds.
            spawn_later(self.HOSTCACHETIME,
                        lambda a: self.HOSTCACHE.pop(a, None), domain)
        else:
            addr = self.HOSTCACHE[domain]
            log('Hit resolv %s -> %s in cache' % (domain, addr))

        return addr

    def handle_tcp(self, fr, to):
        """Pump bytes from *fr* to *to* until EOF or a socket error."""
        try:
            while to.send(fr.recv(4096)) > 0:
                continue
        except socket.error:
            pass
class Socks5Server(StreamServer):
    """Gevent SOCKS5 proxy server with pooled outbound connections and a
    DNS cache (comments translated from Chinese)."""

    # domain -> resolved IP; entries evicted after HOSTCACHETIME seconds
    HOSTCACHE = {}
    HOSTCACHETIME = 1800

    def __init__(self, *args, **kw):
        super(Socks5Server, self).__init__(*args, **kw)
        # Outbound pool; a very short max_lifetime (3s) keeps entries fresh.
        self.remote_pool = ConnectionPool(factory=TcpConnector,
                                          max_size=600,
                                          max_lifetime=3,
                                          backend="gevent")

        # Self-rescheduling stats loggers (run every 10 seconds).
        def log_tcp_pool_size(s):
            log("ConnPool size: %d" % self.remote_pool.size)
            spawn_later(10, s, s)

        def log_dns_pool_size(s):
            log("DNSPool size: %d" % len(self.HOSTCACHE))
            spawn_later(10, s, s)

        spawn_later(10, log_tcp_pool_size, log_tcp_pool_size)
        spawn_later(10, log_dns_pool_size, log_dns_pool_size)

    def close(self):
        # Drop every pooled remote connection before shutting the server.
        self.remote_pool.release_all()
        super(Socks5Server, self).close()

    def handle(self, sock, address):
        """Serve one SOCKS5 client connection (TCP CONNECT only)."""
        sock_file = sock.makefile('rb', -1)
        remote = None
        try:
            log('socks connection from ' + str(address))
            # 1. Set a 10s timeout for the handshake
            sock.settimeout(10)
            sock.recv(262)
            sock.send(b"\x05\x00")
            # 2. Read the request header: VER CMD RSV ATYP
            data = sock_file.read(4)
            decode_data = data.decode(encoding='ascii')
            mode = ord(decode_data[1])
            address_type = ord(decode_data[3])
            remote_address, port = self.get_remote_address_port(
                address_type, sock_file)
            if mode == 1:  # 1. create a TCP connection
                self.create_remote_connection_pipe(remote_address, port, sock)
            else:
                reply = b"\x05\x07\x00\x01"  # command not supported
                sock.send(reply)
                raise socket.error
        except socket.error:
            pass
        finally:
            # NOTE(review): `remote` is never reassigned in this method, so
            # this release is a no-op here; the pipe helper below does its
            # own release in its finally block -- confirm intent.
            if remote is not None:
                self.remote_pool.release_connection(remote)
            log("Close handle")
            sock_file.close()
            sock.close()

    def create_remote_connection_pipe(self, address, port, sock):
        """
        Connect to the remote host and pipe data in both directions.

        :param address: remote address
        :param port: remote port
        :param sock: the local client's sock
        :return: None
        """
        remote = None
        try:
            remote = self.remote_pool.get(host=address, port=port)
            # Refresh a pooled connection that outlived max_lifetime.
            if self.remote_pool.too_old(remote):
                self.remote_pool.release_connection(remote)
                remote = self.remote_pool.get(host=address, port=port)
            reply = b"\x05\x00\x00\x01" + socket.inet_aton(address) + \
                    struct.pack(">H", port)
            sock.send(reply)
            log('Begin data, %s:%s' % (address, port))
            # 3. greenlets pump sock reads/writes in both directions
            l1 = spawn(handle_tcp, sock, remote)
            l2 = spawn(handle_tcp, remote, sock)
            gevent.joinall((l1, l2))
        except socket.error as error:
            log('Conn refused, %s:%s' % (address, port))
            # Connection refused
            reply = b'\x05\x05\x00\x01\x00\x00\x00\x00\x00\x00'
            sock.send(reply)
            raise error
        finally:
            if remote is not None:
                self.remote_pool.release_connection(remote)

    def get_remote_address_port(self, address_type, sock_file):
        """Parse the destination address and port from the SOCKS5 request."""
        address = None
        if address_type == AddressType.IPV4.value:  # IPv4 address
            address = socket.inet_ntoa(sock_file.read(4))
        elif address_type == AddressType.URL.value:  # domain name
            domain_length = ord(decode(sock_file.read(1)))
            domain = decode(sock_file.read(domain_length))
            address = self.handle_dns(domain)
        if address is None:
            # Runtime message kept as-is (Chinese for "bad address").
            raise ValueError('地址错误')
        port = struct.unpack('>H', sock_file.read(2))[0]
        return address, port

    def handle_dns(self, domain):
        """Resolve *domain*, caching results for HOSTCACHETIME seconds."""

        if domain not in self.HOSTCACHE:
            log('Resolving ' + domain)
            addr = gethostbyname(domain)
            self.HOSTCACHE[domain] = addr
            # Evict the cache entry after HOSTCACHETIME seconds.
            spawn_later(self.HOSTCACHETIME,
                        lambda a: self.HOSTCACHE.pop(a, None), domain)
        else:
            addr = self.HOSTCACHE[domain]
            log('Hit resolv %s -> %s in cache' % (domain, addr))

        return addr
    # Gevent benchmark: issue N requests through a pool of C connections.
    from gevent import monkey
    monkey.patch_all()

    import gevent.pool
    import time

    from restkit import *
    from socketpool import ConnectionPool

    url = 'http://127.0.0.1/index.html'

    # N requests total, C concurrent green threads / pooled connections.
    N = 1000
    C = 10

    Pool = ConnectionPool(factory=Connection,backend="gevent",max_size=C,timeout=300)


    def run():
        # Fetch the URL through the shared pool and drain the body.
        response = request(url,follow_redirect=True,pool=Pool)
        response.body_string()
        assert response.status_int == 200


    group = gevent.pool.Pool(size=C)

    now = time.time()
    # xrange: this snippet targets Python 2.
    for _ in xrange(N):
        group.spawn(run)
    group.join()
Example #21
0
import socket
import json
from socketpool import ConnectionPool, TcpConnector
import time

# Test target and shared thread-backed TCP pool (10s timeout).
HOST = '192.168.0.106'
PORT = 10010
SIZE = 1024
POOL = ConnectionPool(factory=TcpConnector, backend="thread", timeout=10)
"""
测试用,实际Bigfish并未用到
"""
# (The string above says: "for testing; not actually used by Bigfish".)


def socket_test():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.connect((HOST, PORT))
    query_id = time.time()
    message = {
        'opt': "GETDATA",
        'query_id': query_id,
        'symbols': ["XAUUSD"],
        'tf': "M1",
        'start_time': "2015-11-03",
        'end_time': "2015-11-04",
        'size': 90
    }
    try:
        data = {query_id: []}
        times = 0
Example #22
0
import timeit

import eventlet
eventlet.monkey_patch()

from restkit import *
from restkit.conn import Connection
from socketpool import ConnectionPool

#set_logging("debug")

# Eventlet-backed connection pool shared by all green threads.
pool = ConnectionPool(factory=Connection, backend="eventlet")

epool = eventlet.GreenPool()

urls = [
    "http://yahoo.fr", "http://google.com", "http://friendpaste.com",
    "http://benoitc.io", "http://couchdb.apache.org"
]

# Repeat the URL list 10x to get 50 fetches for the benchmark.
allurls = []
for i in range(10):
    allurls.extend(urls)


def fetch(u):
    # Fetch one URL through the shared pool (Python 2 print statement).
    r = request(u, follow_redirect=True, pool=pool)
    print "RESULT: %s: %s (%s)" % (u, r.status, len(r.body_string()))


def extract():
Example #23
0
def echo(sock, address):
    """Echo every chunk received on *sock* back to the sender.

    Prints a connection banner and one line per echoed chunk; returns
    when the peer closes (recv yields an empty chunk).
    """
    print('New connection from %s:%s' % address)

    # iter() with an empty-bytes sentinel stops at EOF.
    for chunk in iter(lambda: sock.recv(1024), b''):
        sock.send(chunk)
        print("echoed %r" % chunk)


if __name__ == '__main__':
    import time

    options = {'host': 'localhost', 'port': 6000}
    pool = ConnectionPool(factory=TcpConnector, backend="gevent")
    server = StreamServer(('localhost', 6000), echo)
    gevent.spawn(server.serve_forever)

    def runpool(data):
        print 'ok'
        with pool.connection(**options) as conn:
            print 'sending'
            sent = conn.send(data)
            print 'send %d bytes' % sent
            echo_data = conn.recv(1024)
            print "got %s" % data
            assert data == echo_data

    start = time.time()
    jobs = [gevent.spawn(runpool, "blahblah") for _ in xrange(20)]