def run_server(conf, logger, sock, global_conf=None):
    # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
    # some platforms. This locks in reported times to the timezone in which
    # the server first starts running in locations that periodically change
    # timezones.
    os.environ['TZ'] = time.strftime("%z", time.gmtime())

    wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
    # Turn off logging requests by the underlying WSGI software.
    wsgi.HttpProtocol.log_request = lambda *a: None
    # Redirect logging other messages by the underlying WSGI software.
    wsgi.HttpProtocol.log_message = \
        lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a)
    wsgi.WRITE_TIMEOUT = int(conf.get('client_timeout') or 60)

    eventlet.hubs.use_hub(get_hub())
    eventlet.patcher.monkey_patch(all=False, socket=True)
    eventlet_debug = config_true_value(conf.get('eventlet_debug', 'no'))
    eventlet.debug.hub_exceptions(eventlet_debug)
    # utils.LogAdapter stashes name in server; fallback on unadapted loggers
    if not global_conf:
        if hasattr(logger, 'server'):
            log_name = logger.server
        else:
            log_name = logger.name
        global_conf = {'log_name': log_name}
    app = loadapp(conf['__file__'], global_conf=global_conf)
    max_clients = int(conf.get('max_clients', '1024'))
    pool = RestrictedGreenPool(size=max_clients)
    try:
        wsgi.server(sock, app, NullLogger(), custom_pool=pool)
    except socket.error as err:
        if err[0] != errno.EINVAL:
            raise
    pool.waitall()
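# NullLogger is imported from swift.common.utils in these excerpts but never
# shown. A minimal hypothetical stand-in, assuming only that eventlet's wsgi
# server treats its log object as file-like (it calls .write()):
class NullLogger(object):
    """A no-op file-like logger that discards eventlet.wsgi access lines."""

    def write(self, *args):
        # "logs" the args to nowhere
        pass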
def run_server(conf, logger, sock, global_conf=None):
    # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
    # some platforms. This locks in reported times to UTC.
    os.environ['TZ'] = 'UTC+0'
    time.tzset()

    wsgi.WRITE_TIMEOUT = int(conf.get('client_timeout') or 60)

    eventlet.hubs.use_hub(get_hub())
    eventlet_debug = config_true_value(conf.get('eventlet_debug', 'no'))
    eventlet.debug.hub_exceptions(eventlet_debug)
    wsgi_logger = NullLogger()
    if eventlet_debug:
        # let eventlet.wsgi.server log to stderr
        wsgi_logger = None
    # utils.LogAdapter stashes name in server; fallback on unadapted loggers
    if not global_conf:
        if hasattr(logger, 'server'):
            log_name = logger.server
        else:
            log_name = logger.name
        global_conf = {'log_name': log_name}
    # loadapp loads the app from proxy-server.conf and calls its app_factory
    # to instantiate the application class (Application).
    # conf = {"user": "******", "allow_account_management": "true",
    #         "bind_port": "8080", "__name__": "proxy-server",
    #         "__file__": "/etc/swift/proxy-server.conf",
    #         "account_autocreate": "true", "here": "/etc/swift"}
    # global_conf = {"log_name": "proxy-server"}
    app = loadapp(conf['__file__'], global_conf=global_conf)
    max_clients = int(conf.get('max_clients', '1024'))
    # dict.get(key, default=None): returns the default if key is missing
    pool = RestrictedGreenPool(size=max_clients)

    # Select which protocol class to use (normal or one expecting PROXY
    # protocol)
    if config_true_value(conf.get('require_proxy_protocol', 'no')):
        protocol_class = SwiftHttpProxiedProtocol
    else:
        protocol_class = SwiftHttpProtocol

    server_kwargs = {
        'custom_pool': pool,
        'protocol': protocol_class,
        # Disable capitalizing headers in Eventlet. This is necessary for
        # the AWS SDK to work with s3api middleware (it needs an "ETag"
        # header; "Etag" just won't do).
        'capitalize_response_headers': False,
    }
    # wsgi.server starts a WSGI server handling requests on the provided
    # listening socket; it loops forever. When the server exits, the sock
    # object is closed, but the underlying descriptor stays open.
    try:
        wsgi.server(sock, app, wsgi_logger, **server_kwargs)
    except socket.error as err:
        if err[0] != errno.EINVAL:
            raise
    # wait for all greenthreads in the pool to finish their work
    pool.waitall()
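# The run_server() variants above depend on Swift module state (loadapp,
# RestrictedGreenPool, the protocol classes). A self-contained sketch of the
# same eventlet.wsgi.server call shape, with a trivial WSGI app standing in
# for the loadapp() result (hello_app and the port choice are invented for
# illustration):
import eventlet
from eventlet import wsgi


def hello_app(environ, start_response):
    # Minimal WSGI app: every request gets a plain-text 200.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello\n']


sock = eventlet.listen(('127.0.0.1', 0))  # port 0: let the OS pick one
pool = eventlet.GreenPool(size=1024)      # bounds concurrent client handlers
wsgi.server(sock, hello_app, custom_pool=pool,
            capitalize_response_headers=False)  # blocks, serving forever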
def run_server(conf, logger, sock, global_conf=None):
    # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
    # some platforms. This locks in reported times to the timezone in which
    # the server first starts running in locations that periodically change
    # timezones.
    os.environ['TZ'] = time.strftime("%z", time.gmtime())

    wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
    # Turn off logging requests by the underlying WSGI software.
    wsgi.HttpProtocol.log_request = lambda *a: None
    # Redirect logging other messages by the underlying WSGI software.
    wsgi.HttpProtocol.log_message = \
        lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a)
    wsgi.WRITE_TIMEOUT = int(conf.get('client_timeout') or 60)

    eventlet.hubs.use_hub(get_hub())
    # NOTE(sileht):
    #     monkey-patching thread is required by python-keystoneclient;
    #     monkey-patching select is required by oslo.messaging pika driver
    #     if thread is monkey-patched.
    eventlet.patcher.monkey_patch(all=False, socket=True, select=True,
                                  thread=True)
    eventlet_debug = config_true_value(conf.get('eventlet_debug', 'no'))
    eventlet.debug.hub_exceptions(eventlet_debug)
    wsgi_logger = NullLogger()
    if eventlet_debug:
        # let eventlet.wsgi.server log to stderr
        wsgi_logger = None
    # utils.LogAdapter stashes name in server; fallback on unadapted loggers
    if not global_conf:
        if hasattr(logger, 'server'):
            log_name = logger.server
        else:
            log_name = logger.name
        global_conf = {'log_name': log_name}
    app = loadapp(conf['__file__'], global_conf=global_conf)
    max_clients = int(conf.get('max_clients', '1024'))
    pool = RestrictedGreenPool(size=max_clients)
    try:
        # Disable capitalizing headers in Eventlet if possible. This is
        # necessary for the AWS SDK to work with swift3 middleware.
        argspec = inspect.getargspec(wsgi.server)
        if 'capitalize_response_headers' in argspec.args:
            wsgi.server(sock, app, wsgi_logger, custom_pool=pool,
                        capitalize_response_headers=False)
        else:
            wsgi.server(sock, app, wsgi_logger, custom_pool=pool)
    except socket.error as err:
        if err[0] != errno.EINVAL:
            raise
    pool.waitall()
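# inspect.getargspec (used above for feature detection) was removed in
# Python 3.11. A sketch of the same idiom with inspect.signature
# (supports_kwarg is a hypothetical helper, not part of Swift):
import inspect

from eventlet import wsgi


def supports_kwarg(func, name):
    """Return True if `func` accepts a keyword argument called `name`."""
    try:
        return name in inspect.signature(func).parameters
    except (TypeError, ValueError):
        # Some builtins and C callables expose no retrievable signature.
        return False


if supports_kwarg(wsgi.server, 'capitalize_response_headers'):
    print('this eventlet can skip header capitalization')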
def setUp(self):
    global _servers, keystone0_lis, keystone1_lis, keystone0_srv, \
        keystone1_srv, keystone0_spa, keystone1_spa
    nl = NullLogger()
    keystone0_lis = listen(('localhost', 5001))
    keystone1_lis = listen(('localhost', 15001))
    keystone0_srv = DummySrv('http://127.0.0.1:5001')
    keystone1_srv = DummySrv('http://127.0.0.1:15001')
    keystone0_spa = spawn(wsgi.server, keystone0_lis, keystone0_srv, nl)
    keystone1_spa = spawn(wsgi.server, keystone1_lis, keystone1_srv, nl)
    _servers = (keystone0_spa, keystone1_spa)
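# DummySrv is used throughout these setUp() excerpts but never defined in
# them. A hypothetical stand-in consistent with how it is used here
# (constructed with a base URL, then handed to wsgi.server, so it must be a
# WSGI app):
class DummySrv(object):
    def __init__(self, baseurl=''):
        self.baseurl = baseurl
        self.requests = []  # tests could assert on what was received

    def __call__(self, environ, start_response):
        self.requests.append(environ.get('PATH_INFO'))
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'']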
def run_server():
    wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
    eventlet.hubs.use_hub('poll')
    eventlet.patcher.monkey_patch(all=False, socket=True)
    monkey_patch_mimetools()
    pool = GreenPool(size=1024)
    try:
        wsgi.server(sock, app, NullLogger(), custom_pool=pool)
    except socket.error as err:
        if err[0] != errno.EINVAL:
            raise
def setUp(self):
    global _servers, proxy0_srv, proxy1_srv, webcache0_srv, proxy0_lis, \
        proxy1_lis, webcache0_lis
    nl = NullLogger()
    conf = get_config()
    proxy0_lis = listen(('localhost', 8080))
    proxy1_lis = listen(('localhost', 18080))
    webcache0_lis = listen(('localhost', 8888))
    proxy0_srv = DummySrv('http://127.0.0.1:8080')
    proxy1_srv = DummySrv('http://127.0.0.1:18080')
    webcache0_srv = DummySrv('http://127.0.0.1:8888')
    proxy0_spa = spawn(wsgi.server, proxy0_lis, proxy0_srv, nl)
    proxy1_spa = spawn(wsgi.server, proxy1_lis, proxy1_srv, nl)
    webcache0_spa = spawn(wsgi.server, webcache0_lis, webcache0_srv, nl)
    _servers = (proxy0_spa, proxy1_spa, webcache0_spa)
def run_server(conf, logger, sock, global_conf=None):
    # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
    # some platforms. This locks in reported times to UTC.
    os.environ['TZ'] = 'UTC+0'
    time.tzset()

    wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
    # Turn off logging requests by the underlying WSGI software.
    wsgi.HttpProtocol.log_request = lambda *a: None
    # Redirect logging other messages by the underlying WSGI software.
    wsgi.HttpProtocol.log_message = \
        lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a)
    wsgi.WRITE_TIMEOUT = int(conf.get('client_timeout') or 60)

    eventlet.hubs.use_hub(get_hub())
    eventlet_debug = config_true_value(conf.get('eventlet_debug', 'no'))
    eventlet.debug.hub_exceptions(eventlet_debug)
    wsgi_logger = NullLogger()
    if eventlet_debug:
        # let eventlet.wsgi.server log to stderr
        wsgi_logger = None
    # utils.LogAdapter stashes name in server; fallback on unadapted loggers
    if not global_conf:
        if hasattr(logger, 'server'):
            log_name = logger.server
        else:
            log_name = logger.name
        global_conf = {'log_name': log_name}
    app = loadapp(conf['__file__'], global_conf=global_conf)
    max_clients = int(conf.get('max_clients', '1024'))
    pool = RestrictedGreenPool(size=max_clients)
    try:
        # Disable capitalizing headers in Eventlet if possible. This is
        # necessary for the AWS SDK to work with swift3 middleware.
        argspec = inspect.getargspec(wsgi.server)
        if 'capitalize_response_headers' in argspec.args:
            wsgi.server(sock, app, wsgi_logger, custom_pool=pool,
                        capitalize_response_headers=False)
        else:
            wsgi.server(sock, app, wsgi_logger, custom_pool=pool)
    except socket.error as err:
        if err[0] != errno.EINVAL:
            raise
    pool.waitall()
def run_server(conf, logger, sock, global_conf=None):
    # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
    # some platforms. This locks in reported times to UTC.
    os.environ['TZ'] = 'UTC+0'
    time.tzset()

    wsgi.WRITE_TIMEOUT = int(conf.get('client_timeout') or 60)

    eventlet.hubs.use_hub(get_hub())
    eventlet_debug = config_true_value(conf.get('eventlet_debug', 'no'))
    eventlet.debug.hub_exceptions(eventlet_debug)
    wsgi_logger = NullLogger()
    if eventlet_debug:
        # let eventlet.wsgi.server log to stderr
        wsgi_logger = None
    # utils.LogAdapter stashes name in server; fallback on unadapted loggers
    if not global_conf:
        if hasattr(logger, 'server'):
            log_name = logger.server
        else:
            log_name = logger.name
        global_conf = {'log_name': log_name}
    app = loadapp(conf['__file__'], global_conf=global_conf)
    max_clients = int(conf.get('max_clients', '1024'))
    pool = RestrictedGreenPool(size=max_clients)

    # Select which protocol class to use (normal or one expecting PROXY
    # protocol)
    if config_true_value(conf.get('require_proxy_protocol', 'no')):
        protocol_class = SwiftHttpProxiedProtocol
    else:
        protocol_class = SwiftHttpProtocol

    server_kwargs = {
        'custom_pool': pool,
        'protocol': protocol_class,
        # Disable capitalizing headers in Eventlet. This is necessary for
        # the AWS SDK to work with s3api middleware (it needs an "ETag"
        # header; "Etag" just won't do).
        'capitalize_response_headers': False,
    }
    try:
        wsgi.server(sock, app, wsgi_logger, **server_kwargs)
    except socket.error as err:
        if err[0] != errno.EINVAL:
            raise
    pool.waitall()
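# SwiftHttpProxiedProtocol expects HAProxy's PROXY protocol v1: a single
# CRLF-terminated text line sent before the HTTP request that carries the
# original client address. A hedged sketch of that wire format (this parser
# illustrates the spec; it is not Swift's implementation):
def parse_proxy_v1(line):
    # e.g. b'PROXY TCP4 192.168.0.1 192.168.0.11 56324 443\r\n'
    parts = line.rstrip(b'\r\n').split(b' ')
    if not parts or parts[0] != b'PROXY':
        raise ValueError('not a PROXY protocol v1 header')
    if parts[1] == b'UNKNOWN':
        return None  # the proxy could not determine the client address
    _proto, src_ip, dst_ip, src_port, dst_port = parts[1:6]
    return {'client_addr': (src_ip.decode(), int(src_port)),
            'proxy_addr': (dst_ip.decode(), int(dst_port))}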
def run_server():
    wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
    # Turn off logging requests by the underlying WSGI software.
    wsgi.HttpProtocol.log_request = lambda *a: None
    # Redirect logging other messages by the underlying WSGI software.
    wsgi.HttpProtocol.log_message = \
        lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a)
    wsgi.WRITE_TIMEOUT = int(conf.get('client_timeout') or 60)
    eventlet.hubs.use_hub('poll')
    eventlet.patcher.monkey_patch(all=False, socket=True)
    app = loadapp('config:%s' % conf_file,
                  global_conf={'log_name': log_name})
    pool = GreenPool(size=1024)
    try:
        wsgi.server(sock, app, NullLogger(), custom_pool=pool)
    except socket.error as err:
        if err[0] != errno.EINVAL:
            raise
def setup_servers(the_object_server=object_server, extra_conf=None):
    """
    Setup proxy, account, container and object servers using a set of fake
    rings and policies.

    :param the_object_server: The object server module to use (optional,
                              defaults to swift.obj.server)
    :param extra_conf: A dict of config options that will update the basic
                       config passed to all server instances.
    :returns: A dict containing the following entries:
                  orig_POLICIES: the value of storage_policy.POLICIES prior to
                                 it being patched with fake policies
                  orig_SysLogHandler: the value of utils.SysLogHandler prior
                                      to it being patched
                  testdir: root directory used for test files
                  test_POLICIES: a StoragePolicyCollection of fake policies
                  test_servers: a tuple of test server instances
                  test_sockets: a tuple of sockets used by test servers
                  test_coros: a tuple of greenthreads in which test servers
                              are running
    """
    context = {
        "orig_POLICIES": storage_policy._POLICIES,
        "orig_SysLogHandler": utils.SysLogHandler}

    utils.HASH_PATH_SUFFIX = b'endcap'
    utils.SysLogHandler = mock.MagicMock()
    # Since we're starting up a lot here, we're going to test more than
    # just chunked puts; we're also going to test parts of
    # proxy_server.Application we couldn't get to easily otherwise.
    context["testdir"] = _testdir = \
        os.path.join(mkdtemp(), 'tmp_test_proxy_server_chunked')
    mkdirs(_testdir)
    rmtree(_testdir)
    for drive in ('sda1', 'sdb1', 'sdc1', 'sdd1', 'sde1', 'sdf1',
                  'sdg1', 'sdh1', 'sdi1', 'sdj1', 'sdk1', 'sdl1'):
        mkdirs(os.path.join(_testdir, drive, 'tmp'))
    conf = {'devices': _testdir, 'swift_dir': _testdir,
            'mount_check': 'false', 'allowed_headers':
            'content-encoding, x-object-manifest, content-disposition, foo',
            'allow_versions': 't'}
    if extra_conf:
        conf.update(extra_conf)
    prolis = listen_zero()
    acc1lis = listen_zero()
    acc2lis = listen_zero()
    con1lis = listen_zero()
    con2lis = listen_zero()
    obj1lis = listen_zero()
    obj2lis = listen_zero()
    obj3lis = listen_zero()
    obj4lis = listen_zero()
    obj5lis = listen_zero()
    obj6lis = listen_zero()
    objsocks = [obj1lis, obj2lis, obj3lis, obj4lis, obj5lis, obj6lis]
    context["test_sockets"] = \
        (prolis, acc1lis, acc2lis, con1lis, con2lis,
         obj1lis, obj2lis, obj3lis, obj4lis, obj5lis, obj6lis)
    account_ring_path = os.path.join(_testdir, 'account.ring.gz')
    account_devs = [
        {'port': acc1lis.getsockname()[1]},
        {'port': acc2lis.getsockname()[1]},
    ]
    write_fake_ring(account_ring_path, *account_devs)
    container_ring_path = os.path.join(_testdir, 'container.ring.gz')
    container_devs = [
        {'port': con1lis.getsockname()[1]},
        {'port': con2lis.getsockname()[1]},
    ]
    write_fake_ring(container_ring_path, *container_devs)
    storage_policy._POLICIES = storage_policy.StoragePolicyCollection([
        StoragePolicy(0, 'zero', True),
        StoragePolicy(1, 'one', False),
        StoragePolicy(2, 'two', False),
        ECStoragePolicy(3, 'ec', ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=2, ec_nparity=1, ec_segment_size=4096),
        ECStoragePolicy(4, 'ec-dup', ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=2, ec_nparity=1, ec_segment_size=4096,
                        ec_duplication_factor=2)])
    obj_rings = {
        0: ('sda1', 'sdb1'),
        1: ('sdc1', 'sdd1'),
        2: ('sde1', 'sdf1'),
        # sdg1, sdh1, sdi1 taken by policy 3 (see below)
    }
    for policy_index, devices in obj_rings.items():
        policy = storage_policy.POLICIES[policy_index]
        obj_ring_path = os.path.join(_testdir, policy.ring_name + '.ring.gz')
        obj_devs = [
            {'port': objsock.getsockname()[1], 'device': dev}
            for objsock, dev in zip(objsocks, devices)]
        write_fake_ring(obj_ring_path, *obj_devs)

    # write_fake_ring can't handle a 3-element ring, and the EC policy needs
    # at least 6 devs to work with (ec_k=2, ec_m=1, duplication_factor=2),
    # so we do it manually
    devs = [{'id': 0, 'zone': 0, 'device': 'sdg1', 'ip': '127.0.0.1',
             'port': obj1lis.getsockname()[1]},
            {'id': 1, 'zone': 0, 'device': 'sdh1', 'ip': '127.0.0.1',
             'port': obj2lis.getsockname()[1]},
            {'id': 2, 'zone': 0, 'device': 'sdi1', 'ip': '127.0.0.1',
             'port': obj3lis.getsockname()[1]},
            {'id': 3, 'zone': 0, 'device': 'sdj1', 'ip': '127.0.0.1',
             'port': obj4lis.getsockname()[1]},
            {'id': 4, 'zone': 0, 'device': 'sdk1', 'ip': '127.0.0.1',
             'port': obj5lis.getsockname()[1]},
            {'id': 5, 'zone': 0, 'device': 'sdl1', 'ip': '127.0.0.1',
             'port': obj6lis.getsockname()[1]}]
    pol3_replica2part2dev_id = [[0, 1, 2, 0],
                                [1, 2, 0, 1],
                                [2, 0, 1, 2]]
    pol4_replica2part2dev_id = [[0, 1, 2, 3],
                                [1, 2, 3, 4],
                                [2, 3, 4, 5],
                                [3, 4, 5, 0],
                                [4, 5, 0, 1],
                                [5, 0, 1, 2]]
    obj3_ring_path = os.path.join(
        _testdir, storage_policy.POLICIES[3].ring_name + '.ring.gz')
    part_shift = 30
    with closing(GzipFile(obj3_ring_path, 'wb')) as fh:
        pickle.dump(RingData(pol3_replica2part2dev_id, devs, part_shift), fh)
    obj4_ring_path = os.path.join(
        _testdir, storage_policy.POLICIES[4].ring_name + '.ring.gz')
    part_shift = 30
    with closing(GzipFile(obj4_ring_path, 'wb')) as fh:
        pickle.dump(RingData(pol4_replica2part2dev_id, devs, part_shift), fh)
    prosrv = proxy_server.Application(conf, logger=debug_logger('proxy'))
    for policy in storage_policy.POLICIES:
        # make sure all the rings are loaded
        prosrv.get_object_ring(policy.idx)
    # don't lose this one!
    context["test_POLICIES"] = storage_policy._POLICIES
    acc1srv = account_server.AccountController(
        conf, logger=debug_logger('acct1'))
    acc2srv = account_server.AccountController(
        conf, logger=debug_logger('acct2'))
    con1srv = container_server.ContainerController(
        conf, logger=debug_logger('cont1'))
    con2srv = container_server.ContainerController(
        conf, logger=debug_logger('cont2'))
    obj1srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj1'))
    obj2srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj2'))
    obj3srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj3'))
    obj4srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj4'))
    obj5srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj5'))
    obj6srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj6'))
    context["test_servers"] = \
        (prosrv, acc1srv, acc2srv, con1srv, con2srv,
         obj1srv, obj2srv, obj3srv, obj4srv, obj5srv, obj6srv)
    nl = NullLogger()
    logging_prosv = proxy_logging.ProxyLoggingMiddleware(
        listing_formats.ListingFilter(prosrv), conf, logger=prosrv.logger)
    prospa = spawn(wsgi.server, prolis, logging_prosv, nl)
    acc1spa = spawn(wsgi.server, acc1lis, acc1srv, nl)
    acc2spa = spawn(wsgi.server, acc2lis, acc2srv, nl)
    con1spa = spawn(wsgi.server, con1lis, con1srv, nl)
    con2spa = spawn(wsgi.server, con2lis, con2srv, nl)
    obj1spa = spawn(wsgi.server, obj1lis, obj1srv, nl)
    obj2spa = spawn(wsgi.server, obj2lis, obj2srv, nl)
    obj3spa = spawn(wsgi.server, obj3lis, obj3srv, nl)
    obj4spa = spawn(wsgi.server, obj4lis, obj4srv, nl)
    obj5spa = spawn(wsgi.server, obj5lis, obj5srv, nl)
    obj6spa = spawn(wsgi.server, obj6lis, obj6srv, nl)
    context["test_coros"] = \
        (prospa, acc1spa, acc2spa, con1spa, con2spa,
         obj1spa, obj2spa, obj3spa, obj4spa, obj5spa, obj6spa)
    # Create account
    ts = normalize_timestamp(time.time())
    partition, nodes = prosrv.account_ring.get_nodes('a')
    for node in nodes:
        conn = swift.proxy.controllers.obj.http_connect(
            node['ip'], node['port'], node['device'], partition, 'PUT', '/a',
            {'X-Timestamp': ts, 'x-trans-id': 'test'})
        resp = conn.getresponse()
        assert(resp.status == 201)
    # Create another account
    # used for account-to-account tests
    ts = normalize_timestamp(time.time())
    partition, nodes = prosrv.account_ring.get_nodes('a1')
    for node in nodes:
        conn = swift.proxy.controllers.obj.http_connect(
            node['ip'], node['port'], node['device'], partition, 'PUT', '/a1',
            {'X-Timestamp': ts, 'x-trans-id': 'test'})
        resp = conn.getresponse()
        assert(resp.status == 201)
    # Create containers, 1 per test policy
    sock = connect_tcp(('localhost', prolis.getsockname()[1]))
    fd = sock.makefile()
    fd.write('PUT /v1/a/c HTTP/1.1\r\nHost: localhost\r\n'
             'Connection: close\r\nX-Auth-Token: t\r\n'
             'Content-Length: 0\r\n\r\n')
    fd.flush()
    headers = readuntil2crlfs(fd)
    exp = 'HTTP/1.1 201'
    assert headers[:len(exp)] == exp, \
        "Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])
    # Create container in other account
    # used for account-to-account tests
    sock = connect_tcp(('localhost', prolis.getsockname()[1]))
    fd = sock.makefile()
    fd.write('PUT /v1/a1/c1 HTTP/1.1\r\nHost: localhost\r\n'
             'Connection: close\r\nX-Auth-Token: t\r\n'
             'Content-Length: 0\r\n\r\n')
    fd.flush()
    headers = readuntil2crlfs(fd)
    exp = 'HTTP/1.1 201'
    assert headers[:len(exp)] == exp, \
        "Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])

    sock = connect_tcp(('localhost', prolis.getsockname()[1]))
    fd = sock.makefile()
    fd.write(
        'PUT /v1/a/c1 HTTP/1.1\r\nHost: localhost\r\n'
        'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: one\r\n'
        'Content-Length: 0\r\n\r\n')
    fd.flush()
    headers = readuntil2crlfs(fd)
    exp = 'HTTP/1.1 201'
    assert headers[:len(exp)] == exp, \
        "Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])

    sock = connect_tcp(('localhost', prolis.getsockname()[1]))
    fd = sock.makefile()
    fd.write(
        'PUT /v1/a/c2 HTTP/1.1\r\nHost: localhost\r\n'
        'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: two\r\n'
        'Content-Length: 0\r\n\r\n')
    fd.flush()
    headers = readuntil2crlfs(fd)
    exp = 'HTTP/1.1 201'
    assert headers[:len(exp)] == exp, \
        "Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])
    return context
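# A hedged sketch of the matching teardown for the context dict that
# setup_servers() returns (the real helper may differ in details): kill the
# spawned server greenthreads, remove the temp tree, and restore the patched
# globals.
import shutil


def teardown_servers(context):
    for server in context["test_coros"]:
        server.kill()  # stop each greenthread running wsgi.server
    shutil.rmtree(context["testdir"], ignore_errors=True)
    utils.SysLogHandler = context["orig_SysLogHandler"]
    storage_policy._POLICIES = context["orig_POLICIES"]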
def setUp(self):
    global _servers, lis, srv, spa
    nl = NullLogger()
    lis = listen(('localhost', 8080))
    srv = DummySrv()
    spa = spawn(wsgi.server, lis, srv, nl)
def setUp(self):
    global keystone0_lis, keystone0_srv, keystone0_spa
    nl = NullLogger()
    keystone0_lis = listen(('localhost', 15000))
    keystone0_srv = DummySrv('http://127.0.0.1:15000')
    keystone0_spa = spawn(wsgi.server, keystone0_lis, keystone0_srv, nl)