def test_chunked_put(self):
    listener = listen(('localhost', 0))
    port = listener.getsockname()[1]
    killer = spawn(wsgi.server, listener, self.object_controller,
                   NullLogger())
    # PUT an object using chunked transfer-encoding: two chunks
    # ('oh' and ' hai') followed by the zero-length terminating chunk.
    sock = connect_tcp(('localhost', port))
    fd = sock.makefile()
    fd.write('PUT /sda1/p/a/c/o HTTP/1.1\r\nHost: localhost\r\n'
             'Content-Type: text/plain\r\n'
             'Connection: close\r\nX-Timestamp: 1.0\r\n'
             'Transfer-Encoding: chunked\r\n\r\n'
             '2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n')
    fd.flush()
    readuntil2crlfs(fd)
    # GET the object back and check that the chunks were reassembled.
    sock = connect_tcp(('localhost', port))
    fd = sock.makefile()
    fd.write('GET /sda1/p/a/c/o HTTP/1.1\r\nHost: localhost\r\n'
             'Connection: close\r\n\r\n')
    fd.flush()
    readuntil2crlfs(fd)
    response = fd.read()
    self.assertEqual(response, 'oh hai')
    killer.kill()
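
# test_chunked_put (and setup_servers below) relies on a readuntil2crlfs
# helper defined elsewhere in the test suite.  The sketch below is an
# assumption about its behaviour, not the suite's actual implementation:
# it reads one character at a time from the socket's file wrapper until
# the blank line ('\r\n\r\n') that terminates the HTTP response headers,
# and returns everything read so far (status line plus headers).
def readuntil2crlfs(fd):
    rv = ''
    tail = ''
    while tail != '\r\n\r\n':
        c = fd.read(1)
        if not c:
            # connection closed before the headers were complete
            raise ValueError('connection closed before two CRLFs: %r' % rv)
        rv += c
        tail = (tail + c)[-4:]
    return rv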
def setup_servers(the_object_server=object_server, extra_conf=None):
    """
    Setup proxy, account, container and object servers using a set of fake
    rings and policies.

    :param the_object_server: The object server module to use (optional,
                              defaults to swift.obj.server)
    :param extra_conf: A dict of config options that will update the basic
                       config passed to all server instances.
    :returns: A dict containing the following entries:
                  orig_POLICIES: the value of storage_policy.POLICIES prior to
                                 it being patched with fake policies
                  orig_SysLogHandler: the value of utils.SysLogHandler prior to
                                      it being patched
                  testdir: root directory used for test files
                  test_POLICIES: a StoragePolicyCollection of fake policies
                  test_servers: a tuple of test server instances
                  test_sockets: a tuple of sockets used by test servers
                  test_coros: a tuple of greenthreads in which test servers
                              are running
    """
    context = {
        "orig_POLICIES": storage_policy._POLICIES,
        "orig_SysLogHandler": utils.SysLogHandler}

    utils.HASH_PATH_SUFFIX = b'endcap'
    utils.SysLogHandler = mock.MagicMock()
    # Since we're starting up a lot here, we're going to test more than
    # just chunked puts; we're also going to test parts of
    # proxy_server.Application we couldn't get to easily otherwise.
    context["testdir"] = _testdir = \
        os.path.join(mkdtemp(), 'tmp_test_proxy_server_chunked')
    mkdirs(_testdir)
    rmtree(_testdir)
    for drive in ('sda1', 'sdb1', 'sdc1', 'sdd1', 'sde1', 'sdf1',
                  'sdg1', 'sdh1', 'sdi1', 'sdj1', 'sdk1', 'sdl1'):
        mkdirs(os.path.join(_testdir, drive, 'tmp'))
    conf = {'devices': _testdir, 'swift_dir': _testdir,
            'mount_check': 'false', 'allowed_headers':
            'content-encoding, x-object-manifest, content-disposition, foo',
            'allow_versions': 't'}
    if extra_conf:
        conf.update(extra_conf)
    prolis = listen_zero()
    acc1lis = listen_zero()
    acc2lis = listen_zero()
    con1lis = listen_zero()
    con2lis = listen_zero()
    obj1lis = listen_zero()
    obj2lis = listen_zero()
    obj3lis = listen_zero()
    obj4lis = listen_zero()
    obj5lis = listen_zero()
    obj6lis = listen_zero()
    objsocks = [obj1lis, obj2lis, obj3lis, obj4lis, obj5lis, obj6lis]
    context["test_sockets"] = \
        (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis,
         obj3lis, obj4lis, obj5lis, obj6lis)
    account_ring_path = os.path.join(_testdir, 'account.ring.gz')
    account_devs = [
        {'port': acc1lis.getsockname()[1]},
        {'port': acc2lis.getsockname()[1]},
    ]
    write_fake_ring(account_ring_path, *account_devs)
    container_ring_path = os.path.join(_testdir, 'container.ring.gz')
    container_devs = [
        {'port': con1lis.getsockname()[1]},
        {'port': con2lis.getsockname()[1]},
    ]
    write_fake_ring(container_ring_path, *container_devs)
    storage_policy._POLICIES = storage_policy.StoragePolicyCollection([
        StoragePolicy(0, 'zero', True),
        StoragePolicy(1, 'one', False),
        StoragePolicy(2, 'two', False),
        ECStoragePolicy(3, 'ec', ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=2, ec_nparity=1, ec_segment_size=4096),
        ECStoragePolicy(4, 'ec-dup', ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=2, ec_nparity=1, ec_segment_size=4096,
                        ec_duplication_factor=2)])
    obj_rings = {
        0: ('sda1', 'sdb1'),
        1: ('sdc1', 'sdd1'),
        2: ('sde1', 'sdf1'),
        # sdg1, sdh1, sdi1 taken by policy 3 (see below)
    }
    for policy_index, devices in obj_rings.items():
        policy = storage_policy.POLICIES[policy_index]
        obj_ring_path = os.path.join(_testdir, policy.ring_name + '.ring.gz')
        obj_devs = [
            {'port': objsock.getsockname()[1], 'device': dev}
            for objsock, dev in zip(objsocks, devices)]
        write_fake_ring(obj_ring_path, *obj_devs)

    # write_fake_ring can't handle a 3-element ring, and the EC policy needs
    # at least 6 devs to work with (ec_k=2, ec_m=1, duplication_factor=2),
    # so we do it manually
    devs = [{'id': 0, 'zone': 0, 'device': 'sdg1', 'ip': '127.0.0.1',
             'port': obj1lis.getsockname()[1]},
            {'id': 1, 'zone': 0, 'device': 'sdh1', 'ip': '127.0.0.1',
             'port': obj2lis.getsockname()[1]},
            {'id': 2, 'zone': 0, 'device': 'sdi1', 'ip': '127.0.0.1',
             'port': obj3lis.getsockname()[1]},
            {'id': 3, 'zone': 0, 'device': 'sdj1', 'ip': '127.0.0.1',
             'port': obj4lis.getsockname()[1]},
            {'id': 4, 'zone': 0, 'device': 'sdk1', 'ip': '127.0.0.1',
             'port': obj5lis.getsockname()[1]},
            {'id': 5, 'zone': 0, 'device': 'sdl1', 'ip': '127.0.0.1',
             'port': obj6lis.getsockname()[1]}]
    pol3_replica2part2dev_id = [[0, 1, 2, 0],
                                [1, 2, 0, 1],
                                [2, 0, 1, 2]]
    pol4_replica2part2dev_id = [[0, 1, 2, 3],
                                [1, 2, 3, 4],
                                [2, 3, 4, 5],
                                [3, 4, 5, 0],
                                [4, 5, 0, 1],
                                [5, 0, 1, 2]]
    obj3_ring_path = os.path.join(
        _testdir, storage_policy.POLICIES[3].ring_name + '.ring.gz')
    part_shift = 30
    with closing(GzipFile(obj3_ring_path, 'wb')) as fh:
        pickle.dump(RingData(pol3_replica2part2dev_id, devs, part_shift), fh)

    obj4_ring_path = os.path.join(
        _testdir, storage_policy.POLICIES[4].ring_name + '.ring.gz')
    part_shift = 30
    with closing(GzipFile(obj4_ring_path, 'wb')) as fh:
        pickle.dump(RingData(pol4_replica2part2dev_id, devs, part_shift), fh)

    prosrv = proxy_server.Application(conf, logger=debug_logger('proxy'))
    for policy in storage_policy.POLICIES:
        # make sure all the rings are loaded
        prosrv.get_object_ring(policy.idx)
    # don't lose this one!
    context["test_POLICIES"] = storage_policy._POLICIES
    acc1srv = account_server.AccountController(
        conf, logger=debug_logger('acct1'))
    acc2srv = account_server.AccountController(
        conf, logger=debug_logger('acct2'))
    con1srv = container_server.ContainerController(
        conf, logger=debug_logger('cont1'))
    con2srv = container_server.ContainerController(
        conf, logger=debug_logger('cont2'))
    obj1srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj1'))
    obj2srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj2'))
    obj3srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj3'))
    obj4srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj4'))
    obj5srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj5'))
    obj6srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj6'))
    context["test_servers"] = \
        (prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv,
         obj3srv, obj4srv, obj5srv, obj6srv)
    nl = NullLogger()
    logging_prosv = proxy_logging.ProxyLoggingMiddleware(
        listing_formats.ListingFilter(prosrv), conf, logger=prosrv.logger)
    prospa = spawn(wsgi.server, prolis, logging_prosv, nl)
    acc1spa = spawn(wsgi.server, acc1lis, acc1srv, nl)
    acc2spa = spawn(wsgi.server, acc2lis, acc2srv, nl)
    con1spa = spawn(wsgi.server, con1lis, con1srv, nl)
    con2spa = spawn(wsgi.server, con2lis, con2srv, nl)
    obj1spa = spawn(wsgi.server, obj1lis, obj1srv, nl)
    obj2spa = spawn(wsgi.server, obj2lis, obj2srv, nl)
    obj3spa = spawn(wsgi.server, obj3lis, obj3srv, nl)
    obj4spa = spawn(wsgi.server, obj4lis, obj4srv, nl)
    obj5spa = spawn(wsgi.server, obj5lis, obj5srv, nl)
    obj6spa = spawn(wsgi.server, obj6lis, obj6srv, nl)
    context["test_coros"] = \
        (prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa,
         obj3spa, obj4spa, obj5spa, obj6spa)
    # Create account
    ts = normalize_timestamp(time.time())
    partition, nodes = prosrv.account_ring.get_nodes('a')
    for node in nodes:
        conn = swift.proxy.controllers.obj.http_connect(
            node['ip'], node['port'], node['device'], partition, 'PUT', '/a',
            {'X-Timestamp': ts, 'x-trans-id': 'test'})
        resp = conn.getresponse()
        assert(resp.status == 201)
    # Create another account
    # used for account-to-account tests
    ts = normalize_timestamp(time.time())
    partition, nodes = prosrv.account_ring.get_nodes('a1')
    for node in nodes:
        conn = swift.proxy.controllers.obj.http_connect(
            node['ip'], node['port'], node['device'], partition, 'PUT', '/a1',
            {'X-Timestamp': ts, 'x-trans-id': 'test'})
        resp = conn.getresponse()
        assert(resp.status == 201)
    # Create containers, 1 per test policy
    sock = connect_tcp(('localhost', prolis.getsockname()[1]))
    fd = sock.makefile()
    fd.write('PUT /v1/a/c HTTP/1.1\r\nHost: localhost\r\n'
             'Connection: close\r\nX-Auth-Token: t\r\n'
             'Content-Length: 0\r\n\r\n')
    fd.flush()
    headers = readuntil2crlfs(fd)
    exp = 'HTTP/1.1 201'
    assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % (
        exp, headers[:len(exp)])
    # Create container in other account
    # used for account-to-account tests
    sock = connect_tcp(('localhost', prolis.getsockname()[1]))
    fd = sock.makefile()
    fd.write('PUT /v1/a1/c1 HTTP/1.1\r\nHost: localhost\r\n'
             'Connection: close\r\nX-Auth-Token: t\r\n'
             'Content-Length: 0\r\n\r\n')
    fd.flush()
    headers = readuntil2crlfs(fd)
    exp = 'HTTP/1.1 201'
    assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % (
        exp, headers[:len(exp)])

    sock = connect_tcp(('localhost', prolis.getsockname()[1]))
    fd = sock.makefile()
    fd.write(
        'PUT /v1/a/c1 HTTP/1.1\r\nHost: localhost\r\n'
        'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: one\r\n'
        'Content-Length: 0\r\n\r\n')
    fd.flush()
    headers = readuntil2crlfs(fd)
    exp = 'HTTP/1.1 201'
    assert headers[:len(exp)] == exp, \
        "Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])

    sock = connect_tcp(('localhost', prolis.getsockname()[1]))
    fd = sock.makefile()
    fd.write(
        'PUT /v1/a/c2 HTTP/1.1\r\nHost: localhost\r\n'
        'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: two\r\n'
        'Content-Length: 0\r\n\r\n')
    fd.flush()
    headers = readuntil2crlfs(fd)
    exp = 'HTTP/1.1 201'
    assert headers[:len(exp)] == exp, \
        "Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])
    return context
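
# Usage sketch (an assumption, not part of the source): a test module can
# build the servers once in setUpModule() and undo everything afterwards.
# The teardown body below is inferred from what setup_servers patches and
# creates; the real suite may provide its own teardown helper.
_test_context = None


def setUpModule():
    global _test_context
    _test_context = setup_servers()


def tearDownModule():
    # stop the greenthreads running the WSGI servers
    for coro in _test_context["test_coros"]:
        coro.kill()
    # restore the module state that setup_servers patched
    storage_policy._POLICIES = _test_context["orig_POLICIES"]
    utils.SysLogHandler = _test_context["orig_SysLogHandler"]
    # remove the temporary device directories (testdir lives under a
    # mkdtemp() parent, so remove that parent too)
    rmtree(os.path.dirname(_test_context["testdir"]), ignore_errors=True)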