def connect(self):
    """
    Establishes a connection and starts an SSYNC request
    with the object server.

    :raises exceptions.ReplicationException: if the remote does not
        answer with 200 OK.
    :raises exceptions.MessageTimeout: if connecting or reading the
        response exceeds the daemon's conn/node timeouts.
    """
    with exceptions.MessageTimeout(
            self.daemon.conn_timeout, 'connect send'):
        self.connection = bufferedhttp.BufferedHTTPConnection(
            '%s:%s' % (self.node['replication_ip'],
                       self.node['replication_port']))
        self.connection.putrequest(
            'SSYNC',
            '/%s/%s' % (self.node['device'], self.job['partition']))
        self.connection.putheader('Transfer-Encoding', 'chunked')
        self.connection.putheader('X-Backend-Storage-Policy-Index',
                                  int(self.job['policy']))
        # a sync job must use the node's index for the frag_index of the
        # rebuilt fragments instead of the frag_index from the job which
        # will be rebuilding them
        frag_index = self.node.get('index', self.job.get('frag_index'))
        if frag_index is None:
            # replication jobs will not have a frag_index key; jobs with
            # an explicit frag_index of None (e.g. tombstone-only
            # reconstructor jobs) must not put the literal string 'None'
            # on the wire - send the empty string instead, which the
            # receiver treats as "no frag index"
            frag_index = ''
        self.connection.putheader('X-Backend-Ssync-Frag-Index',
                                  frag_index)
        # a revert job to a handoff will not have a node index
        self.connection.putheader('X-Backend-Ssync-Node-Index',
                                  self.node.get('index', ''))
        self.connection.endheaders()
    with exceptions.MessageTimeout(
            self.daemon.node_timeout, 'connect receive'):
        self.response = self.connection.getresponse()
        if self.response.status != http.HTTP_OK:
            # drain the body so the failure is fully consumed
            self.response.read()
            raise exceptions.ReplicationException(
                'Expected status %s; got %s' %
                (http.HTTP_OK, self.response.status))
def connect(self):
    """
    Establishes a connection and starts an SSYNC request
    with the object server.
    """
    node = self.node
    job = self.job
    with exceptions.MessageTimeout(self.daemon.conn_timeout,
                                   'connect send'):
        conn = bufferedhttp.BufferedHTTPConnection(
            '%s:%s' % (node['replication_ip'],
                       node['replication_port']))
        self.connection = conn
        conn.putrequest('SSYNC',
                        '/%s/%s' % (node['device'], job['partition']))
        # chunked transfer because the sender streams an unknown-length
        # sequence of ssync messages; the backend headers route the
        # request to the right policy/fragment on the receiver
        for header, value in (
                ('Transfer-Encoding', 'chunked'),
                ('X-Backend-Storage-Policy-Index', int(job['policy'])),
                ('X-Backend-Ssync-Frag-Index', node['index'])):
            conn.putheader(header, value)
        conn.endheaders()
    with exceptions.MessageTimeout(self.daemon.node_timeout,
                                   'connect receive'):
        self.response = conn.getresponse()
        if self.response.status != http.HTTP_OK:
            raise exceptions.ReplicationException(
                'Expected status %s; got %s' %
                (http.HTTP_OK, self.response.status))
def forward_raw_swift_req(swift_baseurl, req, logger, object_chunk_size):
    """
    Forward *req* verbatim to the Swift backend at *swift_baseurl* and
    return the backend's response.

    Hop-by-hop headers are stripped in both directions; the request's
    Host header is replaced with the backend host, and any
    ``x-storage-url`` header in the response is rewritten to point back
    at the originally requested (proxy) host.

    :param swift_baseurl: base URL of the Swift backend, e.g.
        ``https://host:port``
    :param req: the incoming swob request to forward
    :param logger: logger instance (currently unused, kept for interface
        compatibility)
    :param object_chunk_size: chunk size in bytes used to stream the
        request body
    :returns: a ``swob.Response`` wrapping the backend response
    """
    scheme, netloc, _, _, _ = urlsplit(swift_baseurl)
    ssl = (scheme == 'https')
    swift_host, swift_port = utils.parse_socket_string(
        netloc, 443 if ssl else 80)
    swift_port = int(swift_port)
    if ssl:
        conn = bufferedhttp.HTTPSConnection(swift_host, port=swift_port)
    else:
        conn = bufferedhttp.BufferedHTTPConnection(swift_host,
                                                   port=swift_port)
    conn.path = req.path_qs
    conn.putrequest(req.method, req.path_qs, skip_host=True)
    proxy_satellite_host = ''
    for header, value in filter_hop_by_hop_headers(req.headers.items()):
        if header.lower() == 'host':
            # remember the externally visible host so x-storage-url in
            # the response can be rewritten to point back at this proxy
            proxy_satellite_host = value
            continue
        conn.putheader(header, str(value))
    conn.putheader('Host', str(swift_host))
    conn.endheaders()
    content_length = int(req.headers.get('content-length', '0'))
    if content_length != 0:
        # stream the request body through in fixed-size chunks
        chunk = req.body_file.read(object_chunk_size)
        while chunk:
            conn.send(chunk)
            chunk = req.body_file.read(object_chunk_size)
    resp = conn.getresponse()
    headers = dict(filter_hop_by_hop_headers(resp.getheaders()))
    if 'x-storage-url' in headers:
        swift_scheme, swift_netloc, swift_path, _, _ = urlsplit(
            headers['x-storage-url'])
        headers['x-storage-url'] = '%s://%s%s' % (
            swift_scheme, proxy_satellite_host, swift_path)
    # HEAD responses advertise a content-length but carry no body
    body_len = (0 if req.method == 'HEAD'
                else int(headers.get('content-length', '0')))
    app_iter = ClosingResourceIterable(
        resource=conn, data_src=resp, length=body_len)
    return swob.Response(app_iter=app_iter, status=resp.status,
                         headers=headers, request=req)
def connect(self):
    """
    Establishes a connection and starts a REPLICATION request
    with the object server.
    """
    node, job = self.node, self.job
    with exceptions.MessageTimeout(self.daemon.conn_timeout,
                                   'connect send'):
        # chunked transfer since the full message stream length is not
        # known up front
        self.connection = bufferedhttp.BufferedHTTPConnection(
            '%s:%s' % (node['ip'], node['port']))
        self.connection.putrequest(
            'REPLICATION',
            '/%s/%s' % (node['device'], job['partition']))
        self.connection.putheader('Transfer-Encoding', 'chunked')
        self.connection.endheaders()
    with exceptions.MessageTimeout(self.daemon.node_timeout,
                                   'connect receive'):
        self.response = self.connection.getresponse()
        if self.response.status != http.HTTP_OK:
            raise exceptions.ReplicationException(
                'Expected status %s; got %s' %
                (http.HTTP_OK, self.response.status))
def connect(self):
    """
    Establishes a connection and starts an SSYNC request
    with the object server.
    """
    with exceptions.MessageTimeout(self.daemon.conn_timeout,
                                   'connect send'):
        conn = bufferedhttp.BufferedHTTPConnection(
            '%s:%s' % (self.node['replication_ip'],
                       self.node['replication_port']))
        self.connection = conn
        conn.putrequest('SSYNC', '/%s/%s' % (self.node['device'],
                                             self.job['partition']))
        conn.putheader('Transfer-Encoding', 'chunked')
        conn.putheader('X-Backend-Storage-Policy-Index',
                       int(self.job['policy']))
        # a sync job must use the node's index for the frag_index of the
        # rebuilt fragments instead of the frag_index from the job which
        # will be rebuilding them
        frag_index = self.node.get('index', self.job.get('frag_index'))
        if frag_index is None:
            # replication jobs will not have a frag_index key;
            # reconstructor jobs with only tombstones will have a
            # frag_index key explicitly set to the value of None - in
            # both cases on the wire we write the empty string which
            # ssync_receiver will translate to None
            frag_index = ''
        conn.putheader('X-Backend-Ssync-Frag-Index', frag_index)
        # a revert job to a handoff will not have a node index
        conn.putheader('X-Backend-Ssync-Node-Index',
                       self.node.get('index', ''))
        conn.endheaders()
    with exceptions.MessageTimeout(self.daemon.node_timeout,
                                   'connect receive'):
        self.response = conn.getresponse()
        if self.response.status != http.HTTP_OK:
            # include a bounded slice of the error body for diagnostics
            err_msg = self.response.read()[:1024]
            raise exceptions.ReplicationException(
                'Expected status %s; got %s (%s)' %
                (http.HTTP_OK, self.response.status, err_msg))
def test_get_expect(self):
    """
    A 100 Continue interim response must be surfaced by getexpect()
    as a BufferedHTTPResponse, and the final response must still be
    readable afterwards.
    """
    bindsock = listen_zero()
    request = []

    def accept():
        # minimal in-process HTTP server: record the request line, send
        # an interim 100 Continue followed by the real 200 response
        with Timeout(3):
            sock, addr = bindsock.accept()
            fp = sock.makefile('rwb')
            request.append(fp.readline())
            fp.write(b'HTTP/1.1 100 Continue\r\n\r\n')
            fp.flush()
            fp.write(b'HTTP/1.1 200 OK\r\nContent-Length: 8\r\n\r\n'
                     b'RESPONSE')
            fp.flush()

    server = spawn(accept)
    try:
        address = '%s:%s' % ('127.0.0.1', bindsock.getsockname()[1])
        conn = bufferedhttp.BufferedHTTPConnection(address)
        conn.putrequest('GET', '/path')
        conn.endheaders()
        resp = conn.getexpect()
        self.assertIsInstance(resp, bufferedhttp.BufferedHTTPResponse)
        self.assertEqual(resp.status, 100)
        self.assertEqual(resp.version, 11)
        self.assertEqual(resp.reason, 'Continue')
        # I don't think you're supposed to "read" a continue response
        self.assertRaises(AssertionError, resp.read)
        resp = conn.getresponse()
        self.assertIsInstance(resp, bufferedhttp.BufferedHTTPResponse)
        self.assertEqual(resp.read(), b'RESPONSE')
    finally:
        server.wait()
        # close the listening socket - previously leaked by this test
        bindsock.close()
    self.assertEqual(request[0], b'GET /path HTTP/1.1\r\n')