# Imports this section relies on (assumed to live in the module header,
# alongside the project-local ``api`` zipkin wrapper and the
# ``add_remote_endpoint`` helper used below).
import json
import os
import pickle
import re

from eventlet import Timeout

from swift.common import memcached


def _delete(self, key):
    """
    Deletes a key/value pair from memcache.

    :param key: key to be deleted
    """
    orig_key = key
    key = memcached.md5hash(key)
    for (server, fp, sock) in self._get_conns(key):
        with api.ezipkin_client_span(
                api.default_service_name(),
                span_name='delete',
                binary_annotations={
                    "memcached.key": orig_key,
                },
        ) as zipkin_span:
            add_remote_endpoint(zipkin_span, server)
            try:
                with Timeout(self._io_timeout):
                    sock.sendall(b'delete ' + key + b'\r\n')
                    # Wait for the delete to complete
                    fp.readline()
                self._return_conn(server, fp, sock)
                return
            except (Exception, Timeout) as e:
                self._exception_occurred(server, e, sock=sock, fp=fp)

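# ``add_remote_endpoint(zipkin_span, server)`` above is a module helper
# defined outside this section.  A minimal sketch of the shape it
# presumably has, assuming ``server`` is the "host:port" string that
# ``_get_conns`` yields (the helper body and fallback port below are
# assumptions, not confirmed by this section):
#
#     def add_remote_endpoint(zipkin_span, server):
#         host, _, port = server.rpartition(':')
#         if not host:  # bare hostname, no explicit port
#             host, port = server, '11211'
#         zipkin_span.add_remote_endpoint(
#             host=host, port=int(port), service_name='memcached')
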
def _patched_endheaders(self):
    # self is a HTTPConnection
    if api.has_default_tracer():
        span_ctx = api.ezipkin_client_span(
            api.default_service_name(),
            span_name=self._method,
            binary_annotations={'http.uri': self.path},
        )
        span_ctx.start()

        # Guess the remote service from the request path and port: backend
        # requests look like /<device>/<partition>/..., where the device
        # name is a "d" followed by digits.
        remote_service_name = 'unknown'
        try:
            path_bits = self.path.split('/', 5)[1:]
            if path_bits[0].startswith('d') and path_bits[0][1:].isdigit():
                if self.port in (6002, 6005):
                    remote_service_name = 'swift-account-server'
                elif self.port in (6001, 6004):
                    remote_service_name = 'swift-container-server'
                else:
                    remote_service_name = 'swift-object-server'
        except Exception:
            pass
        span_ctx.add_remote_endpoint(host=self.host, port=self.port,
                                     service_name=remote_service_name)

        # Propagate the trace to the server via B3 headers.
        b3_headers = span_ctx.create_http_headers_for_my_span()
        for h, v in b3_headers.items():
            self.putheader(h, v)
    __org_endheaders__(self)
    if api.has_default_tracer():
        span_ctx._fd_key = self.sock.fileno()
        _span_contexts_by_fd[span_ctx._fd_key] = [span_ctx, True]

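# ``__org_endheaders__`` and ``_span_contexts_by_fd`` are module globals
# defined outside this section.  The monkey-patch wiring presumably looks
# something like this sketch (the HTTPConnection import path is an
# assumption):
#
#     from http.client import HTTPConnection
#
#     __org_endheaders__ = HTTPConnection.endheaders
#     HTTPConnection.endheaders = _patched_endheaders
#     _span_contexts_by_fd = {}  # fd -> [span_ctx, still-open flag]
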
def _get_multi(self, keys, server_key):
    """
    Gets multiple values from memcache for the given keys.

    :param keys: keys for values to be retrieved from memcache
    :param server_key: key to use in determining which server in the ring
                       is used
    :returns: list of values
    """
    orig_key = server_key
    orig_keys = keys
    server_key = memcached.md5hash(server_key)
    keys = [memcached.md5hash(key) for key in keys]
    for (server, fp, sock) in self._get_conns(server_key):
        with api.ezipkin_client_span(
                api.default_service_name(),
                span_name='get_multi',
                binary_annotations={
                    "memcached.key": orig_key,
                    "memcached.keys": ",".join(orig_keys),
                },
        ) as zipkin_span:
            add_remote_endpoint(zipkin_span, server)
            try:
                with Timeout(self._io_timeout):
                    sock.sendall(b'get ' + b' '.join(keys) + b'\r\n')
                    line = fp.readline().strip().split()
                    responses = {}
                    while True:
                        if not line:
                            raise memcached.MemcacheConnectionError(
                                'incomplete read')
                        if line[0].upper() == b'END':
                            break
                        if line[0].upper() == b'VALUE':
                            size = int(line[3])
                            value = fp.read(size)
                            if int(line[2]) & memcached.PICKLE_FLAG:
                                if self._allow_unpickle:
                                    value = pickle.loads(value)
                                else:
                                    value = None
                            elif int(line[2]) & memcached.JSON_FLAG:
                                value = json.loads(value)
                            responses[line[1]] = value
                            fp.readline()
                        line = fp.readline().strip().split()
                values = []
                for key in keys:
                    if key in responses:
                        values.append(responses[key])
                    else:
                        values.append(None)
                self._return_conn(server, fp, sock)
                return values
            except (Exception, Timeout) as e:
                self._exception_occurred(server, e, sock=sock, fp=fp)

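# For reference, the parser above walks the standard memcached text
# protocol response, one VALUE block per cache hit:
#
#     VALUE <hashed-key> <flags> <bytes>\r\n
#     <data block of exactly <bytes> bytes>\r\n
#     ...
#     END\r\n
#
# hence the pattern of one header readline, one sized read, and one
# trailing readline per hit before the next header line.
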
def _set_multi(self, mapping, server_key, serialize=True, time=0,
               min_compress_len=0):
    """
    Sets multiple key/value pairs in memcache.

    :param mapping: dictionary of keys and values to be set in memcache
    :param server_key: key to use in determining which server in the ring
                       is used
    :param serialize: if True, value is serialized with JSON before sending
                      to memcache, or with pickle if configured to use
                      pickle instead of JSON (to avoid cache poisoning)
    :param time: the time to live
    :param min_compress_len: minimum compress length, this parameter was
                             added to keep the signature compatible with
                             the python-memcached interface. This
                             implementation ignores it.
    """
    orig_key = server_key
    server_key = memcached.md5hash(server_key)
    timeout = memcached.sanitize_timeout(time)
    msg = []
    for key, value in mapping.items():
        key = memcached.md5hash(key)
        flags = 0
        if serialize and self._allow_pickle:
            value = pickle.dumps(value, memcached.PICKLE_PROTOCOL)
            flags |= memcached.PICKLE_FLAG
        elif serialize:
            if isinstance(value, bytes):
                value = value.decode('utf8')
            value = json.dumps(value).encode('ascii')
            flags |= memcached.JSON_FLAG
        msg.append(memcached.set_msg(key, flags, timeout, value))
    for (server, fp, sock) in self._get_conns(server_key):
        with api.ezipkin_client_span(
                api.default_service_name(),
                span_name='set_multi',
                binary_annotations={
                    "memcached.key": orig_key,
                    "memcached.keys": ",".join(mapping),
                },
        ) as zipkin_span:
            add_remote_endpoint(zipkin_span, server)
            try:
                with Timeout(self._io_timeout):
                    sock.sendall(b''.join(msg))
                    # Wait for the sets to complete: one response line per
                    # key that was set.
                    for _ in range(len(mapping)):
                        fp.readline()
                self._return_conn(server, fp, sock)
                return
            except (Exception, Timeout) as e:
                self._exception_occurred(server, e, sock=sock, fp=fp)

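# ``memcached.set_msg`` frames a single memcached "set" command.  A
# minimal sketch of the equivalent framing (the real helper lives in
# swift.common.memcached; this is illustrative, not the actual code):
#
#     def set_msg(key, flags, timeout, value):
#         return b' '.join([
#             b'set', key,
#             str(flags).encode('ascii'),
#             str(timeout).encode('ascii'),
#             str(len(value)).encode('ascii'),
#         ]) + b'\r\n' + value + b'\r\n'
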
def _set(self, key, value, serialize=True, time=0, min_compress_len=0):
    """
    Set a key/value pair in memcache

    :param key: key
    :param value: value
    :param serialize: if True, value is serialized with JSON before sending
                      to memcache, or with pickle if configured to use
                      pickle instead of JSON (to avoid cache poisoning)
    :param time: the time to live
    :param min_compress_len: minimum compress length, this parameter was
                             added to keep the signature compatible with
                             the python-memcached interface. This
                             implementation ignores it.
    """
    orig_key = key
    key = memcached.md5hash(key)
    timeout = memcached.sanitize_timeout(time)
    flags = 0
    if serialize and self._allow_pickle:
        value = pickle.dumps(value, memcached.PICKLE_PROTOCOL)
        flags |= memcached.PICKLE_FLAG
    elif serialize:
        if isinstance(value, bytes):
            value = value.decode('utf8')
        value = json.dumps(value).encode('ascii')
        flags |= memcached.JSON_FLAG
    elif not isinstance(value, bytes):
        value = str(value).encode('utf-8')
    for (server, fp, sock) in self._get_conns(key):
        with api.ezipkin_client_span(
                api.default_service_name(),
                span_name='set',
                binary_annotations={
                    "memcached.key": orig_key,
                },
        ) as zipkin_span:
            add_remote_endpoint(zipkin_span, server)
            try:
                with Timeout(self._io_timeout):
                    sock.sendall(memcached.set_msg(key, flags, timeout,
                                                   value))
                    # Wait for the set to complete
                    fp.readline()
                self._return_conn(server, fp, sock)
                return
            except (Exception, Timeout) as e:
                self._exception_occurred(server, e, sock=sock, fp=fp)

def _get(self, key):
    """
    Gets the object specified by key.  It will also unserialize the object
    before returning if it is serialized in memcache with JSON, or if it
    is pickled and unpickling is allowed.

    :param key: key
    :returns: value of the key in memcache
    """
    orig_key = key
    key = memcached.md5hash(key)
    value = None
    for (server, fp, sock) in self._get_conns(key):
        with api.ezipkin_client_span(
                api.default_service_name(),
                span_name='get',
                binary_annotations={
                    "memcached.key": orig_key,
                },
        ) as zipkin_span:
            add_remote_endpoint(zipkin_span, server)
            try:
                with Timeout(self._io_timeout):
                    sock.sendall(b'get ' + key + b'\r\n')
                    line = fp.readline().strip().split()
                    while True:
                        if not line:
                            raise memcached.MemcacheConnectionError(
                                'incomplete read')
                        if line[0].upper() == b'END':
                            break
                        if line[0].upper() == b'VALUE' and line[1] == key:
                            size = int(line[3])
                            value = fp.read(size)
                            if int(line[2]) & memcached.PICKLE_FLAG:
                                if self._allow_unpickle:
                                    value = pickle.loads(value)
                                else:
                                    value = None
                            elif int(line[2]) & memcached.JSON_FLAG:
                                value = json.loads(value)
                            fp.readline()
                        line = fp.readline().strip().split()
                self._return_conn(server, fp, sock)
                return value
            except (Exception, Timeout) as e:
                self._exception_occurred(server, e, sock=sock, fp=fp)

def _incr(self, key, delta=1, time=0):
    """
    Increments a key which has a numeric value by delta.
    If the key can't be found, it's added as delta or 0 if delta < 0.
    If passed a negative number, will use memcached's decr. Returns
    the int stored in memcached.
    Note: The data memcached stores as the result of incr/decr is
    an unsigned int.  decr's that result in a number below 0 are
    stored as 0.

    :param key: key
    :param delta: amount to add to the value of key (or set as the value
                  if the key is not found) will be cast to an int
    :param time: the time to live
    :returns: result of incrementing
    :raises MemcacheConnectionError:
    """
    orig_key = key
    key = memcached.md5hash(key)
    command = b'incr'
    if delta < 0:
        command = b'decr'
    # Pick the span name before delta is re-encoded as bytes below;
    # comparing the encoded delta against 0 would raise a TypeError.
    span_name = 'decr' if command == b'decr' else 'incr'
    delta = str(abs(int(delta))).encode('ascii')
    timeout = memcached.sanitize_timeout(time)
    for (server, fp, sock) in self._get_conns(key):
        with api.ezipkin_client_span(
                api.default_service_name(),
                span_name=span_name,
                binary_annotations={
                    "memcached.key": orig_key,
                },
        ) as zipkin_span:
            add_remote_endpoint(zipkin_span, server)
            try:
                with Timeout(self._io_timeout):
                    sock.sendall(b' '.join([
                        command, key, delta]) + b'\r\n')
                    line = fp.readline().strip().split()
                    if not line:
                        raise memcached.MemcacheConnectionError(
                            'incomplete read')
                    if line[0].upper() == b'NOT_FOUND':
                        add_val = delta
                        if command == b'decr':
                            add_val = b'0'
                        sock.sendall(b' '.join([
                            b'add', key, b'0',
                            str(timeout).encode('ascii'),
                            str(len(add_val)).encode('ascii')
                        ]) + b'\r\n' + add_val + b'\r\n')
                        line = fp.readline().strip().split()
                        if line[0].upper() == b'NOT_STORED':
                            # Lost a race with another writer; retry the
                            # original incr/decr against the value that
                            # now exists.
                            sock.sendall(b' '.join([
                                command, key, delta]) + b'\r\n')
                            line = fp.readline().strip().split()
                            ret = int(line[0].strip())
                        else:
                            ret = int(add_val)
                    else:
                        ret = int(line[0].strip())
                self._return_conn(server, fp, sock)
                return ret
            except (Exception, Timeout) as e:
                self._exception_occurred(server, e, sock=sock, fp=fp)
    raise memcached.MemcacheConnectionError(
        "No Memcached connections succeeded.")

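# Usage semantics once this is patched onto the memcache client
# (presumably swift's MemcacheRing; values illustrative).  memcached
# stores incr/decr results as an unsigned int, so decrements floor at 0:
#
#     cache.incr('hits')            # missing key -> added as 1
#     cache.incr('hits', delta=4)   # 1 + 4 -> 5
#     cache.incr('hits', delta=-7)  # 5 - 7 -> clamped to 0
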
def _patched_handle_one_response(self):
    zipkin_attrs = api.extract_zipkin_attrs_from_headers(
        self.headers, sample_rate=api.sample_rate_pct,
        use_128bit_trace_id=True)
    binary_annotations = {
        "http.uri": self.path,
        "worker.pid": str(os.getpid()),
    }
    local_ip, local_port = self.request.getsockname()[:2]
    # The PROXY protocol's proxy_address, when present, is a better
    # identity for "this server" than the raw values from the socket.
    proxy_proto_proxy_ip, proxy_proto_proxy_port = getattr(
        self, 'proxy_address', (None, None))
    if proxy_proto_proxy_ip:
        local_ip = proxy_proto_proxy_ip
        local_port = int(proxy_proto_proxy_port)

    # Get the best client IP/port we can
    proxy_proto_client_ip, proxy_proto_client_port = getattr(
        self, 'client_address', (None, None))
    raw_peer_ip, raw_peer_port = self.request.getpeername()[:2]
    forwarded_for = self.headers.get('X-Forwarded-For')
    if proxy_proto_client_ip:
        # Try results of the PROXY protocol first
        client_ip = proxy_proto_client_ip
        client_port = int(proxy_proto_client_port)
    elif forwarded_for:
        # Fall back on the standard X-Forwarded-For header
        client_ip = forwarded_for.split(',')[0].strip()
        client_port = raw_peer_port
    else:
        # Failing all that, just use the other end of the raw socket.
        client_ip = raw_peer_ip
        client_port = raw_peer_port

    with api.ezipkin_server_span(
            service_name=api.default_service_name(),
            span_name=self.command,
            zipkin_attrs=zipkin_attrs,
            sample_rate=None if zipkin_attrs else api.sample_rate_pct,
            host=local_ip, port=local_port,
            binary_annotations=binary_annotations,
    ) as zipkin_span:
        # For swift servers, extract a canonical service name and PID
        # from the User-Agent header.
        user_agent = self.headers.get('User-Agent', 'unknown')
        match = re.match(r'([a-zA-Z]+-server) (\d+)$', user_agent)
        if match:
            user_agent = 'swift-' + match.group(1)
            zipkin_span.update_binary_annotations({
                'client.pid': int(match.group(2)),
            })
        zipkin_span.add_remote_endpoint(client_port, user_agent, client_ip)

        # Add in a hook to snarf out the response status
        self.environ['eventlet.posthooks'].append(
            (_extract_status_code, (zipkin_span, ), {}),
        )

        __original_handle_one_response__(self)

        # If we're a root span, see if we can extract a Swift transaction
        # ID to associate with this (one-per-trace) root span.  We don't
        # track it on every span because Zipkin's trace_id/span_id/
        # parent_id values already link everything together and more
        # copies of the swift txid would just waste space.
        SWIFT_TRANS_ID_KEY = 'swift.trans_id'
        if not zipkin_span.zipkin_attrs.parent_span_id:
            if SWIFT_TRANS_ID_KEY in self.environ:
                zipkin_span.update_binary_annotations({
                    SWIFT_TRANS_ID_KEY: self.environ[SWIFT_TRANS_ID_KEY],
                })

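# ``__original_handle_one_response__`` is saved outside this section.  The
# wiring presumably patches eventlet's wsgi protocol class, along these
# lines:
#
#     import eventlet.wsgi
#
#     __original_handle_one_response__ = \
#         eventlet.wsgi.HttpProtocol.handle_one_response
#     eventlet.wsgi.HttpProtocol.handle_one_response = \
#         _patched_handle_one_response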