Exemplo n.º 1
0
 def test_chr_add_node_duplicate(self):
     """Re-adding an existing node must not change the ring membership."""
     members = [
         ("127.0.0.1", "cache0"),
         ("127.0.0.1", "cache1"),
         ("127.0.0.1", "cache2"),
     ]
     ring = ConsistentHashRing(members)
     self.assertEqual(ring.nodes, set(members))
     # Duplicate insert: the node set and node count stay unchanged.
     ring.add_node(("127.0.0.1", "cache2"))
     self.assertEqual(ring.nodes, set(members))
     self.assertEqual(ring.nodes_len, 3)
Exemplo n.º 2
0
 def test_chr_remove_node_missing(self):
     """Removing a node that was never added leaves the ring untouched."""
     members = [
         ("127.0.0.1", "cache0"),
         ("127.0.0.1", "cache1"),
         ("127.0.0.1", "cache2"),
     ]
     ring = ConsistentHashRing(members)
     self.assertEqual(ring.nodes, set(members))
     # Removing an unknown node is a no-op.
     ring.remove_node(("127.0.0.1", "cache4"))
     self.assertEqual(ring.nodes, set(members))
     self.assertEqual(ring.nodes_len, 3)
Exemplo n.º 3
0
 def test_chr_get_node_fnv1a(self):
     """get_node with fnv1a_ch hashing maps known metrics to expected hosts."""
     members = [
         ("127.0.0.1", "ba603c36342304ed77953f84ac4d357b"),
         ("127.0.0.2", "5dd63865534f84899c6e5594dba6749a"),
         ("127.0.0.3", "866a18b81f2dc4649517a1df13e26f28"),
     ]
     ring = ConsistentHashRing(members, hash_type='fnv1a_ch')
     self.assertEqual(
         ring.get_node('hosts.worker1.cpu'),
         ('127.0.0.1', 'ba603c36342304ed77953f84ac4d357b'))
     self.assertEqual(
         ring.get_node('hosts.worker2.cpu'),
         ('127.0.0.3', '866a18b81f2dc4649517a1df13e26f28'))
Exemplo n.º 4
0
 def test_chr_compute_ring_position(self):
     """Ring positions for known metrics match the recorded reference values."""
     ring = ConsistentHashRing([
         ("127.0.0.1", "cache0"),
         ("127.0.0.1", "cache1"),
         ("127.0.0.1", "cache2"),
     ])
     self.assertEqual(ring.compute_ring_position('hosts.worker1.cpu'), 64833)
     self.assertEqual(ring.compute_ring_position('hosts.worker2.cpu'), 38509)
Exemplo n.º 5
0
 def test_chr_get_nodes(self):
     """get_nodes returns every ring member, ordered by ring position for the key."""
     members = [
         ("127.0.0.1", "cache0"),
         ("127.0.0.1", "cache1"),
         ("127.0.0.1", "cache2"),
     ]
     ring = ConsistentHashRing(members)
     ordered = ring.get_nodes('hosts.worker1.cpu')
     self.assertEqual(ordered,
                      [('127.0.0.1', 'cache2'), ('127.0.0.1', 'cache0'),
                       ('127.0.0.1', 'cache1')])
Exemplo n.º 6
0
 def test_chr_remove_node_missing(self):
     """A remove of an absent node changes neither the node set nor the count."""
     expected = [("127.0.0.1", "cache0"), ("127.0.0.1", "cache1"),
                 ("127.0.0.1", "cache2")]
     ring = ConsistentHashRing(expected)
     self.assertEqual(ring.nodes, set(expected))
     ring.remove_node(("127.0.0.1", "cache4"))  # never added
     self.assertEqual(ring.nodes, set(expected))
     self.assertEqual(ring.nodes_len, 3)
Exemplo n.º 7
0
 def test_chr_add_node_duplicate(self):
     """Adding a node twice keeps the membership and node count stable."""
     expected = [("127.0.0.1", "cache0"), ("127.0.0.1", "cache1"),
                 ("127.0.0.1", "cache2")]
     ring = ConsistentHashRing(expected)
     self.assertEqual(ring.nodes, set(expected))
     ring.add_node(("127.0.0.1", "cache2"))  # already present
     self.assertEqual(ring.nodes, set(expected))
     self.assertEqual(ring.nodes_len, 3)
Exemplo n.º 8
0
 def test_chr_compute_ring_position_fnv1a(self):
     """fnv1a_ch hashing places known metrics at the expected ring positions."""
     ring = ConsistentHashRing(
         [("127.0.0.1", "ba603c36342304ed77953f84ac4d357b"),
          ("127.0.0.2", "5dd63865534f84899c6e5594dba6749a"),
          ("127.0.0.3", "866a18b81f2dc4649517a1df13e26f28")],
         hash_type='fnv1a_ch')
     self.assertEqual(ring.compute_ring_position('hosts.worker1.cpu'), 59573)
     self.assertEqual(ring.compute_ring_position('hosts.worker2.cpu'), 35749)
Exemplo n.º 9
0
 def test_chr_get_node_fnv1a(self):
     """Known metric names resolve to the expected owners under fnv1a_ch."""
     ring = ConsistentHashRing(
         [("127.0.0.1", "ba603c36342304ed77953f84ac4d357b"),
          ("127.0.0.2", "5dd63865534f84899c6e5594dba6749a"),
          ("127.0.0.3", "866a18b81f2dc4649517a1df13e26f28")],
         hash_type='fnv1a_ch')
     owner1 = ring.get_node('hosts.worker1.cpu')
     self.assertEqual(owner1, ('127.0.0.1', 'ba603c36342304ed77953f84ac4d357b'))
     owner2 = ring.get_node('hosts.worker2.cpu')
     self.assertEqual(owner2, ('127.0.0.3', '866a18b81f2dc4649517a1df13e26f28'))
Exemplo n.º 10
0
 def __init__(self, hosts, timeout):
     """Index hosts by (server, instance), build the hash ring and empty socket pools."""
     self.hosts = [(server, instance) for (server, port, instance) in hosts]
     self.ports = {(server, instance): port
                   for (server, port, instance) in hosts}
     self.timeout = float(timeout)
     self.hashRing = ConsistentHashRing(self.hosts)
     self.connections = {}
     # One (initially empty) pool of reusable sockets per host
     for endpoint in self.hosts:
         self.connections[endpoint] = set()
Exemplo n.º 11
0
  def __init__(self, hosts, timeout):
    """Build host/port maps, enforce REPLICATION_FACTOR, and create socket pools."""
    self.hosts = [(server, instance) for (server, port, instance) in hosts]
    self.ports = {(server, instance): port for (server, port, instance) in hosts}
    self.timeout = float(timeout)
    servers = {server for (server, port, instance) in hosts}
    # Replication needs at least as many distinct servers as copies.
    if len(servers) < settings.REPLICATION_FACTOR:
      raise Exception("REPLICATION_FACTOR=%d cannot exceed servers=%d" % (settings.REPLICATION_FACTOR, len(servers)))

    self.hash_ring = ConsistentHashRing(self.hosts)
    self.keyfunc = load_keyfunc()
    self.connections = {}
    self.last_failure = {}
    # One empty connection pool per host
    for endpoint in self.hosts:
      self.connections[endpoint] = set()
Exemplo n.º 12
0
 def __init__(self, hosts, timeout):
   """Record the socket timeout, host/port maps, hash ring and per-host pools."""
   self.hosts = [(server, instance) for (server, port, instance) in hosts]
   self.ports = {(server, instance): port for (server, port, instance) in hosts}
   self.timeout = float(timeout)
   self.hash_ring = ConsistentHashRing(self.hosts)
   self.connections = {}
   self.last_failure = {}
   # Start every host with an empty pool of reusable sockets.
   for endpoint in self.hosts:
     self.connections[endpoint] = set()
Exemplo n.º 13
0
  def __init__(self, hosts, timeout):
    """Set up host/port lookup tables, the hash ring, keyfunc and socket pools.

    Raises if REPLICATION_FACTOR exceeds the number of distinct servers.
    """
    self.hosts = [(server, instance) for (server, port, instance) in hosts]
    self.ports = {(server, instance): port for (server, port, instance) in hosts}
    self.timeout = float(timeout)
    distinct_servers = {server for (server, port, instance) in hosts}
    if len(distinct_servers) < settings.REPLICATION_FACTOR:
      raise Exception("REPLICATION_FACTOR=%d cannot exceed servers=%d" % (settings.REPLICATION_FACTOR, len(distinct_servers)))

    self.hash_ring = ConsistentHashRing(self.hosts)
    self.keyfunc = load_keyfunc()
    self.connections = {}
    self.last_failure = {}
    # Each host gets its own (initially empty) connection pool.
    for endpoint in self.hosts:
      self.connections[endpoint] = set()
Exemplo n.º 14
0
class CarbonLinkPool:
  """Pool of carbon-cache connections with replicated host selection.

  NOTE(review): this snippet uses Python 2 syntax (``except Exception,e``)
  and is truncated at the end of ``send_request``.
  """
  def __init__(self, hosts, timeout):
    # hosts: (server, port, instance) triples; timeout: socket timeout in seconds.
    self.hosts = [ (server, instance) for (server, port, instance) in hosts ]
    self.ports = dict( ((server, instance), port) for (server, port, instance) in hosts )
    self.timeout = float(timeout)
    servers = set([server for (server, port, instance) in hosts])
    # Replication needs at least as many distinct servers as copies.
    if len(servers) < settings.REPLICATION_FACTOR:
      raise Exception("REPLICATION_FACTOR=%d cannot exceed servers=%d" % (settings.REPLICATION_FACTOR, len(servers)))

    self.hash_ring = ConsistentHashRing(self.hosts)
    self.keyfunc = load_keyfunc()
    self.connections = {}
    self.last_failure = {}
    # Create a connection pool for each host
    for host in self.hosts:
      self.connections[host] = set()

  def select_host(self, metric):
    "Returns the carbon host that has data for the given metric"
    key = self.keyfunc(metric)
    nodes = []
    servers = set()
    # Walk the ring, taking at most one node per distinct server, until
    # REPLICATION_FACTOR servers are represented.
    for node in self.hash_ring.get_nodes(key):
      (server, instance) = node
      if server in servers:
        continue
      servers.add(server)
      nodes.append(node)
      if len(servers) >= settings.REPLICATION_FACTOR:
        break

    # Prefer hosts reported available; fall back to any candidate.
    available = [ n for n in nodes if self.is_available(n) ]
    return random.choice(available or nodes)

  def is_available(self, host):
    # NOTE(review): this comparison looks inverted — it returns True for a
    # host whose last failure was *recent*; '>' would match the apparent
    # intent of skipping recently-failed hosts. Confirm before relying on it.
    now = time.time()
    last_fail = self.last_failure.get(host, 0)
    return (now - last_fail) < settings.CARBONLINK_RETRY_DELAY

  def get_connection(self, host):
    """Return a pooled socket for host, or open and configure a new one."""
    # First try to take one out of the pool for this host
    (server, instance) = host
    port = self.ports[host]
    connectionPool = self.connections[host]
    try:
      return connectionPool.pop()
    except KeyError:
      pass #nothing left in the pool, gotta make a new connection

    log.cache("CarbonLink creating a new socket for %s" % str(host))
    connection = socket.socket()
    connection.settimeout(self.timeout)
    try:
      connection.connect( (server, port) )
    except:
      # Record when the connect failed so backoff logic can consult it.
      self.last_failure[host] = time.time()
      raise
    else:
      connection.setsockopt( socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1 )
      return connection

  def query(self, metric):
    """Fetch the in-memory datapoints for metric from its carbon-cache."""
    request = dict(type='cache-query', metric=metric)
    results = self.send_request(request)
    log.cache("CarbonLink cache-query request for %s returned %d datapoints" % (metric, len(results['datapoints'])))
    return results['datapoints']

  def get_metadata(self, metric, key):
    """Ask the owning carbon-cache for metadata `key` of `metric`."""
    request = dict(type='get-metadata', metric=metric, key=key)
    results = self.send_request(request)
    log.cache("CarbonLink get-metadata request received for %s:%s" % (metric, key))
    return results['value']

  def set_metadata(self, metric, key, value):
    """Set metadata `key` of `metric` on the owning carbon-cache."""
    request = dict(type='set-metadata', metric=metric, key=key, value=value)
    results = self.send_request(request)
    log.cache("CarbonLink set-metadata request received for %s:%s" % (metric, key))
    return results

  def send_request(self, request):
    """Pickle `request`, send it length-prefixed to the owning host, return the reply."""
    metric = request['metric']
    serialized_request = pickle.dumps(request, protocol=-1)
    len_prefix = struct.pack("!L", len(serialized_request))
    request_packet = len_prefix + serialized_request
    result = {}
    result.setdefault('datapoints', [])

    # Carbon's own self-instrumentation metrics live on every daemon.
    if metric.startswith(settings.CARBON_METRIC_PREFIX):
      return self.send_request_to_all(request)

    host = self.select_host(metric)
    conn = self.get_connection(host)
    log.cache("CarbonLink sending request for %s to %s" % (metric, str(host)))
    try:
      conn.sendall(request_packet)
      result = self.recv_response(conn)
    except Exception,e:
      self.last_failure[host] = time.time()
      log.cache("Exception getting data from cache %s: %s" % (str(host), e))
    else:
      # NOTE(review): snippet truncated here — the success branch (pooling the
      # connection and returning the result) is missing from the listing.
Exemplo n.º 15
0
class CarbonLinkPool:
  """Connection pool for carbon-cache daemons.

  Each metric is mapped to a single (server, instance) pair via a
  consistent hash ring; one socket pool is kept per pair and sockets are
  reused after successful requests.
  """

  def __init__(self, hosts, timeout):
    """hosts: iterable of (server, port, instance) triples.
    timeout: socket timeout in seconds (coerced to float).
    """
    self.hosts = [ (server, instance) for (server, port, instance) in hosts ]
    self.ports = dict( ((server, instance), port) for (server, port, instance) in hosts )
    self.timeout = float(timeout)
    self.hash_ring = ConsistentHashRing(self.hosts)
    self.connections = {}
    self.last_failure = {}
    # Create a connection pool for each host
    for host in self.hosts:
      self.connections[host] = set()

  def select_host(self, metric):
    "Returns the carbon host that has data for the given metric"
    return self.hash_ring.get_node(metric)

  def get_connection(self, host):
    """Return a pooled socket for host, or open and configure a new one.

    Connect failures record the failure time in self.last_failure and
    re-raise.
    """
    # First try to take one out of the pool for this host
    (server, instance) = host
    port = self.ports[host]
    connectionPool = self.connections[host]
    try:
      return connectionPool.pop()
    except KeyError:
      pass #nothing left in the pool, gotta make a new connection

    log.cache("CarbonLink creating a new socket for %s" % str(host))
    connection = socket.socket()
    connection.settimeout(self.timeout)
    try:
      connection.connect( (server, port) )
    except Exception:
      # Narrowed from a bare except: SystemExit/KeyboardInterrupt now pass
      # through untouched; the failure is still recorded and re-raised.
      self.last_failure[host] = time.time()
      raise
    else:
      connection.setsockopt( socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1 )
      return connection

  def query(self, metric):
    """Fetch the in-memory datapoints for metric from its carbon-cache."""
    request = dict(type='cache-query', metric=metric)
    results = self.send_request(request)
    # Bug fix: log the number of datapoints; len(results) counted the
    # response dict's keys, not the datapoints returned.
    log.cache("CarbonLink cache-query request for %s returned %d datapoints" % (metric, len(results['datapoints'])))
    return results['datapoints']

  def get_metadata(self, metric, key):
    """Ask the owning carbon-cache for metadata `key` of `metric`."""
    request = dict(type='get-metadata', metric=metric, key=key)
    results = self.send_request(request)
    log.cache("CarbonLink get-metadata request received for %s:%s" % (metric, key))
    return results['value']

  def set_metadata(self, metric, key, value):
    """Set metadata `key` of `metric` on the owning carbon-cache."""
    request = dict(type='set-metadata', metric=metric, key=key, value=value)
    results = self.send_request(request)
    log.cache("CarbonLink set-metadata request received for %s:%s" % (metric, key))
    return results

  def send_request(self, request):
    """Serialize `request`, send it to the owning host, return the parsed reply.

    Raises CarbonLinkRequestError if the daemon reports an error; socket
    errors mark the host failed and propagate.
    """
    metric = request['metric']
    serialized_request = pickle.dumps(request, protocol=-1)
    len_prefix = struct.pack("!L", len(serialized_request))
    request_packet = len_prefix + serialized_request

    host = self.select_host(metric)
    conn = self.get_connection(host)
    try:
      conn.sendall(request_packet)
      result = self.recv_response(conn)
    except Exception:
      self.last_failure[host] = time.time()
      raise
    else:
      # Request succeeded: return the socket to the pool for reuse.
      self.connections[host].add(conn)
      if 'error' in result:
        raise CarbonLinkRequestError(result['error'])
      else:
        return result

  def recv_response(self, conn):
    """Read one length-prefixed pickle message from conn and deserialize it."""
    len_prefix = recv_exactly(conn, 4)
    body_size = struct.unpack("!L", len_prefix)[0]
    body = recv_exactly(conn, body_size)
    # SECURITY: pickle.loads on network data is only acceptable because the
    # carbon daemon is a trusted, locally-configured peer.
    return pickle.loads(body)
Exemplo n.º 16
0
 def test_chr_compute_ring_position(self):
     """Known metrics land at the recorded reference ring positions."""
     members = [("127.0.0.1", "cache0"), ("127.0.0.1", "cache1"),
                ("127.0.0.1", "cache2")]
     ring = ConsistentHashRing(members)
     pos1 = ring.compute_ring_position('hosts.worker1.cpu')
     self.assertEqual(pos1, 64833)
     pos2 = ring.compute_ring_position('hosts.worker2.cpu')
     self.assertEqual(pos2, 38509)
Exemplo n.º 17
0
class CarbonLinkPool:
    """Legacy CarbonLink pool speaking the old length-prefixed metric-name protocol.

    NOTE(review): string/bytes handling (``buf = ''`` accumulated from
    ``recv``, ``struct.pack(...) + metric``) implies Python 2, where str is
    bytes — confirm before running under Python 3.
    """
    def __init__(self, hosts, timeout):
        # hosts: (server, port, instance) triples; timeout: socket timeout in seconds.
        self.hosts = [(server, instance) for (server, port, instance) in hosts]
        self.ports = dict(
            ((server, instance), port) for (server, port, instance) in hosts)
        self.timeout = float(timeout)
        self.hashRing = ConsistentHashRing(self.hosts)
        self.connections = {}
        # Create a connection pool for each host
        for host in self.hosts:
            self.connections[host] = set()

    def selectHost(self, metric):
        "Returns the carbon host that has data for the given metric"
        return self.hashRing.get_node(metric)

    def getConnection(self, host):
        """Return a pooled socket for host, or open and configure a new one."""
        # First try to take one out of the pool for this host
        (server, instance) = host
        port = self.ports[host]
        connectionPool = self.connections[host]
        try:
            return connectionPool.pop()
        except KeyError:
            pass  #nothing left in the pool, gotta make a new connection

        log.cache("CarbonLink creating a new socket for %s" % str(host))
        connection = socket.socket()
        connection.settimeout(self.timeout)
        connection.connect((server, port))
        connection.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        return connection

    def putConnectionInPool(self, host, connection):
        # Return a socket to the host's pool for reuse.
        connectionPool = self.connections[host]
        connectionPool.add(connection)

    def removeConnectionFromPool(self, host, connection):
        # Drop a (presumably broken) socket; missing entries are ignored.
        connectionPool = self.connections.get(host, set())
        connectionPool.discard(connection)

    def sendRequest(self, metric):
        "Sends a request and returns a completion callback"
        host = self.selectHost(metric)
        query = struct.pack(
            "!L", len(metric)) + metric  # 32-bit length prefix string
        connection = None

        try:
            connection = self.getConnection(host)
            connection.sendall(query)

            # To keep things asynchronous we return a result callback
            # NOTE: receiveResponse is a generator function — the network
            # reads below only run when the caller iterates the result, and
            # any exception during them is logged, ending the generator.
            def receiveResponse():
                try:
                    buf = ''
                    remaining = 4
                    message_size = None

                    while remaining:
                        packet = connection.recv(remaining)
                        # NOTE(review): assert is stripped under python -O;
                        # a lost connection would then loop on empty reads.
                        assert packet, "CarbonLink lost connection to %s" % str(
                            host)

                        buf += packet

                        if message_size is None:
                            # First 4 bytes are the big-endian body length.
                            if len(buf) == 4:
                                remaining = message_size = struct.unpack(
                                    "!L", buf)[0]
                                buf = ''
                                continue

                        remaining -= len(packet)

                    # We're done with the connection for this request, put it in the pool
                    self.putConnectionInPool(host, connection)

                    # Now parse the response
                    points = pickle.loads(buf)
                    log.cache("CarbonLink to %s, retrieved %d points for %s" %
                              (host, len(points), metric))

                    for point in points:
                        yield point

                except:
                    log.exception(
                        "CarbonLink to %s, exception while getting response" %
                        str(host))
                    self.removeConnectionFromPool(host, connection)

            return receiveResponse
        except:
            log.exception("CarbonLink to %s, exception while sending request" %
                          str(host))
            if connection:
                self.removeConnectionFromPool(host, connection)
            noResults = lambda: []
            return noResults
Exemplo n.º 18
0
# whisper-clean: delete .wsp files whose metric hashes to an "unwelcome"
# instance (an argument prefixed with '-') on the consistent hash ring.
instances = []
unwelcome_instances = []
for arg in sys.argv[1:]:
    unwelcome = False
    if arg.startswith('-'):
        # A leading '-' marks the instance whose files should be removed.
        arg = arg[1:]
        unwelcome = True
    instance = tuple(arg.split(':', 2))
    instances.append(instance)
    if unwelcome:
        unwelcome_instances.append(instance)
if 0 == len(instances):
    print('Usage: python whisper-clean.py [-]<address>:<instance>[...]')
    sys.exit(1)

ring = ConsistentHashRing(instances)

for dirname, dirnames, filenames in os.walk('/var/lib/graphite/whisper'):
    for filename in filenames:
        pathname = os.path.join(dirname, filename)
        basename, ext = os.path.splitext(filename)
        if '.wsp' != ext:
            print('skipping %s' %
                  os.path.relpath(pathname, '/var/lib/graphite/whisper'))
            # Bug fix: without this `continue`, non-.wsp files were still
            # hashed below and could be unlinked despite being "skipped".
            continue
        # Map the file path to its dotted metric name and check its owner.
        metric = os.path.relpath(os.path.join(dirname, basename),
                                 '/var/lib/graphite/whisper').replace('/', '.')
        if ring.get_node(metric) in unwelcome_instances:
            print('unlinking %s' % pathname)
            os.unlink(pathname)
Exemplo n.º 19
0
 def test_chr_get_nodes(self):
     """All members come back from get_nodes, in ring order for the key."""
     members = [("127.0.0.1", "cache0"), ("127.0.0.1", "cache1"),
                ("127.0.0.1", "cache2")]
     ring = ConsistentHashRing(members)
     ordering = ring.get_nodes('hosts.worker1.cpu')
     self.assertEqual(
         ordering,
         [('127.0.0.1', 'cache2'), ('127.0.0.1', 'cache0'), ('127.0.0.1', 'cache1')])
Exemplo n.º 20
0
class CarbonLinkPool(object):
    """Pool of carbon-cache connections with replication-aware host selection.

    Metrics map to (server, instance) pairs via a consistent hash ring; up
    to REPLICATION_FACTOR distinct servers are considered per metric and
    recently-failed hosts are avoided for CARBONLINK_RETRY_DELAY seconds.
    """

    def __init__(self, hosts, timeout):
        """hosts: iterable of (server, port, instance) triples.
        timeout: socket timeout in seconds (coerced to float).

        Raises if REPLICATION_FACTOR exceeds the number of distinct servers.
        """
        self.hosts = [(server, instance) for (server, port, instance) in hosts]
        self.ports = {(server, instance): port
                      for (server, port, instance) in hosts}
        self.timeout = float(timeout)
        servers = set([server for (server, port, instance) in hosts])
        if len(servers) < settings.REPLICATION_FACTOR:
            raise Exception("REPLICATION_FACTOR=%d cannot exceed servers=%d" %
                            (settings.REPLICATION_FACTOR, len(servers)))

        self.hash_ring = ConsistentHashRing(
            self.hosts, hash_type=settings.CARBONLINK_HASHING_TYPE)
        self.keyfunc = load_keyfunc()
        self.connections = {}
        self.last_failure = {}
        # Create a connection pool for each host
        for host in self.hosts:
            self.connections[host] = set()

    def select_host(self, metric):
        "Returns the carbon host that has data for the given metric"
        key = self.keyfunc(metric)
        nodes = []
        servers = set()
        # Walk the ring, taking at most one node per distinct server, until
        # REPLICATION_FACTOR servers are represented.
        for node in self.hash_ring.get_nodes(key):
            (server, instance) = node
            if server in servers:
                continue
            servers.add(server)
            nodes.append(node)
            if len(servers) >= settings.REPLICATION_FACTOR:
                break

        # Prefer hosts that have not failed recently; fall back to any.
        available = [n for n in nodes if self.is_available(n)]
        return random.choice(available or nodes)

    def is_available(self, host):
        """True when host has not failed within CARBONLINK_RETRY_DELAY seconds."""
        now = time.time()
        last_fail = self.last_failure.get(host, 0)
        # Bug fix: the comparison was inverted ('<'), which reported
        # never-failed hosts as unavailable and recently-failed hosts as
        # available — defeating the retry backoff select_host relies on.
        return (now - last_fail) > settings.CARBONLINK_RETRY_DELAY

    def get_connection(self, host):
        """Return a pooled socket for host, or open and configure a new one.

        Connect failures record the failure time and re-raise.
        """
        # First try to take one out of the pool for this host
        (server, instance) = host
        port = self.ports[host]
        connectionPool = self.connections[host]
        try:
            return connectionPool.pop()
        except KeyError:
            pass  #nothing left in the pool, gotta make a new connection

        log.cache("CarbonLink creating a new socket for %s" % str(host))
        try:
            connection = socket.create_connection((server, port), self.timeout)
        except socket.error:
            self.last_failure[host] = time.time()
            raise
        else:
            connection.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            return connection

    def query(self, metric):
        """Fetch the in-memory datapoints for metric from its carbon-cache."""
        request = dict(type='cache-query', metric=metric)
        results = self.send_request(request)
        log.cache(
            "CarbonLink cache-query request for %s returned %d datapoints" %
            (metric, len(results['datapoints'])))
        return results['datapoints']

    def get_metadata(self, metric, key):
        """Ask the owning carbon-cache for metadata `key` of `metric`."""
        request = dict(type='get-metadata', metric=metric, key=key)
        results = self.send_request(request)
        log.cache("CarbonLink get-metadata request received for %s:%s" %
                  (metric, key))
        return results['value']

    def set_metadata(self, metric, key, value):
        """Set metadata `key` of `metric` on the owning carbon-cache."""
        request = dict(type='set-metadata',
                       metric=metric,
                       key=key,
                       value=value)
        results = self.send_request(request)
        log.cache("CarbonLink set-metadata request received for %s:%s" %
                  (metric, key))
        return results

    def send_request(self, request):
        """Serialize `request` and send it to the metric's owning host.

        Returns the parsed response dict (always containing 'datapoints').
        Carbon's own self-metrics are broadcast to every host instead.
        Raises CarbonLinkRequestError when the daemon reports an error.
        """
        metric = request['metric']
        serialized_request = pickle.dumps(
            request, protocol=settings.CARBONLINK_PICKLE_PROTOCOL)
        len_prefix = struct.pack("!L", len(serialized_request))
        request_packet = len_prefix + serialized_request
        result = {}
        result.setdefault('datapoints', [])

        if metric.startswith(settings.CARBON_METRIC_PREFIX):
            return self.send_request_to_all(request)

        if not self.hosts:
            log.cache(
                "CarbonLink is not connected to any host. Returning empty nodes list"
            )
            return result

        host = self.select_host(metric)
        conn = self.get_connection(host)
        log.cache("CarbonLink sending request for %s to %s" %
                  (metric, str(host)))
        try:
            conn.sendall(request_packet)
            result = self.recv_response(conn)
        except Exception as e:
            self.last_failure[host] = time.time()
            log.cache("Exception getting data from cache %s: %s" %
                      (str(host), e))
        else:
            # Success: return the socket to the pool for reuse.
            self.connections[host].add(conn)
            if 'error' in result:
                log.cache("Error getting data from cache: %s" %
                          result['error'])
                raise CarbonLinkRequestError(result['error'])
            log.cache("CarbonLink finished receiving %s from %s" %
                      (str(metric), str(host)))
        return result

    def send_request_to_all(self, request):
        """Broadcast `request` to every host; merge 'datapoints' by key.

        Per-host failures are logged and skipped, never raised.
        """
        metric = request['metric']
        serialized_request = pickle.dumps(
            request, protocol=settings.CARBONLINK_PICKLE_PROTOCOL)
        len_prefix = struct.pack("!L", len(serialized_request))
        request_packet = len_prefix + serialized_request
        results = {}
        results.setdefault('datapoints', {})

        for host in self.hosts:
            conn = self.get_connection(host)
            log.cache("CarbonLink sending request for %s to %s" %
                      (metric, str(host)))
            try:
                conn.sendall(request_packet)
                result = self.recv_response(conn)
            except Exception as e:
                self.last_failure[host] = time.time()
                log.cache("Exception getting data from cache %s: %s" %
                          (str(host), e))
            else:
                self.connections[host].add(conn)
                if 'error' in result:
                    log.cache("Error getting data from cache %s: %s" %
                              (str(host), result['error']))
                else:
                    # NOTE(review): '> 1' drops single-datapoint responses;
                    # looks like it should be '> 0' — confirm upstream intent
                    # before changing.
                    if len(result['datapoints']) > 1:
                        results['datapoints'].update(result['datapoints'])
            log.cache("CarbonLink finished receiving %s from %s" %
                      (str(metric), str(host)))

        return results

    def recv_response(self, conn):
        """Read one length-prefixed message from conn and deserialize it.

        Uses the project's restricted unpickler rather than raw pickle.
        """
        len_prefix = self.recv_exactly(conn, 4)
        body_size = struct.unpack("!L", len_prefix)[0]
        body = self.recv_exactly(conn, body_size)
        return unpickle.loads(body)

    @staticmethod
    def recv_exactly(conn, num_bytes):
        """Read exactly num_bytes from conn, raising if the peer disconnects."""
        buf = b''
        while len(buf) < num_bytes:
            data = conn.recv(num_bytes - len(buf))
            if not data:
                raise Exception("Connection lost")
            buf += data

        return buf
Exemplo n.º 21
0
class CarbonLinkPool:
    """Connection pool for carbon-cache daemons.

    Each metric is mapped to a single (server, instance) pair via a
    consistent hash ring; one socket pool is kept per pair and sockets are
    reused after successful requests.
    """

    def __init__(self, hosts, timeout):
        """hosts: iterable of (server, port, instance) triples.
        timeout: socket timeout in seconds (coerced to float).
        """
        self.hosts = [(server, instance) for (server, port, instance) in hosts]
        self.ports = dict(
            ((server, instance), port) for (server, port, instance) in hosts)
        self.timeout = float(timeout)
        self.hash_ring = ConsistentHashRing(self.hosts)
        self.connections = {}
        self.last_failure = {}
        # Create a connection pool for each host
        for host in self.hosts:
            self.connections[host] = set()

    def select_host(self, metric):
        "Returns the carbon host that has data for the given metric"
        return self.hash_ring.get_node(metric)

    def get_connection(self, host):
        """Return a pooled socket for host, or open and configure a new one.

        Connect failures record the failure time and re-raise.
        """
        # First try to take one out of the pool for this host
        (server, instance) = host
        port = self.ports[host]
        connectionPool = self.connections[host]
        try:
            return connectionPool.pop()
        except KeyError:
            pass  #nothing left in the pool, gotta make a new connection

        log.cache("CarbonLink creating a new socket for %s" % str(host))
        connection = socket.socket()
        connection.settimeout(self.timeout)
        try:
            connection.connect((server, port))
        except Exception:
            # Narrowed from a bare except: SystemExit/KeyboardInterrupt now
            # pass through; the failure is still recorded and re-raised.
            self.last_failure[host] = time.time()
            raise
        else:
            connection.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            return connection

    def query(self, metric):
        """Fetch the in-memory datapoints for metric from its carbon-cache."""
        request = dict(type='cache-query', metric=metric)
        results = self.send_request(request)
        # Bug fix: log the number of datapoints; len(results) counted the
        # response dict's keys, not the datapoints returned.
        log.cache(
            "CarbonLink cache-query request for %s returned %d datapoints" %
            (metric, len(results['datapoints'])))
        return results['datapoints']

    def get_metadata(self, metric, key):
        """Ask the owning carbon-cache for metadata `key` of `metric`."""
        request = dict(type='get-metadata', metric=metric, key=key)
        results = self.send_request(request)
        log.cache("CarbonLink get-metadata request received for %s:%s" %
                  (metric, key))
        return results['value']

    def set_metadata(self, metric, key, value):
        """Set metadata `key` of `metric` on the owning carbon-cache."""
        request = dict(type='set-metadata',
                       metric=metric,
                       key=key,
                       value=value)
        results = self.send_request(request)
        log.cache("CarbonLink set-metadata request received for %s:%s" %
                  (metric, key))
        return results

    def send_request(self, request):
        """Serialize `request`, send it to the owning host, return the reply.

        Raises CarbonLinkRequestError if the daemon reports an error;
        socket errors mark the host failed and propagate.
        """
        metric = request['metric']
        serialized_request = pickle.dumps(request, protocol=-1)
        len_prefix = struct.pack("!L", len(serialized_request))
        request_packet = len_prefix + serialized_request

        host = self.select_host(metric)
        conn = self.get_connection(host)
        try:
            conn.sendall(request_packet)
            result = self.recv_response(conn)
        except Exception:
            self.last_failure[host] = time.time()
            raise
        else:
            # Request succeeded: return the socket to the pool for reuse.
            self.connections[host].add(conn)
            if 'error' in result:
                raise CarbonLinkRequestError(result['error'])
            else:
                return result

    def recv_response(self, conn):
        """Read one length-prefixed pickle message from conn and deserialize it."""
        len_prefix = recv_exactly(conn, 4)
        body_size = struct.unpack("!L", len_prefix)[0]
        body = recv_exactly(conn, body_size)
        # SECURITY: pickle.loads on network data is only acceptable because
        # the carbon daemon is a trusted, locally-configured peer.
        return pickle.loads(body)
Exemplo n.º 22
0
 def test_chr_compute_ring_position_fnv1a(self):
     """Ring positions under fnv1a_ch hashing match the recorded values."""
     members = [
         ("127.0.0.1", "ba603c36342304ed77953f84ac4d357b"),
         ("127.0.0.2", "5dd63865534f84899c6e5594dba6749a"),
         ("127.0.0.3", "866a18b81f2dc4649517a1df13e26f28"),
     ]
     ring = ConsistentHashRing(members, hash_type='fnv1a_ch')
     self.assertEqual(ring.compute_ring_position('hosts.worker1.cpu'), 59573)
     self.assertEqual(ring.compute_ring_position('hosts.worker2.cpu'), 35749)
Exemplo n.º 23
0
class CarbonLinkPool:
    """Pool of carbon-cache connections with replicated host selection.

    NOTE(review): this snippet uses Python 2 syntax (``except Exception, e``)
    and is truncated at the end of ``send_request``.
    """
    def __init__(self, hosts, timeout):
        # hosts: (server, port, instance) triples; timeout: socket timeout in seconds.
        self.hosts = [(server, instance) for (server, port, instance) in hosts]
        self.ports = dict(
            ((server, instance), port) for (server, port, instance) in hosts)
        self.timeout = float(timeout)
        servers = set([server for (server, port, instance) in hosts])
        # Replication needs at least as many distinct servers as copies.
        if len(servers) < settings.REPLICATION_FACTOR:
            raise Exception("REPLICATION_FACTOR=%d cannot exceed servers=%d" %
                            (settings.REPLICATION_FACTOR, len(servers)))

        self.hash_ring = ConsistentHashRing(self.hosts)
        self.keyfunc = load_keyfunc()
        self.connections = {}
        self.last_failure = {}
        # Create a connection pool for each host
        for host in self.hosts:
            self.connections[host] = set()

    def select_host(self, metric):
        "Returns the carbon host that has data for the given metric"
        key = self.keyfunc(metric)
        nodes = []
        servers = set()
        # Walk the ring, taking at most one node per distinct server, until
        # REPLICATION_FACTOR servers are represented.
        for node in self.hash_ring.get_nodes(key):
            (server, instance) = node
            if server in servers:
                continue
            servers.add(server)
            nodes.append(node)
            if len(servers) >= settings.REPLICATION_FACTOR:
                break

        # Prefer hosts reported available; fall back to any candidate.
        available = [n for n in nodes if self.is_available(n)]
        return random.choice(available or nodes)

    def is_available(self, host):
        # NOTE(review): this comparison looks inverted — it returns True for a
        # host whose last failure was *recent*; '>' would match the apparent
        # intent of skipping recently-failed hosts. Confirm before relying on it.
        now = time.time()
        last_fail = self.last_failure.get(host, 0)
        return (now - last_fail) < settings.CARBONLINK_RETRY_DELAY

    def get_connection(self, host):
        """Return a pooled socket for host, or open and configure a new one."""
        # First try to take one out of the pool for this host
        (server, instance) = host
        port = self.ports[host]
        connectionPool = self.connections[host]
        try:
            return connectionPool.pop()
        except KeyError:
            pass  #nothing left in the pool, gotta make a new connection

        log.cache("CarbonLink creating a new socket for %s" % str(host))
        connection = socket.socket()
        connection.settimeout(self.timeout)
        try:
            connection.connect((server, port))
        except:
            # Record when the connect failed so backoff logic can consult it.
            self.last_failure[host] = time.time()
            raise
        else:
            connection.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            return connection

    def query(self, metric):
        """Fetch the in-memory datapoints for metric from its carbon-cache."""
        request = dict(type='cache-query', metric=metric)
        results = self.send_request(request)
        log.cache(
            "CarbonLink cache-query request for %s returned %d datapoints" %
            (metric, len(results['datapoints'])))
        return results['datapoints']

    def get_metadata(self, metric, key):
        """Ask the owning carbon-cache for metadata `key` of `metric`."""
        request = dict(type='get-metadata', metric=metric, key=key)
        results = self.send_request(request)
        log.cache("CarbonLink get-metadata request received for %s:%s" %
                  (metric, key))
        return results['value']

    def set_metadata(self, metric, key, value):
        """Set metadata `key` of `metric` on the owning carbon-cache."""
        request = dict(type='set-metadata',
                       metric=metric,
                       key=key,
                       value=value)
        results = self.send_request(request)
        log.cache("CarbonLink set-metadata request received for %s:%s" %
                  (metric, key))
        return results

    def send_request(self, request):
        """Pickle `request`, send it length-prefixed to the owning host, return the reply."""
        metric = request['metric']
        serialized_request = pickle.dumps(request, protocol=-1)
        len_prefix = struct.pack("!L", len(serialized_request))
        request_packet = len_prefix + serialized_request
        result = {}
        result.setdefault('datapoints', [])

        # Carbon's own self-instrumentation metrics live on every daemon.
        if metric.startswith(settings.CARBON_METRIC_PREFIX):
            return self.send_request_to_all(request)

        host = self.select_host(metric)
        conn = self.get_connection(host)
        log.cache("CarbonLink sending request for %s to %s" %
                  (metric, str(host)))
        try:
            conn.sendall(request_packet)
            result = self.recv_response(conn)
        except Exception, e:
            self.last_failure[host] = time.time()
            log.cache("Exception getting data from cache %s: %s" %
                      (str(host), e))
        else:
            # NOTE(review): snippet truncated here — the success branch
            # (pooling the connection and returning the result) is missing
            # from the listing.
Exemplo n.º 24
0
class CarbonLinkPool(object):
  """Pool of connections to carbon-cache daemons, sharded by metric name.

  Metrics are routed to a (server, instance) host via a consistent hash
  ring, so each cache query reaches the daemon holding that metric's
  in-memory datapoints.  Hosts that recently failed are avoided for
  CARBONLINK_RETRY_DELAY seconds when a healthy replica exists.
  """

  def __init__(self, hosts, timeout):
    # hosts: iterable of (server, port, instance) tuples.
    # timeout: socket timeout in seconds for carbon connections.
    self.hosts = [ (server, instance) for (server, port, instance) in hosts ]
    self.ports = dict(
      ((server, instance), port) for (server, port, instance) in hosts )
    self.timeout = float(timeout)
    servers = set([server for (server, port, instance) in hosts])
    # Cannot replicate to more distinct servers than actually exist.
    if len(servers) < settings.REPLICATION_FACTOR:
      raise Exception("REPLICATION_FACTOR=%d cannot exceed servers=%d" % (
        settings.REPLICATION_FACTOR, len(servers)))

    self.hash_ring = ConsistentHashRing(
      self.hosts, hash_type=settings.CARBONLINK_HASHING_TYPE)
    self.keyfunc = load_keyfunc()
    self.connections = {}    # host -> set() of idle pooled sockets
    self.last_failure = {}   # host -> time.time() of the last failure
    # Create a connection pool for each host
    for host in self.hosts:
      self.connections[host] = set()

  def select_host(self, metric):
    "Returns the carbon host that has data for the given metric"
    key = self.keyfunc(metric)
    nodes = []
    servers = set()
    # Walk the hash ring, taking at most one instance per distinct server,
    # until REPLICATION_FACTOR replicas have been collected.
    for node in self.hash_ring.get_nodes(key):
      (server, instance) = node
      if server in servers:
        continue
      servers.add(server)
      nodes.append(node)
      if len(servers) >= settings.REPLICATION_FACTOR:
        break

    available = [ n for n in nodes if self.is_available(n) ]
    # Prefer healthy replicas; if every replica looks down, try one anyway.
    return random.choice(available or nodes)

  def is_available(self, host):
    """Return True if *host* has not failed within CARBONLINK_RETRY_DELAY."""
    now = time.time()
    last_fail = self.last_failure.get(host, 0)
    # BUG FIX: the comparison was inverted ('<'), which treated only
    # recently-failed hosts as available.  A host is available when its
    # last failure is OLDER than the retry delay.
    return (now - last_fail) > settings.CARBONLINK_RETRY_DELAY

  def get_connection(self, host):
    """Return a pooled or freshly-created socket connected to *host*."""
    # First try to take one out of the pool for this host
    (server, instance) = host
    port = self.ports[host]
    connectionPool = self.connections[host]
    try:
      return connectionPool.pop()
    except KeyError:
      pass #nothing left in the pool, gotta make a new connection

    log.cache("CarbonLink creating a new socket for %s" % str(host))
    connection = socket.socket()
    connection.settimeout(self.timeout)
    try:
      connection.connect((server, port))
    except Exception:
      # Record the failure so select_host() can avoid this host, then
      # re-raise.  (Narrowed from a bare 'except:' so KeyboardInterrupt
      # and friends are not intercepted.)
      self.last_failure[host] = time.time()
      raise
    else:
      # Keepalive so long-idle pooled sockets detect dead peers.
      connection.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
      return connection

  def query(self, metric):
    """Fetch the cached datapoints for *metric* from its carbon daemon."""
    request = dict(type='cache-query', metric=metric)
    results = self.send_request(request)
    log.cache("CarbonLink cache-query request for %s returned %d datapoints" % (
      metric, len(results['datapoints'])))
    return results['datapoints']

  def get_metadata(self, metric, key):
    """Retrieve a single metadata value for *metric* from carbon."""
    request = dict(type='get-metadata', metric=metric, key=key)
    results = self.send_request(request)
    log.cache("CarbonLink get-metadata request received for %s:%s" % (metric, key))
    return results['value']

  def set_metadata(self, metric, key, value):
    """Store a metadata key/value pair for *metric* via carbon."""
    request = dict(type='set-metadata', metric=metric, key=key, value=value)
    results = self.send_request(request)
    log.cache("CarbonLink set-metadata request received for %s:%s" % (metric, key))
    return results

  def send_request(self, request):
    """Send *request* to the carbon host owning request['metric'].

    Returns the unpickled response dict; on transport failure the host is
    marked failed and an empty {'datapoints': []} result is returned.
    Raises CarbonLinkRequestError when the daemon reports an error.
    """
    metric = request['metric']
    serialized_request = pickle.dumps(request, protocol=-1)
    # Length-prefixed frame: 4-byte big-endian size, then the pickle.
    len_prefix = struct.pack("!L", len(serialized_request))
    request_packet = len_prefix + serialized_request
    result = {}
    result.setdefault('datapoints', [])

    # Carbon's own self-instrumentation metrics live on every daemon.
    if metric.startswith(settings.CARBON_METRIC_PREFIX):
      return self.send_request_to_all(request)

    if not self.hosts:
      log.cache("CarbonLink is not connected to any host. Returning empty nodes list")
      return result

    host = self.select_host(metric)
    conn = self.get_connection(host)
    log.cache("CarbonLink sending request for %s to %s" % (metric, str(host)))
    try:
      conn.sendall(request_packet)
      result = self.recv_response(conn)
    except Exception as e:
      self.last_failure[host] = time.time()
      log.cache("Exception getting data from cache %s: %s" % (str(host), e))
    else:
      # Only a connection that completed a round-trip goes back in the pool.
      self.connections[host].add(conn)
      if 'error' in result:
        log.cache("Error getting data from cache: %s" % result['error'])
        raise CarbonLinkRequestError(result['error'])
      log.cache("CarbonLink finished receiving %s from %s" % (str(metric), str(host)))
    return result

  def send_request_to_all(self, request):
    """Broadcast *request* to every host and merge the datapoint dicts.

    Per-host failures are logged and skipped; the merged result is
    returned as {'datapoints': {...}}.
    """
    metric = request['metric']
    serialized_request = pickle.dumps(request, protocol=-1)
    len_prefix = struct.pack("!L", len(serialized_request))
    request_packet = len_prefix + serialized_request
    results = {}
    results.setdefault('datapoints', {})

    for host in self.hosts:
      conn = self.get_connection(host)
      log.cache("CarbonLink sending request for %s to %s" % (metric, str(host)))
      try:
        conn.sendall(request_packet)
        result = self.recv_response(conn)
      except Exception as e:
        self.last_failure[host] = time.time()
        log.cache("Exception getting data from cache %s: %s" % (str(host), e))
      else:
        self.connections[host].add(conn)
        if 'error' in result:
          log.cache("Error getting data from cache %s: %s" % (str(host), result['error']))
        else:
          # BUG FIX: was 'len(result["datapoints"]) > 1', which silently
          # dropped responses containing exactly one datapoint.
          if result['datapoints']:
            results['datapoints'].update(result['datapoints'])
      log.cache("CarbonLink finished receiving %s from %s" % (str(metric), str(host)))
    return results

  def recv_response(self, conn):
    """Read one length-prefixed response frame from *conn* and unpickle it."""
    len_prefix = self.recv_exactly(conn, 4)
    body_size = struct.unpack("!L", len_prefix)[0]
    body = self.recv_exactly(conn, body_size)
    # 'unpickle' is presumably the project's restricted unpickler — do not
    # replace with raw pickle.loads on socket data.
    return unpickle.loads(body)

  @staticmethod
  def recv_exactly(conn, num_bytes):
    """Read exactly *num_bytes* from *conn*, raising if the peer closes early."""
    buf = b''
    while len(buf) < num_bytes:
      data = conn.recv(num_bytes - len(buf))
      if not data:
        raise Exception("Connection lost")
      buf += data

    return buf