Example 1
@contextlib.contextmanager
def _translate_failures():
    """Translates common redis exceptions into tooz exceptions."""
    try:
        yield
    except (exceptions.ConnectionError, exceptions.TimeoutError) as e:
        raise coordination.ToozConnectionError(utils.exception_message(e))
    except exceptions.RedisError as e:
        raise coordination.ToozError(utils.exception_message(e))
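Because _translate_failures() yields, it is meant to run under contextlib.contextmanager and be used as a context manager that converts redis errors into tooz errors at the call site. A minimal usage sketch, assuming a hypothetical raw redis client and key name (neither appears in the examples here):

# Hypothetical call site: the raw redis operation runs inside the context
# manager, so callers only ever see ToozConnectionError / ToozError.
def _get_raw_value(client, key):
    with _translate_failures():
        return client.get(key)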
Example 2
 def _update_capabilities_handler(async_result, timeout,
                                  timeout_exception, group_id, member_id):
     try:
         async_result.get(block=True, timeout=timeout)
     except timeout_exception as e:
         raise coordination.OperationTimedOut(utils.exception_message(e))
     except exceptions.NoNodeError:
         raise coordination.MemberNotJoined(group_id, member_id)
     except exceptions.ZookeeperError as e:
         raise coordination.ToozError(utils.exception_message(e))
Example 3
 def _get_groups_handler(async_result, timeout, timeout_exception):
     try:
         group_ids = async_result.get(block=True, timeout=timeout)
     except timeout_exception as e:
         raise coordination.OperationTimedOut(utils.exception_message(e))
     except exceptions.NoNodeError:
         raise coordination.ToozError("tooz namespace has not been created")
     except exceptions.ZookeeperError as e:
         raise coordination.ToozError(utils.exception_message(e))
     else:
         return set(g.encode('ascii') for g in group_ids)
Example 4
 def _delete_group_handler(async_result, timeout,
                           timeout_exception, group_id):
     try:
         async_result.get(block=True, timeout=timeout)
     except timeout_exception as e:
         raise coordination.OperationTimedOut(utils.exception_message(e))
     except exceptions.NoNodeError:
         raise coordination.GroupNotCreated(group_id)
     except exceptions.NotEmptyError:
         raise coordination.GroupNotEmpty(group_id)
     except exceptions.ZookeeperError as e:
         raise coordination.ToozError(utils.exception_message(e))
Example 5
 def _join_group_handler(async_result, timeout,
                         timeout_exception, group_id, member_id):
     try:
         async_result.get(block=True, timeout=timeout)
     except timeout_exception as e:
         raise coordination.OperationTimedOut(utils.exception_message(e))
     except exceptions.NodeExistsError:
         raise coordination.MemberAlreadyExist(group_id, member_id)
     except exceptions.NoNodeError:
         raise coordination.GroupNotCreated(group_id)
     except exceptions.ZookeeperError as e:
         raise coordination.ToozError(utils.exception_message(e))
Example 6
 def _create_group_handler(async_result, timeout,
                           timeout_exception, group_id):
     try:
         async_result.get(block=True, timeout=timeout)
     except timeout_exception as e:
         raise coordination.OperationTimedOut(utils.exception_message(e))
     except exceptions.NodeExistsError:
         raise coordination.GroupAlreadyExist(group_id)
     except exceptions.NoNodeError:
         raise coordination.ToozError("tooz namespace has not been created")
     except exceptions.ZookeeperError as e:
         raise coordination.ToozError(utils.exception_message(e))
Example 7
 def _get_members_handler(async_result, timeout,
                          timeout_exception, group_id):
     try:
         members_ids = async_result.get(block=True, timeout=timeout)
     except timeout_exception as e:
         raise coordination.OperationTimedOut(utils.exception_message(e))
     except exceptions.NoNodeError:
         raise coordination.GroupNotCreated(group_id)
     except exceptions.ZookeeperError as e:
         raise coordination.ToozError(utils.exception_message(e))
     else:
         return set(m.encode('ascii') for m in members_ids)
Example 8
 def _get_member_capabilities_handler(cls, async_result, timeout,
                                      timeout_exception, group_id,
                                      member_id):
     try:
         capabilities = async_result.get(block=True, timeout=timeout)[0]
     except timeout_exception as e:
         raise coordination.OperationTimedOut(utils.exception_message(e))
     except exceptions.NoNodeError:
         raise coordination.MemberNotJoined(group_id, member_id)
     except exceptions.ZookeeperError as e:
         raise coordination.ToozError(utils.exception_message(e))
     else:
         return cls._loads(capabilities)
Example 9
 def _leave_group_handler(async_result, timeout,
                          timeout_exception, group_id, member_id):
     try:
         async_result.get(block=True, timeout=timeout)
     except timeout_exception as e:
         coordination.raise_with_cause(coordination.OperationTimedOut,
                                       utils.exception_message(e),
                                       cause=e)
     except exceptions.NoNodeError:
         raise coordination.MemberNotJoined(group_id, member_id)
     except exceptions.ZookeeperError as e:
         coordination.raise_with_cause(coordination.ToozError,
                                       utils.exception_message(e),
                                       cause=e)
Example 10
    def get_connection(parsed_url, options):
        host = parsed_url.hostname
        port = parsed_url.port
        dbname = parsed_url.path[1:]
        username = parsed_url.username
        password = parsed_url.password
        unix_socket = options.get("unix_socket")

        try:
            if unix_socket:
                return pymysql.Connect(unix_socket=unix_socket,
                                       port=port,
                                       user=username,
                                       passwd=password,
                                       database=dbname)
            else:
                return pymysql.Connect(host=host,
                                       port=port,
                                       user=username,
                                       passwd=password,
                                       database=dbname)
        except (pymysql.err.OperationalError, pymysql.err.InternalError) as e:
            coordination.raise_with_cause(coordination.ToozConnectionError,
                                          utils.exception_message(e),
                                          cause=e)
Example 11
@contextlib.contextmanager
def _translate_failures():
    try:
        yield
    except EnvironmentError as e:
        coordination.raise_with_cause(coordination.ToozError,
                                      utils.exception_message(e),
                                      cause=e)
Example 12
 def get(self, timeout=10):
     try:
         return self._fut.result(timeout=timeout)
     except futures.TimeoutError as e:
         coordination.raise_with_cause(coordination.OperationTimedOut,
                                       utils.exception_message(e),
                                       cause=e)
Example 13
        def _lock():
            # NOTE(sileht): mysql-server (<5.7.5) allows only one lock per
            # connection at a time:
            #  select GET_LOCK("a", 0);
            #  select GET_LOCK("b", 0); <-- this release lock "a" ...
            # Or
            #  select GET_LOCK("a", 0);
            #  select GET_LOCK("a", 0); release and lock again "a"
            #
            # So, we track locally the lock status with self.acquired
            if self.acquired is True:
                if blocking:
                    raise _retry.Retry
                return False

            try:
                with self._conn as cur:
                    cur.execute("SELECT GET_LOCK(%s, 0);", self.name)
                    # Can return NULL on error
                    if cur.fetchone()[0] == 1:
                        self.acquired = True
                        return True
            except pymysql.MySQLError as e:
                coordination.raise_with_cause(coordination.ToozError,
                                              utils.exception_message(e),
                                              cause=e)

            if blocking:
                raise _retry.Retry
            return False
Example 14
 def acquire(self, blocking=True):
     if blocking is False:
         try:
             cur = self._conn.cursor()
             cur.execute("SELECT GET_LOCK(%s, 0);", self.name)
             # Can return NULL on error
             if cur.fetchone()[0] == 1:
                 return True
             return False
         except pymysql.MySQLError as e:
             raise coordination.ToozError(utils.exception_message(e))
     else:
         def _acquire():
             try:
                 cur = self._conn.cursor()
                 cur.execute("SELECT GET_LOCK(%s, 0);", self.name)
                 if cur.fetchone()[0] == 1:
                     return True
             except pymysql.MySQLError as e:
                 raise coordination.ToozError(utils.exception_message(e))
             raise _retry.Retry
         kwargs = _retry.RETRYING_KWARGS.copy()
         if blocking is not True:
             kwargs['stop_max_delay'] = blocking
         return _retry.Retrying(**kwargs).call(_acquire)
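A brief, hedged note on the blocking handling above: False makes a single GET_LOCK attempt, True retries with the default RETRYING_KWARGS policy, and any other value is passed through as the retry policy's stop_max_delay. A hypothetical caller (the lock object name is assumed):

# Hypothetical calls showing the three accepted forms of blocking:
lock.acquire(blocking=False)   # one GET_LOCK attempt; returns True or False
lock.acquire(blocking=True)    # retries using the default RETRYING_KWARGS
lock.acquire(blocking=30)      # retries, overriding stop_max_delay with 30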
Example 15
 def release(self):
     try:
         with self._conn as cur:
             cur.execute("SELECT RELEASE_LOCK(%s);", self.name)
             return cur.fetchone()[0]
     except pymysql.MySQLError as e:
         raise coordination.ToozError(utils.exception_message(e))
Example 16
 def _start(self):
     self._executor = futures.ThreadPoolExecutor(max_workers=1)
     try:
         self._client = self._make_client(self._parsed_url, self._options,
                                          self.timeout)
     except exceptions.RedisError as e:
         raise coordination.ToozConnectionError(utils.exception_message(e))
     else:
         # Ensure that the server is alive and not dead; this does not
         # ensure the server will always be alive, but does ensure that it
         # at least is alive once...
         with _translate_failures():
             self._server_info = self._client.info()
         # Validate we have a good enough redis version we are connected
         # to so that the basic set of features we support will actually
         # work (instead of blowing up).
         new_enough, redis_version = self._check_fetch_redis_version(
             self._MIN_VERSION)
         if not new_enough:
             raise tooz.NotImplemented("Redis version greater than or"
                                       " equal to '%s' is required"
                                       " to use this driver; '%s' is"
                                       " being used which is not new"
                                       " enough" % (self._MIN_VERSION,
                                                    redis_version))
         self.heartbeat()
         self._started = True
Example 17
 def _safe_stop(coord):
     try:
         coord.stop()
     except coordination.ToozError as e:
         message = utils.exception_message(e)
         if (message != 'Can not stop a driver which has not'
                        ' been started'):
             raise
Example 18
 def _acquire():
     try:
         cur = self._conn.cursor()
         cur.execute("SELECT GET_LOCK(%s, 0);", self.name)
         if cur.fetchone()[0] == 1:
             return True
     except pymysql.MySQLError as e:
         raise coordination.ToozError(utils.exception_message(e))
     raise _retry.Retry
Example 19
 def release(self):
     try:
         with self._conn as cur:
             cur.execute("SELECT RELEASE_LOCK(%s);", self.name)
             cur.fetchone()
             self.acquired = False
     except pymysql.MySQLError as e:
         coordination.raise_with_cause(coordination.ToozError,
                                       utils.exception_message(e),
                                       cause=e)
Example 20
 def get(self, timeout=10):
     try:
         # Late translate the common failures since the redis client
         # may throw things that we can not catch in the callbacks where
         # it is used (especially one that uses the transaction
         # method).
         with _translate_failures():
             return self._fut.result(timeout=timeout)
     except futures.TimeoutError as e:
         raise coordination.OperationTimedOut(utils.exception_message(e))
Example 21
 def _safe_stop(coord):
     try:
         coord.stop()
     except coordination.ToozError as e:
         # TODO(harlowja): make this better, so that we don't have to
         # do string checking...
         message = utils.exception_message(e)
         if (message != 'Can not stop a driver which has not'
                        ' been started'):
             raise
Example 22
 def get(self, timeout=10):
     try:
         # Late translate the common failures since the file driver
         # may throw things that we can not catch in the callbacks where
         # it is used.
         with _translate_failures():
             return self._fut.result(timeout=timeout)
     except futures.TimeoutError as e:
         coordination.raise_with_cause(coordination.OperationTimedOut,
                                       utils.exception_message(e),
                                       cause=e)
Example 23
 def _start(self):
     try:
         self.client = pymemcache.client.Client(
             self.host,
             serializer=self._msgpack_serializer,
             deserializer=self._msgpack_deserializer,
             timeout=self.timeout,
             connect_timeout=self.timeout)
     except Exception as e:
         raise coordination.ToozConnectionError(utils.exception_message(e))
     self._group_members = collections.defaultdict(set)
     self._executor = futures.ThreadPoolExecutor(max_workers=1)
     self.heartbeat()
Example 24
 def wrapper(*args, **kwargs):
     try:
         return func(*args, **kwargs)
     except pymemcache_client.MemcacheUnexpectedCloseError as e:
         coordination.raise_with_cause(coordination.ToozConnectionError,
                                       utils.exception_message(e),
                                       cause=e)
     except (socket.timeout, socket.error,
             socket.gaierror, socket.herror) as e:
         # TODO(harlowja): get upstream pymemcache to produce a better
         # exception for these, using socket (vs. a memcache specific
         # error) seems sorta not right and/or the best approach...
         msg = utils.exception_message(e)
         if e.errno is not None:
             msg += " (with errno %s [%s])" % (errno.errorcode[e.errno],
                                               e.errno)
         coordination.raise_with_cause(coordination.ToozConnectionError,
                                       msg, cause=e)
     except pymemcache_client.MemcacheError as e:
         coordination.raise_with_cause(coordination.ToozError,
                                       utils.exception_message(e),
                                       cause=e)
Example 25
 def _acquire(retry=False):
     try:
         with self._conn as cur:
             cur.execute("SELECT GET_LOCK(%s, 0);", self.name)
             # Can return NULL on error
             if cur.fetchone()[0] == 1:
                 return True
     except pymysql.MySQLError as e:
         raise coordination.ToozError(utils.exception_message(e))
     if retry:
         raise _retry.Retry
     else:
         return False
Example 26
 def _start(self):
     self._executor = futures.ThreadPoolExecutor(max_workers=1)
     try:
         self._client = self._make_client(self._parsed_url, self._options,
                                          self.timeout)
     except exceptions.RedisError as e:
         raise coordination.ToozConnectionError(utils.exception_message(e))
     else:
         # Ensure that the server is alive and not dead; this does not
         # ensure the server will always be alive, but does ensure that it
         # at least is alive once...
         self.heartbeat()
         self._started = True
Example 27
 def _get_member_info_handler(cls, async_result, timeout,
                              timeout_exception, group_id,
                              member_id):
     try:
         capabilities, znode_stats = async_result.get(block=True,
                                                      timeout=timeout)
     except timeout_exception as e:
         coordination.raise_with_cause(coordination.OperationTimedOut,
                                       utils.exception_message(e),
                                       cause=e)
     except exceptions.NoNodeError:
         raise coordination.MemberNotJoined(group_id, member_id)
     except exceptions.ZookeeperError as e:
         coordination.raise_with_cause(coordination.ToozError,
                                       utils.exception_message(e),
                                       cause=e)
     else:
         member_info = {
             'capabilities': cls._loads(capabilities),
             'created_at': utils.millis_to_datetime(znode_stats.ctime),
             'updated_at': utils.millis_to_datetime(znode_stats.mtime)
         }
         return member_info
Example 28
 def _start(self):
     try:
         self.client = pymemcache.client.Client(
             self.host,
             serializer=self._msgpack_serializer,
             deserializer=self._msgpack_deserializer,
             timeout=self.timeout,
             connect_timeout=self.timeout)
         # Run heartbeat here because pymemcache uses a lazy connection
         # method and only connects once you do an operation.
         self.heartbeat()
     except Exception as e:
         raise coordination.ToozConnectionError(utils.exception_message(e))
     self._group_members = collections.defaultdict(set)
     self._executor = futures.ThreadPoolExecutor(max_workers=1)
Example 29
 def _start(self):
     try:
         if self._unix_socket:
             self._conn = pymysql.Connect(unix_socket=self._unix_socket,
                                          port=self._port,
                                          user=self._username,
                                          passwd=self._password,
                                          database=self._dbname)
         else:
             self._conn = pymysql.Connect(host=self._host,
                                          port=self._port,
                                          user=self._username,
                                          passwd=self._password,
                                          database=self._dbname)
     except pymysql.err.OperationalError as e:
         raise coordination.ToozConnectionError(utils.exception_message(e))
Example 30
 def _start(self):
     self._executor.start()
     try:
         self._client = self._make_client(self._parsed_url, self._options,
                                          self.timeout)
     except exceptions.RedisError as e:
         coordination.raise_with_cause(coordination.ToozConnectionError,
                                       utils.exception_message(e),
                                       cause=e)
     else:
         # Ensure that the server is alive and not dead; this does not
         # ensure the server will always be alive, but does ensure that it
         # at least is alive once...
         with _translate_failures():
             self._server_info = self._client.info()
         # Validate we have a good enough redis version we are connected
         # to so that the basic set of features we support will actually
         # work (instead of blowing up).
         new_enough, redis_version = self._check_fetch_redis_version(
             self.MIN_VERSION)
         if not new_enough:
             raise tooz.NotImplemented("Redis version greater than or"
                                       " equal to '%s' is required"
                                       " to use this driver; '%s' is"
                                       " being used which is not new"
                                       " enough" % (self.MIN_VERSION,
                                                    redis_version))
         tpl_params = {
             'group_existence_value': self.GROUP_EXISTS_VALUE,
             'group_existence_key': self.GROUP_EXISTS,
         }
         # For py3.x ensure these are unicode since the string template
         # replacement will expect unicode (and we don't want b'' as a
         # prefix which will happen in py3.x if this is not done).
         for (k, v) in six.iteritems(tpl_params.copy()):
             if isinstance(v, six.binary_type):
                 v = v.decode('ascii')
             tpl_params[k] = v
         prepared_scripts = {}
         for name, raw_script_tpl in six.iteritems(self.SCRIPTS):
             script_tpl = string.Template(raw_script_tpl)
             script = script_tpl.substitute(**tpl_params)
             prepared_scripts[name] = self._client.register_script(script)
         self._scripts = prepared_scripts
         self.heartbeat()
         self._started = True