def get_connection(parsed_url, options, defer_connect=False):
    """Open a pymysql connection described by *parsed_url* and *options*.

    Connects over a unix socket when one is configured in *options*,
    otherwise over TCP using the URL's host.  Connection failures are
    re-raised as ToozConnectionError with the original as the cause.
    """
    common_kwargs = {
        'port': parsed_url.port or MySQLLock.MYSQL_DEFAULT_PORT,
        'user': parsed_url.username,
        'passwd': parsed_url.password,
        # Leading "/" of the URL path is stripped to get the db name.
        'database': parsed_url.path[1:],
        'defer_connect': defer_connect,
    }
    unix_socket = options.get("unix_socket")
    try:
        if unix_socket:
            return pymysql.Connect(unix_socket=unix_socket, **common_kwargs)
        return pymysql.Connect(host=parsed_url.hostname, **common_kwargs)
    except (pymysql.err.OperationalError, pymysql.err.InternalError) as e:
        utils.raise_with_cause(coordination.ToozConnectionError,
                               encodeutils.exception_to_unicode(e),
                               cause=e)
def _lock():
    """Try to take the MySQL named lock once, without waiting server-side.

    Returns True when the lock was acquired; returns False (or raises
    _retry.TryAgain when blocking) otherwise.  MySQL errors are
    translated into tooz.ToozError.
    """
    # NOTE(sileht): mysql-server (<5.7.5) allows only one lock per
    # connection at a time:
    #  select GET_LOCK("a", 0);
    #  select GET_LOCK("b", 0); <-- this release lock "a" ...
    # Or
    #  select GET_LOCK("a", 0);
    #  select GET_LOCK("a", 0); release and lock again "a"
    #
    # So, we track locally the lock status with self.acquired
    if self.acquired:
        if blocking:
            raise _retry.TryAgain
        return False
    try:
        if not self._conn.open:
            self._conn.connect()
        with self._conn as cur:
            cur.execute("SELECT GET_LOCK(%s, 0);", self.name)
            # Can return NULL on error; compare with == (never `is`) since
            # integer identity is a CPython implementation detail.
            if cur.fetchone()[0] == 1:
                self.acquired = True
                return True
    except pymysql.MySQLError as e:
        utils.raise_with_cause(
            tooz.ToozError,
            encodeutils.exception_to_unicode(e),
            cause=e)
    if blocking:
        raise _retry.TryAgain
    self._conn.close()
    return False
def _translate_failures(): try: yield except (EnvironmentError, voluptuous.Invalid) as e: utils.raise_with_cause(tooz.ToozError, encodeutils.exception_to_unicode(e), cause=e)
def get(self, timeout=10):
    """Wait for the wrapped future and return its result.

    :param timeout: seconds to wait before giving up
    :raises coordination.OperationTimedOut: when the wait times out
    """
    try:
        return self._fut.result(timeout=timeout)
    except futures.TimeoutError as e:
        msg = encodeutils.exception_to_unicode(e)
        utils.raise_with_cause(coordination.OperationTimedOut, msg, cause=e)
def _lock():
    """Try to take the MySQL named lock once, without waiting server-side.

    Returns True when the lock was acquired; returns False (or raises
    _retry.TryAgain when blocking) otherwise.  MySQL errors are
    translated into tooz.ToozError.
    """
    # NOTE(sileht): mysql-server (<5.7.5) allows only one lock per
    # connection at a time:
    #  select GET_LOCK("a", 0);
    #  select GET_LOCK("b", 0); <-- this release lock "a" ...
    # Or
    #  select GET_LOCK("a", 0);
    #  select GET_LOCK("a", 0); release and lock again "a"
    #
    # So, we track locally the lock status with self.acquired
    if self.acquired:
        if blocking:
            raise _retry.TryAgain
        return False
    try:
        if not self._conn.open:
            self._conn.connect()
        with self._conn as cur:
            cur.execute("SELECT GET_LOCK(%s, 0);", self.name)
            # Can return NULL on error; compare with == (never `is`) since
            # integer identity is a CPython implementation detail.
            if cur.fetchone()[0] == 1:
                self.acquired = True
                return True
    except pymysql.MySQLError as e:
        utils.raise_with_cause(tooz.ToozError,
                               encodeutils.exception_to_unicode(e),
                               cause=e)
    if blocking:
        raise _retry.TryAgain
    self._conn.close()
    return False
def _translating_cursor(conn): try: with conn.cursor() as cur: yield cur except psycopg2.Error as e: utils.raise_with_cause(tooz.ToozError, _format_exception(e), cause=e)
def get(self, timeout=None):
    """Wait for the wrapped future, optionally translating failures.

    When a failure translator is configured, the wait runs inside it so
    its context manager can rewrite driver exceptions; timeouts become
    OperationTimedOut either way.
    """
    try:
        translator = self._failure_translator
        if translator:
            with translator():
                return self._fut.result(timeout=timeout)
        return self._fut.result(timeout=timeout)
    except futures.TimeoutError as e:
        msg = encodeutils.exception_to_unicode(e)
        utils.raise_with_cause(OperationTimedOut, msg, cause=e)
def get(self, timeout=None):
    """Wait for the wrapped future, translating common failures.

    :raises coordination.OperationTimedOut: when the wait times out
    """
    try:
        # Late translate the common failures since the file driver
        # may throw things that we can not catch in the callbacks where
        # it is used.
        with _translate_failures():
            return self._fut.result(timeout=timeout)
    except futures.TimeoutError as e:
        msg = encodeutils.exception_to_unicode(e)
        utils.raise_with_cause(coordination.OperationTimedOut, msg, cause=e)
def _translate_failures(): """Translates common redis exceptions into tooz exceptions.""" try: yield except (exceptions.ConnectionError, exceptions.TimeoutError) as e: utils.raise_with_cause(coordination.ToozConnectionError, encodeutils.exception_to_unicode(e), cause=e) except exceptions.RedisError as e: utils.raise_with_cause(tooz.ToozError, encodeutils.exception_to_unicode(e), cause=e)
def wrapper(*args, **kwargs):
    """Invoke the wrapped callable, translating its errors to tooz ones."""
    try:
        return func(*args, **kwargs)
    except ValueError as e:
        # Typically json decoding failed for some reason.
        msg = encodeutils.exception_to_unicode(e)
        utils.raise_with_cause(tooz.ToozError, msg, cause=e)
    except requests.exceptions.RequestException as e:
        msg = encodeutils.exception_to_unicode(e)
        utils.raise_with_cause(coordination.ToozConnectionError, msg, cause=e)
def get(self, timeout=10):
    """Wait for the wrapped future, translating common redis failures.

    :raises coordination.OperationTimedOut: when the wait times out
    """
    try:
        # Late translate the common failures since the redis client
        # may throw things that we can not catch in the callbacks where
        # it is used (especially one that uses the transaction
        # method).
        with _translate_failures():
            return self._fut.result(timeout=timeout)
    except futures.TimeoutError as e:
        msg = encodeutils.exception_to_unicode(e)
        utils.raise_with_cause(coordination.OperationTimedOut, msg, cause=e)
def _failure_translator(): """Translates common consul exceptions into tooz exceptions.""" try: yield except (consul.Timeout, requests.exceptions.RequestException) as e: utils.raise_with_cause(coordination.ToozConnectionError, encodeutils.exception_to_unicode(e), cause=e) except (consul.ConsulException, ValueError) as e: # ValueError = Typically json decoding failed for some reason. utils.raise_with_cause(tooz.ToozError, encodeutils.exception_to_unicode(e), cause=e)
def release(self):
    """Release the held MySQL named lock.

    Returns False when the lock is not currently held; otherwise issues
    RELEASE_LOCK, clears the local acquired flag and returns True.
    MySQL errors are translated into tooz.ToozError.
    """
    if not self.acquired:
        return False
    try:
        with self._conn as cur:
            cur.execute("SELECT RELEASE_LOCK(%s);", self.name)
            cur.fetchone()
            self.acquired = False
            return True
    except pymysql.MySQLError as e:
        msg = encodeutils.exception_to_unicode(e)
        utils.raise_with_cause(tooz.ToozError, msg, cause=e)
def _update_capabilities_handler(async_result, timeout, timeout_exception, group_id, member_id): try: async_result.get(block=True, timeout=timeout) except timeout_exception as e: utils.raise_with_cause(coordination.OperationTimedOut, encodeutils.exception_to_unicode(e), cause=e) except exceptions.NoNodeError: raise coordination.MemberNotJoined(group_id, member_id) except exceptions.ZookeeperError as e: utils.raise_with_cause(tooz.ToozError, encodeutils.exception_to_unicode(e), cause=e)
def release(self):
    """Release the held MySQL named lock and close the connection.

    Returns False when the lock is not currently held; otherwise issues
    RELEASE_LOCK, clears the local acquired flag, closes the connection
    and returns True.  MySQL errors are translated into tooz.ToozError.
    """
    if not self.acquired:
        return False
    try:
        with self._conn as cur:
            cur.execute("SELECT RELEASE_LOCK(%s);", self.name)
            cur.fetchone()
            self.acquired = False
            self._conn.close()
            return True
    except pymysql.MySQLError as e:
        msg = encodeutils.exception_to_unicode(e)
        utils.raise_with_cause(tooz.ToozError, msg, cause=e)
def is_still_owner(self):
    """Check whether this process still owns the zookeeper lock node.

    Returns False when the lock was never acquired, when the node data
    no longer matches, or when the connection/node is gone; other kazoo
    errors become tooz.ToozError.
    """
    if not self.acquired:
        return False
    try:
        node_path = paths.join(self._lock.path, self._lock.node)
        data, _znode = self._client.get(node_path)
        return data == self._lock.data
    except (self._client.handler.timeout_exception,
            exceptions.ConnectionLoss,
            exceptions.ConnectionDropped,
            exceptions.NoNodeError):
        return False
    except exceptions.KazooException as e:
        utils.raise_with_cause(tooz.ToozError,
                               "operation error: %s" % (e),
                               cause=e)
def _get_members_handler(async_result, timeout, timeout_exception, group_id): try: members_ids = async_result.get(block=True, timeout=timeout) except timeout_exception as e: utils.raise_with_cause(coordination.OperationTimedOut, encodeutils.exception_to_unicode(e), cause=e) except exceptions.NoNodeError: raise coordination.GroupNotCreated(group_id) except exceptions.ZookeeperError as e: utils.raise_with_cause(tooz.ToozError, encodeutils.exception_to_unicode(e), cause=e) else: return set(m.encode('ascii') for m in members_ids)
def _start(self): try: self._coord.start(timeout=self.timeout) except self._coord.handler.timeout_exception as e: e_msg = encodeutils.exception_to_unicode(e) utils.raise_with_cause(coordination.ToozConnectionError, "Operational error: %s" % e_msg, cause=e) try: self._coord.ensure_path(self._paths_join("/", self._namespace)) except exceptions.KazooException as e: e_msg = encodeutils.exception_to_unicode(e) utils.raise_with_cause(tooz.ToozError, "Operational error: %s" % e_msg, cause=e) self._leader_locks = {}
def _join_group_handler(async_result, timeout, timeout_exception, group_id, member_id): try: async_result.get(block=True, timeout=timeout) except timeout_exception as e: utils.raise_with_cause(coordination.OperationTimedOut, encodeutils.exception_to_unicode(e), cause=e) except exceptions.NodeExistsError: raise coordination.MemberAlreadyExist(group_id, member_id) except exceptions.NoNodeError: raise coordination.GroupNotCreated(group_id) except exceptions.ZookeeperError as e: utils.raise_with_cause(tooz.ToozError, encodeutils.exception_to_unicode(e), cause=e)
def _start(self):
    """Connect the redis client and prepare the driver for use.

    Builds the client, verifies the server is reachable and new enough,
    pre-registers the driver's lua scripts, then sends an initial
    heartbeat and marks the driver started.
    """
    super(RedisDriver, self)._start()
    try:
        self._client = self._make_client(self._parsed_url, self._options,
                                         self.timeout)
    except exceptions.RedisError as e:
        utils.raise_with_cause(coordination.ToozConnectionError,
                               encodeutils.exception_to_unicode(e),
                               cause=e)
    else:
        # Ensure that the server is alive and not dead, this does not
        # ensure the server will always be alive, but does insure that it
        # at least is alive once...
        with _translate_failures():
            self._server_info = self._client.info()
        # Validate we have a good enough redis version we are connected
        # to so that the basic set of features we support will actually
        # work (instead of blowing up).
        new_enough, redis_version = self._check_fetch_redis_version(
            self.MIN_VERSION)
        if not new_enough:
            raise tooz.NotImplemented("Redis version greater than or"
                                      " equal to '%s' is required"
                                      " to use this driver; '%s' is"
                                      " being used which is not new"
                                      " enough" % (self.MIN_VERSION,
                                                   redis_version))
        # Values substituted into the lua script templates below.
        tpl_params = {
            'group_existence_value': self.GROUP_EXISTS_VALUE,
            'group_existence_key': self.GROUP_EXISTS,
        }
        # For py3.x ensure these are unicode since the string template
        # replacement will expect unicode (and we don't want b'' as a
        # prefix which will happen in py3.x if this is not done).
        for (k, v) in six.iteritems(tpl_params.copy()):
            if isinstance(v, six.binary_type):
                v = v.decode('ascii')
            tpl_params[k] = v
        # Register each script template with the server once, up front,
        # so later calls can execute them by name.
        prepared_scripts = {}
        for name, raw_script_tpl in six.iteritems(self.SCRIPTS):
            script_tpl = string.Template(raw_script_tpl)
            script = script_tpl.substitute(**tpl_params)
            prepared_scripts[name] = self._client.register_script(script)
        self._scripts = prepared_scripts
        self.heartbeat()
        self._started = True
def heartbeat(self):
    """Verify the zookeeper connection is alive and return the timeout.

    :raises coordination.OperationTimedOut: when the probe times out
    :raises tooz.ToozError: for any other zookeeper error
    """
    # Just fetch the base path (and do nothing with it); this will
    # force any waiting heartbeat responses to be flushed, and also
    # ensures that the connection still works as expected...
    base_path = self._paths_join("/", self._namespace)
    try:
        self._coord.get(base_path)
    except self._timeout_exception as e:
        msg = encodeutils.exception_to_unicode(e)
        utils.raise_with_cause(coordination.OperationTimedOut, msg, cause=e)
    except exceptions.NoNodeError:
        # A missing base path is acceptable; reachability is all we need.
        pass
    except exceptions.ZookeeperError as e:
        msg = encodeutils.exception_to_unicode(e)
        utils.raise_with_cause(tooz.ToozError, msg, cause=e)
    return self.timeout
def get_connection(parsed_url, options):
    """Open a psycopg2 connection described by *parsed_url* and *options*.

    Option values take precedence over URL components; credentials are
    only forwarded when present in the URL.  Connection failures are
    re-raised as ToozConnectionError with the original as the cause.
    """
    connect_kwargs = {
        'host': options.get("host") or parsed_url.hostname,
        'port': options.get("port") or parsed_url.port,
        # Leading "/" of the URL path is stripped to get the db name.
        'database': options.get("dbname") or parsed_url.path[1:],
    }
    if parsed_url.username is not None:
        connect_kwargs["user"] = parsed_url.username
    if parsed_url.password is not None:
        connect_kwargs["password"] = parsed_url.password
    try:
        return psycopg2.connect(**connect_kwargs)
    except psycopg2.Error as e:
        utils.raise_with_cause(coordination.ToozConnectionError,
                               _format_exception(e),
                               cause=e)
def _failure_translator(): """Translates common pymemcache exceptions into tooz exceptions. https://github.com/pinterest/pymemcache/blob/d995/pymemcache/client.py#L202 """ try: yield except pymemcache_client.MemcacheUnexpectedCloseError as e: utils.raise_with_cause(coordination.ToozConnectionError, encodeutils.exception_to_unicode(e), cause=e) except (socket.timeout, socket.error, socket.gaierror, socket.herror) as e: # TODO(harlowja): get upstream pymemcache to produce a better # exception for these, using socket (vs. a memcache specific # error) seems sorta not right and/or the best approach... msg = encodeutils.exception_to_unicode(e) if e.errno is not None: msg += " (with errno %s [%s])" % (errno.errorcode[e.errno], e.errno) utils.raise_with_cause(coordination.ToozConnectionError, msg, cause=e) except pymemcache_client.MemcacheError as e: utils.raise_with_cause(tooz.ToozError, encodeutils.exception_to_unicode(e), cause=e)
def _get_member_info_handler(cls, async_result, timeout,
                             timeout_exception, group_id, member_id):
    """Wait for member info, mapping kazoo errors to tooz ones.

    Returns a dict holding the decoded member capabilities plus the
    znode creation and modification timestamps.
    """
    try:
        capabilities, znode_stats = async_result.get(block=True,
                                                     timeout=timeout)
    except timeout_exception as e:
        msg = encodeutils.exception_to_unicode(e)
        utils.raise_with_cause(coordination.OperationTimedOut, msg, cause=e)
    except exceptions.NoNodeError:
        raise coordination.MemberNotJoined(group_id, member_id)
    except exceptions.ZookeeperError as e:
        msg = encodeutils.exception_to_unicode(e)
        utils.raise_with_cause(tooz.ToozError, msg, cause=e)
    else:
        return {
            'capabilities': cls._loads(capabilities),
            'created_at': utils.millis_to_datetime(znode_stats.ctime),
            'updated_at': utils.millis_to_datetime(znode_stats.mtime),
        }
def wrapper(*args, **kwargs):
    """Invoke the wrapped callable, translating etcd3 errors to tooz ones."""
    try:
        return func(*args, **kwargs)
    except etcd3_exc.ConnectionFailedError as e:
        msg = encodeutils.exception_to_unicode(e)
        utils.raise_with_cause(coordination.ToozConnectionError, msg, cause=e)
    except etcd3_exc.ConnectionTimeoutError as e:
        msg = encodeutils.exception_to_unicode(e)
        utils.raise_with_cause(coordination.OperationTimedOut, msg, cause=e)
    except etcd3_exc.Etcd3Exception as e:
        msg = encodeutils.exception_to_unicode(e)
        utils.raise_with_cause(coordination.ToozError, msg, cause=e)
def _failure_translator(): """Translates common requests exceptions into tooz exceptions.""" try: yield except etcd3_exc.ConnectionFailedError as e: utils.raise_with_cause(coordination.ToozConnectionError, encodeutils.exception_to_unicode(e), cause=e) except etcd3_exc.ConnectionTimeoutError as e: utils.raise_with_cause(coordination.OperationTimedOut, encodeutils.exception_to_unicode(e), cause=e) except etcd3_exc.Etcd3Exception as e: utils.raise_with_cause(coordination.ToozError, encodeutils.exception_to_unicode(e), cause=e)
def _get_groups_handler(self, async_result, timeout, timeout_exception): try: group_ids = async_result.get(block=True, timeout=timeout) except timeout_exception as e: utils.raise_with_cause(coordination.OperationTimedOut, encodeutils.exception_to_unicode(e), cause=e) except exceptions.NoNodeError as e: utils.raise_with_cause(tooz.ToozError, "Tooz namespace '%s' has not" " been created" % self._namespace, cause=e) except exceptions.ZookeeperError as e: utils.raise_with_cause(tooz.ToozError, encodeutils.exception_to_unicode(e), cause=e) else: return set(g.encode('ascii') for g in group_ids)
def _create_group_handler(self, async_result, timeout, timeout_exception, group_id): try: async_result.get(block=True, timeout=timeout) except timeout_exception as e: utils.raise_with_cause(coordination.OperationTimedOut, encodeutils.exception_to_unicode(e), cause=e) except exceptions.NodeExistsError: raise coordination.GroupAlreadyExist(group_id) except exceptions.NoNodeError as e: utils.raise_with_cause(tooz.ToozError, "Tooz namespace '%s' has not" " been created" % self._namespace, cause=e) except exceptions.ZookeeperError as e: utils.raise_with_cause(tooz.ToozError, encodeutils.exception_to_unicode(e), cause=e)
def wrapper(*args, **kwargs):
    """Invoke the wrapped callable, translating memcache errors to tooz ones."""
    try:
        return func(*args, **kwargs)
    except pymemcache_client.MemcacheUnexpectedCloseError as e:
        msg = encodeutils.exception_to_unicode(e)
        utils.raise_with_cause(coordination.ToozConnectionError, msg, cause=e)
    except (socket.timeout, socket.error,
            socket.gaierror, socket.herror) as e:
        # TODO(harlowja): get upstream pymemcache to produce a better
        # exception for these, using socket (vs. a memcache specific
        # error) seems sorta not right and/or the best approach...
        msg = encodeutils.exception_to_unicode(e)
        if e.errno is not None:
            msg += " (with errno %s [%s])" % (errno.errorcode[e.errno],
                                              e.errno)
        utils.raise_with_cause(coordination.ToozConnectionError, msg, cause=e)
    except pymemcache_client.MemcacheError as e:
        msg = encodeutils.exception_to_unicode(e)
        utils.raise_with_cause(tooz.ToozError, msg, cause=e)