def update_user(self, service, user, generation=None, client_state=None):
    """Update the given user's account state.

    If ``client_state`` is None only the generation number is bumped in
    place; otherwise a brand-new user record is created (new uid) and the
    old records are marked as replaced.

    :param service: service name the user belongs to.
    :param user: mutable dict of user state; updated in place.
    :param generation: new generation number, or None to leave unchanged.
    :param client_state: new client-state string, or None.
    :raises BackendError: if ``client_state`` was seen before.
    """
    if client_state is None:
        # uid can stay the same, just update the generation number.
        if generation is not None:
            params = {
                'service': service,
                'email': user['email'],
                'generation': generation,
            }
            res = self._engine.execute(_UPDATE_GENERATION_NUMBER, **params)
            res.close()
            # Never move the generation number backwards.
            user['generation'] = max(generation, user['generation'])
    else:
        # reject previously-seen client-state strings.
        if client_state == user['client_state']:
            raise BackendError('previously seen client-state string')
        if client_state in user['old_client_states']:
            raise BackendError('previously seen client-state string')
        # need to create a new record for new client_state.
        if generation is not None:
            generation = max(user['generation'], generation)
        else:
            generation = user['generation']
        now = get_timestamp()
        params = {
            'service': service,
            'email': user['email'],
            'generation': generation,
            'client_state': client_state,
            'timestamp': now,
        }
        try:
            res = self._engine.execute(_CREATE_USER_RECORD, **params)
        except IntegrityError:
            # A concurrent update created the row first; re-read the
            # authoritative state from the database.
            user.update(self.get_user(service, user['email']))
        else:
            # NOTE(review): the return value of this get_user() call is
            # discarded — looks like dead code or a leftover; confirm.
            self.get_user(service, user['email'])
            user['uid'] = res.lastrowid
            user['generation'] = generation
            user['old_client_states'][user['client_state']] = True
            user['client_state'] = client_state
            res.close()
        # mark old records as having been replaced.
        # if we crash here, they are unmarked and we may fail to
        # garbage collect them for a while, but the active state
        # will be undamaged.
        params = {
            'service': service,
            'email': user['email'],
            'timestamp': now
        }
        res = self._engine.execute(_REPLACE_USER_RECORDS, **params)
        res.close()
def get_best_node(self, service):
    """Returns the 'least loaded' node currently available, increments the
    active count on that node, and decrements the slots currently available.

    :param service: service name to select a node for.
    :returns: the selected node name as a plain str.
    :raises BackendError: if no node with free capacity is available.
    """
    __, nodes, __ = self._dbs[service]
    where = [
        nodes.c.service == service,
        nodes.c.available > 0,
        nodes.c.capacity > nodes.c.current_load,
        nodes.c.downed == 0
    ]
    query = select([nodes]).where(and_(*where))
    # Least-loaded first: order by the load/capacity ratio.
    query = query.order_by(nodes.c.current_load / nodes.c.capacity).limit(1)
    res = self._safe_execute(service, query)
    row = res.fetchone()
    # BUGFIX: close the result set; the original leaked it.
    res.close()
    if row is None:
        # unable to get a node
        raise BackendError('unable to get a node')
    node = str(row.node)
    current_load = int(row.current_load)
    available = int(row.available)
    # Claim one slot on the chosen node.
    self.update_node(node, service, available=available - 1,
                     current_load=current_load + 1)
    # BUGFIX: return the normalized str() form computed above instead of
    # the raw row attribute, so callers always receive a plain str.
    return node
def update_user(self, service, user, generation=None, client_state=None):
    """Update the given user's account state, allocating a new record
    (and possibly a new node) when the client-state changes.

    :param service: service name the user belongs to.
    :param user: mutable dict of user state; updated in place.
    :param generation: new generation number, or None to leave unchanged.
    :param client_state: new client-state string, or None.
    :raises BackendError: if ``client_state`` was seen before.
    """
    if client_state is None:
        # uid can stay the same, just update the generation number.
        if generation is not None:
            params = {
                'service': service,
                'email': user['email'],
                'generation': generation
            }
            res = self._safe_execute(_UPDATE_GENERATION_NUMBER, **params)
            res.close()
            # Never move the generation number backwards.
            user['generation'] = max(generation, user['generation'])
    else:
        # reject previously-seen client-state strings.
        if client_state == user['client_state']:
            raise BackendError('previously seen client-state string')
        if client_state in user['old_client_states']:
            raise BackendError('previously seen client-state string')
        # need to create a new record for new client_state.
        # try to keep them on the same node, but if e.g. it no longer
        # exists them allocate them to a new one.
        try:
            nodeid = self.get_node_id(service, user['node'])
        except ValueError:
            nodeid, node = self.get_best_node(service)
            user['node'] = node
        if generation is not None:
            generation = max(user['generation'], generation)
        else:
            generation = user['generation']
        now = get_timestamp()
        params = {
            'service': service,
            'email': user['email'],
            'nodeid': nodeid,
            'generation': generation,
            'client_state': client_state,
            'timestamp': now,
        }
        res = self._safe_execute(_CREATE_USER_RECORD, **params)
        # BUGFIX: read lastrowid *before* closing the result.  The original
        # closed first and then read res.lastrowid, which depends on the
        # driver keeping a dead cursor usable (the sibling implementation
        # reads it before close).
        user['uid'] = res.lastrowid
        res.close()
        user['generation'] = generation
        user['old_client_states'][user['client_state']] = True
        user['client_state'] = client_state
        # mark old records as having been replaced.
        # if we crash here, they are unmarked and we may fail to
        # garbage collect them for a while, but the active state
        # will be undamaged.
        self.replace_user_records(service, user['email'], now)
def _safe_execute(self, *args, **kwds): """Execute an sqlalchemy query, raise BackendError on failure.""" try: return self._engine.execute(*args, **kwds) except (OperationalError, TimeoutError), exc: err = traceback.format_exc() logger.error(err) raise BackendError(str(exc))
def update_user(self, service, user, generation=None, client_state=None):
    """Update in-memory account state for an existing user.

    A new ``client_state`` also assigns a fresh uid; a new ``generation``
    simply overwrites the stored value.

    :param service: service name the user belongs to.
    :param user: mutable dict of user state; updated in place.
    :raises BackendError: if the (service, email) pair is unknown.
    """
    if (service, user['email']) not in self._users:
        # BUGFIX: restore the '+' concatenation that had been mangled
        # into '******' in the source; without it this line is a
        # syntax error.
        raise BackendError('unknown user: ' + user['email'])
    if generation is not None:
        user['generation'] = generation
    if client_state is not None:
        # Remember the outgoing client-state so it can be rejected later.
        user['old_client_states'][user['client_state']] = True
        user['client_state'] = client_state
        # A client-state change means a brand-new uid.
        user['uid'] = self._next_uid
        self._next_uid += 1
def allocate_node(self, email, service):
    """Assign a backend node to the given email for the given service.

    :returns: a (uid, node) tuple for the newly-created assignment.
    :raises BackendError: if the user already has a node assigned.
    """
    existing = self.get_node(email, service)
    if existing != (None, None):
        raise BackendError("Node already assigned")
    # Pick the least-loaded node, then persist the assignment.
    node = self.get_best_node(service)
    result = self._safe_execute(service, _INSERT, email=email, node=node)
    uid = result.lastrowid
    return uid, node
def report_backend_errors_wrapper(self, *args, **kwds):
    """Call the wrapped method, converting unexpected operational
    database errors into BackendError instances.

    NOTE(review): relies on `func`, `is_operational_db_error`,
    `traceback` and `BackendError` from the enclosing (decorator) scope.
    """
    try:
        return func(self, *args, **kwds)
    except Exception, exc:
        # Anything that isn't a db-level operational error propagates as-is.
        if not is_operational_db_error(self._connector.engine, exc):
            raise
        # An unexpected database-level error.
        # Log the error, then normalize it into a BackendError instance.
        # Note that this will not catch logic errors such as IntegrityError,
        # only unexpected operational errors from the database.
        err = traceback.format_exc()
        self.logger.error(err)
        raise BackendError(str(exc))
def _get_service_id(self, service):
    """Map a service name to its numeric id, memoizing lookups.

    :raises BackendError: if the service name is not in the database.
    """
    cache = self._cached_service_ids
    if service in cache:
        return cache[service]
    # Cache miss: look the id up in the services table.
    services = self._get_services_table(service)
    query = select([services.c.id]).where(services.c.service == service)
    res = self._safe_execute(query)
    row = res.fetchone()
    res.close()
    if row is None:
        raise BackendError('unknown service: ' + service)
    service_id = row.id
    cache[service] = service_id
    return service_id
def allocate_user(self, service, email, generation=0, client_state=''):
    """Create, store and return an in-memory record for a new user.

    The new user is pinned to the single configured service entry node
    and receives the next sequential uid.

    :raises BackendError: if the (service, email) pair already exists.
    """
    key = (service, email)
    if key in self._users:
        raise BackendError('user already exists: ' + email)
    record = dict(
        email=email,
        uid=self._next_uid,
        node=self.service_entry,
        generation=generation,
        client_state=client_state,
        old_client_states={},
    )
    self._users[key] = record
    self._next_uid += 1
    return record
def get_best_node(self, service):
    """Returns the 'least loaded' node currently available, increments the
    active count on that node, and decrements the slots currently available
    """
    nodes = self._get_nodes_table(service)
    # From here on `service` holds the numeric service id, not the name.
    service = self._get_service_id(service)
    where = [
        nodes.c.service == service,
        nodes.c.available > 0,
        nodes.c.capacity > nodes.c.current_load,
        nodes.c.downed == 0
    ]
    query = select([nodes]).where(and_(*where))
    if self._is_sqlite:
        # sqlite doesn't have the 'log' function, and requires
        # coercion to a float for the sorting to work.
        query = query.order_by(nodes.c.current_load * 1.0 / nodes.c.capacity)
    else:
        # using log() increases floating-point precision on mysql
        # and thus makes the sorting more accurate.
        query = query.order_by(
            sqlfunc.log(nodes.c.current_load) / sqlfunc.log(nodes.c.capacity))
    query = query.limit(1)
    res = self._safe_execute(query)
    one = res.fetchone()
    if one is None:
        # unable to get a node
        res.close()
        raise BackendError('unable to get a node')
    nodeid = one.id
    node = str(one.node)
    res.close()
    # updating the table
    # NOTE(review): this read-then-update is racy under concurrent
    # allocations; load counters may drift slightly.
    where = [nodes.c.service == service, nodes.c.node == node]
    where = and_(*where)
    fields = {
        'available': nodes.c.available - 1,
        'current_load': nodes.c.current_load + 1
    }
    query = update(nodes, where, fields)
    con = self._safe_execute(query, close=True)
    con.close()
    return nodeid, node
def report_backend_errors_wrapper(self, *args, **kwds): try: return func(self, *args, **kwds) except Exception, exc: if not is_operational_db_error(self._connector.engine, exc): raise # An unexpected database-level error. # Log the error, then normalize it into a BackendError instance. # Note that this will not catch logic errors such as e.g. an # IntegrityError, only unexpected operational errors from the # database such as e.g. disconnects and timeouts. err = traceback.format_exc() err = "Caught operational db error: %s\n%s" % (exc, err) logger.error(err) raise BackendError(str(exc))
def update_user(self, service, user, generation=None, client_state=None,
                keys_changed_at=None, node=None):
    """Update in-memory account state for an existing user.

    A new ``client_state`` also assigns a fresh uid.  The stored record
    is synchronized with the caller-supplied ``user`` dict at the end.

    :raises BackendError: if the (service, email) pair is unknown.
    :raises ValueError: if ``node`` is not the configured service entry.
    """
    if (service, user['email']) not in self._users:
        # BUGFIX: restore the '+' concatenation that had been mangled
        # into '******' in the source; without it this line is a
        # syntax error.
        raise BackendError('unknown user: ' + user['email'])
    # Only the single configured service-entry node is valid here.
    if node is not None and node != self.service_entry:
        raise ValueError("unknown node: %s" % (node,))
    if generation is not None:
        user['generation'] = generation
    if keys_changed_at is not None:
        user['keys_changed_at'] = keys_changed_at
    if client_state is not None:
        # Remember the outgoing client-state so it can be rejected later.
        user['old_client_states'][user['client_state']] = True
        user['client_state'] = client_state
        # A client-state change means a brand-new uid.
        user['uid'] = self._next_uid
        self._next_uid += 1
    # Propagate the changes into the stored record.
    self._users[(service, user['email'])].update(user)
def allocate_user(self, service, email, generation=0, client_state='',
                  keys_changed_at=0, node=None):
    """Create an in-memory record for a new user and return a copy of it.

    :raises BackendError: if the (service, email) pair already exists.
    :raises ValueError: if ``node`` is not the configured service entry.
    """
    key = (service, email)
    if key in self._users:
        raise BackendError('user already exists: ' + email)
    # Only the single configured service-entry node is valid here.
    if node is not None and node != self.service_entry:
        raise ValueError("unknown node: %s" % (node,))
    record = dict(
        email=email,
        uid=self._next_uid,
        node=self.service_entry,
        generation=generation,
        keys_changed_at=keys_changed_at,
        client_state=client_state,
        old_client_states={},
        first_seen_at=get_timestamp(),
    )
    self._users[key] = record
    self._next_uid += 1
    # Hand back a copy so callers can't mutate the stored record directly.
    return record.copy()
def _connect(self): """Context mananager for getting a connection to memcached.""" # We could get an error while trying to create a new connection, # or when trying to use an existing connection. This outer # try-except handles the logging for both cases. try: with self.pool.reserve() as mc: # If we get an error while using the client object, # disconnect so that it will be removed from the pool. try: yield mc except (EnvironmentError, RuntimeError), err: if mc is not None: mc.disconnect() raise except (EnvironmentError, RuntimeError), err: err = traceback.format_exc() logger.error(err) raise BackendError(str(err))
def _safe_execute(self, *args, **kwds):
    """Execute an sqlalchemy query, raise BackendError on failure."""
    # Engine resolution order:
    #   1. the engine bound to the query object itself (args[0].bind),
    #   2. an explicit `engine` keyword argument (popped from kwds),
    #   3. the per-service engine looked up from the `service` kwarg.
    if hasattr(args[0], 'bind'):
        engine = args[0].bind
    else:
        engine = None
    if engine is None:
        engine = kwds.pop('engine', None)
    if engine is None:
        engine = self._get_engine(kwds.get('service'))
    if 'service' in kwds:
        # Queries expect the numeric service id, not the service name.
        kwds['service'] = self._get_service_id(kwds['service'])
    try:
        return engine.execute(*args, **kwds)
    except (OperationalError, TimeoutError), exc:
        # Log the traceback and normalize db failures to BackendError.
        err = traceback.format_exc()
        logger.error(err)
        raise BackendError(str(exc))
def execute_retry(engine, *args, **kwargs):
    """Execute a query, retrying once on MySQL error 2013
    ("Lost connection to MySQL server"); other failures, and a failed
    retry, are logged and normalized into BackendError.
    """
    try:
        return engine.execute(*args, **kwargs)
    except (OperationalError, TimeoutError), exc:
        # Error code 2013 indicates a dropped connection — worth one retry.
        retry = '2013' in str(exc)
        try:
            if retry:
                return engine.execute(*args, **kwargs)
            else:
                # re-raise
                raise exc
        except (OperationalError, TimeoutError), exc:
            err = traceback.format_exc()
            logger.error(err)
            raise BackendError(str(exc))


class SQLDatabase(object):
    # NOTE(review): the class body may continue beyond this chunk.

    implements(IAppSyncDatabase)

    def __init__(self, **options):
        # Resolve the assertion verifier: default to a remote verifier,
        # otherwise accept a dotted name, class, factory, or instance.
        verifier = options.pop("verifier", None)
        if verifier is None:
            verifier = vep.RemoteVerifier()
        else:
            verifier = maybe_resolve_name(verifier)
        # A class or factory was supplied; instantiate it.
        if callable(verifier):
            verifier = verifier()
        self._verifier = verifier
def backend_error(request):
    """View callable that always fails, raising BackendError with
    retry_after=0 (clients need not wait before retrying).

    NOTE(review): presumably wired up to exercise error-handling paths —
    confirm against the route configuration.
    """
    raise BackendError(retry_after=0)
def get_best_node(self, service, email):
    """Returns the 'least loaded' node currently available, increments the
    active count on that node, and decrements the slots currently available
    """
    nodes = self._get_nodes_table(service)
    # From here on `service` holds the numeric service id, not the name.
    service = self._get_service_id(service)
    query = select([nodes])
    # Some users are routed to the dedicated spanner node instead of the
    # general pool.
    send_to_spanner = self.should_allocate_to_spanner(email)
    if send_to_spanner:
        query = query.where(nodes.c.id == self._spanner_node_id)
    else:
        # Pick the least-loaded node that has available slots.
        query = query.where(and_(
            nodes.c.service == service,
            nodes.c.available > 0,
            nodes.c.capacity > nodes.c.current_load,
            nodes.c.downed == 0,
            nodes.c.backoff == 0
        ))
        if self._is_sqlite:
            # sqlite doesn't have the 'log' function, and requires
            # coercion to a float for the sorting to work.
            query = query.order_by(
                nodes.c.current_load * 1.0 / nodes.c.capacity)
        else:
            # using log() increases floating-point precision on mysql
            # and thus makes the sorting more accurate.
            query = query.order_by(
                sqlfunc.log(nodes.c.current_load) /
                sqlfunc.log(nodes.c.capacity))
        query = query.limit(1)
    # We may have to re-try the query if we need to release more capacity.
    # This loop allows a maximum of five retries before bailing out.
    for _ in xrange(5):
        res = self._safe_execute(query)
        row = res.fetchone()
        res.close()
        if row is None:
            # Try to release additional capacity from any nodes
            # that are not fully occupied.
            where = and_(nodes.c.service == service,
                         nodes.c.available <= 0,
                         nodes.c.capacity > nodes.c.current_load,
                         nodes.c.downed == 0)
            fields = {
                'available': self._sqlfunc_min(
                    nodes.c.capacity * self.capacity_release_rate,
                    nodes.c.capacity - nodes.c.current_load
                ),
            }
            res = self._safe_execute(update(nodes, where, fields))
            res.close()
            # NOTE(review): rowcount is read after close(); works with the
            # drivers in use but is fragile — confirm.
            if res.rowcount == 0:
                break
        else:
            break
    # Did we succeed in finding a node?
    if row is None:
        raise BackendError('unable to get a node')
    nodeid = row.id
    node = str(row.node)
    # Update the node to reflect the new assignment.
    # This is a little racy with concurrent assignments, but no big
    # deal.
    where = [nodes.c.service == service, nodes.c.node == node]
    where = and_(*where)
    fields = {'current_load': nodes.c.current_load + 1}
    if not send_to_spanner:
        fields['available'] = self._sqlfunc_max(nodes.c.available - 1, 0)
    query = update(nodes, where, fields)
    con = self._safe_execute(query, close=True)
    con.close()
    return nodeid, node
def update_user(self, service, user, generation=None, client_state=None,
                keys_changed_at=None, node=None):
    """Update the given user's account state.

    If neither ``client_state`` nor ``node`` changes, the existing row is
    updated in place; otherwise a brand-new record (new uid, possibly a
    new node) is created and old records are marked replaced.

    :param user: mutable dict of user state; updated in place.
    :raises BackendError: if ``client_state`` was seen before.
    """
    if client_state is None and node is None:
        # No need for a node-reassignment, just update the row in place.
        # Note that if we're changing keys_changed_at without changing
        # client_state, it's because we're seeing an existing value of
        # keys_changed_at for the first time.
        params = {
            'service': service,
            'email': user['email'],
            'generation': generation,
            'keys_changed_at': keys_changed_at
        }
        res = self._safe_execute(_UPDATE_USER_RECORD_IN_PLACE, **params)
        res.close()
        # BUGFIX: guard against None before taking max().  The original
        # relied on Python 2 ordering None below every number, which
        # breaks on Python 3 and obscures the intent; behavior under
        # Python 2 is unchanged.
        if generation is not None:
            user['generation'] = max(generation, user['generation'])
        if keys_changed_at is not None:
            user['keys_changed_at'] = max(keys_changed_at,
                                          user['keys_changed_at'])
    else:
        # Reject previously-seen client-state strings.
        if client_state is None:
            client_state = user['client_state']
        else:
            if client_state == user['client_state']:
                raise BackendError('previously seen client-state string')
            if client_state in user['old_client_states']:
                raise BackendError('previously seen client-state string')
        # Need to create a new record for new user state.
        # If the node is not explicitly changing, try to keep them on the
        # same node, but if e.g. it no longer exists them allocate them to
        # a new one.
        if node is not None:
            nodeid = self.get_node_id(service, node)
            user['node'] = node
        else:
            try:
                nodeid = self.get_node_id(service, user['node'])
            except ValueError:
                nodeid, node = self.get_best_node(service)
                user['node'] = node
        if generation is not None:
            generation = max(user['generation'], generation)
        else:
            generation = user['generation']
        if keys_changed_at is not None:
            keys_changed_at = max(user['keys_changed_at'], keys_changed_at)
        else:
            keys_changed_at = user['keys_changed_at']
        now = get_timestamp()
        params = {
            'service': service,
            'email': user['email'],
            'nodeid': nodeid,
            'generation': generation,
            'keys_changed_at': keys_changed_at,
            'client_state': client_state,
            'timestamp': now,
        }
        res = self._safe_execute(_CREATE_USER_RECORD, **params)
        # BUGFIX: read lastrowid *before* closing the result; the original
        # closed first, relying on the driver keeping a dead cursor usable.
        user['uid'] = res.lastrowid
        res.close()
        user['generation'] = generation
        user['keys_changed_at'] = keys_changed_at
        user['old_client_states'][user['client_state']] = True
        user['client_state'] = client_state
        # mark old records as having been replaced.
        # if we crash here, they are unmarked and we may fail to
        # garbage collect them for a while, but the active state
        # will be undamaged.
        self.replace_user_records(service, user['email'], now)