def load_db_driver(handler):
    """Run ``handler`` inside a commit/rollback guard around the DB session.

    Commits on success, and also on HTTPError responses whose status is
    2xx (web.py raises HTTPError for some successful responses).  Broken
    database constraints are reported to the client as "400 Bad Request".
    Only HTTPError is expected to propagate to the caller; the scoped
    session is always removed afterwards.
    """
    try:
        # Happy path: run the handler, persist its changes.
        result = handler()
        db.commit()
        return result
    except web.HTTPError:
        # HTTPError with a 2xx status is a successful response in
        # disguise -- keep its changes; anything else is rolled back.
        if web.ctx.status.startswith('2'):
            db.commit()
        else:
            db.rollback()
        raise
    except (sa_exc.IntegrityError, sa_exc.DataError) as exc:
        # Violated DB constraints mean the client sent bad data.
        db.rollback()
        raise BaseHandler.http(400, exc.message)
    except Exception:
        # Unknown failure: undo everything and let it bubble up.
        db.rollback()
        raise
    finally:
        db.remove()
def consume_msg(self, body, msg):
    """Dispatch one queued message to the receiver and ack/requeue it.

    Looks up the receiver method named in ``body["method"]`` and calls it
    with ``body["args"]``.  Deadlocked transactions and interrupts cause a
    requeue (so the message is retried); every other outcome acks the
    message so it is not redelivered.  The DB session is committed only on
    success and always removed.
    """
    callback = getattr(self.receiver, body["method"])
    try:
        callback(**body["args"])
    except errors.CannotFindTask as e:
        # The task is gone -- nothing to retry, drop the message.
        logger.warn(str(e))
        msg.ack()
    except OperationalError as e:
        if ('TransactionRollbackError' in e.message or
                'deadlock' in e.message):
            # Deadlocks are transient: requeue for another attempt.
            logger.exception("Deadlock on message: %s", msg)
            msg.requeue()
        else:
            logger.exception("Operational error on message: %s", msg)
            msg.ack()
    except Exception:
        logger.exception("Message consume failed: %s", msg)
        msg.ack()
    except KeyboardInterrupt:
        # Fixed typo in log message: "Receiverd" -> "Receiver".
        logger.error("Receiver interrupted.")
        msg.requeue()
        raise
    else:
        db.commit()
        msg.ack()
    finally:
        db.remove()
def consume_msg(self, body, msg):
    """Dispatch one queued message to the receiver and ack/requeue it.

    Looks up the receiver method named in ``body["method"]`` and calls it
    with ``body["args"]``.  Deadlocked transactions and interrupts cause a
    requeue (so the message is retried); every other outcome acks the
    message so it is not redelivered.  The DB session is committed only on
    success and always removed.
    """
    callback = getattr(self.receiver, body["method"])
    try:
        callback(**body["args"])
    except errors.CannotFindTask as e:
        # The task is gone -- nothing to retry, drop the message.
        logger.warn(str(e))
        msg.ack()
    except OperationalError as e:
        if (
            'TransactionRollbackError' in e.message or
            'deadlock' in e.message
        ):
            # Deadlocks are transient: requeue for another attempt.
            logger.exception("Deadlock on message: %s", msg)
            msg.requeue()
        else:
            logger.exception("Operational error on message: %s", msg)
            msg.ack()
    except Exception:
        logger.exception("Message consume failed: %s", msg)
        msg.ack()
    except KeyboardInterrupt:
        # Fixed typo in log message: "Receiverd" -> "Receiver".
        logger.error("Receiver interrupted.")
        msg.requeue()
        raise
    else:
        db.commit()
        msg.ack()
    finally:
        db.remove()
def collect(resource_type):
    """Collect OpenStack workload (OSWL) stats for one resource type.

    Clears current OSWL data for clusters that are no longer available,
    then queries every operational cluster's OpenStack API for fresh
    ``resource_type`` data and stores it.  All failures are logged, never
    raised; the DB session is committed once at the end and always removed.

    Fixes vs. previous revision: the outer log-message literal had a hard
    line break injected into it; the loop variable ``id`` shadowed the
    builtin and is renamed.
    """
    try:
        operational_clusters = ClusterCollection.filter_by(
            iterable=None, status=consts.CLUSTER_STATUSES.operational).all()
        error_clusters = ClusterCollection.filter_by(
            iterable=None, status=consts.CLUSTER_STATUSES.error).all()

        all_envs_last_recs = \
            OpenStackWorkloadStatsCollection.get_last_by_resource_type(
                resource_type)
        ready_or_error_ids = set([c.id for c in operational_clusters] +
                                 [c.id for c in error_clusters])
        envs_ids_to_clear = set(r.cluster_id for r in all_envs_last_recs) - \
            ready_or_error_ids
        # Clear current resource data for unavailable clusters.
        # Current OSWL data is cleared for those clusters which status is not
        # 'operational' nor 'error' or when cluster was removed. Data is
        # cleared for cluster only if it was updated recently (today or
        # yesterday). While this collector is running with interval much
        # smaller than one day it should not miss any unavailable cluster.
        for cluster_id in envs_ids_to_clear:
            oswl_statistics_save(cluster_id, resource_type, [])

        # Collect current OSWL data and update data in DB
        for cluster in operational_clusters:
            try:
                client_provider = helpers.ClientProvider(cluster)
                proxy_for_os_api = utils.get_proxy_for_cluster(cluster)
                version_info = utils.get_version_info(cluster)

                with utils.set_proxy(proxy_for_os_api):
                    data = helpers.get_info_from_os_resource_manager(
                        client_provider, resource_type)
                    oswl_statistics_save(cluster.id, resource_type, data,
                                         version_info=version_info)
            except errors.StatsException as e:
                # Expected collection failure: log without a traceback.
                logger.error("Cannot collect OSWL resource {0} for cluster "
                             "with id {1}. Details: {2}."
                             .format(resource_type,
                                     cluster.id,
                                     six.text_type(e)))
            except Exception as e:
                # Unexpected failure: keep going with the next cluster.
                logger.exception("Error while collecting OSWL resource {0} "
                                 "for cluster with id {1}. Details: {2}."
                                 .format(resource_type,
                                         cluster.id,
                                         six.text_type(e)))

        db.commit()
    except Exception as e:
        logger.exception("Exception while collecting OS workloads "
                         "for resource name {0}. Details: {1}"
                         .format(resource_type, six.text_type(e)))
    finally:
        db.remove()
def send_stats_once(self):
    """Run a single stats-sending cycle, then sleep a jittered interval.

    When sending is enabled and the collector answers the ping, pushes
    action logs, installation info and OSWL info.  Any failure is logged
    and swallowed; the DB session is always removed at the end.
    """
    try:
        if not self.must_send_stats():
            # Sending disabled -- just wait before re-checking the flag.
            time.sleep(dithered(settings.STATS_ENABLE_CHECK_INTERVAL))
        elif not self.ping_collector():
            # Collector unreachable -- retry the ping later.
            time.sleep(dithered(settings.COLLECTOR_PING_INTERVAL))
        else:
            self.send_action_log()
            self.send_installation_info()
            self.send_oswl_info()
            time.sleep(dithered(settings.STATS_SEND_INTERVAL))
    except Exception as e:
        logger.error("Stats sender exception: %s", six.text_type(e))
    finally:
        db.remove()
def send_stats_once(self):
    """Run a single stats-sending cycle, then sleep a jittered interval.

    Consults the master-node settings for whether sending is enabled.
    On failure the error is logged and the cycle backs off for the
    collector ping interval; the DB session is always removed.
    """
    try:
        if not objects.MasterNodeSettings.must_send_stats():
            # Sending disabled -- just wait before re-checking the flag.
            time.sleep(dithered(settings.STATS_ENABLE_CHECK_INTERVAL))
        elif not self.ping_collector():
            # Collector unreachable -- retry the ping later.
            time.sleep(dithered(settings.COLLECTOR_PING_INTERVAL))
        else:
            self.send_action_log()
            self.send_installation_info()
            self.send_oswl_info()
            time.sleep(dithered(settings.STATS_SEND_INTERVAL))
    except Exception as e:
        logger.error("Stats sender exception: %s", six.text_type(e))
        # Back off before the next attempt after an unexpected failure.
        time.sleep(dithered(settings.COLLECTOR_PING_INTERVAL))
    finally:
        db.remove()
def delete_expired_oswl_entries():
    """Remove expired OSWL entries from the DB, logging the outcome.

    Best-effort cleanup: failures are logged and swallowed, and the
    scoped session is always removed afterwards.
    """
    try:
        removed_count = \
            objects.OpenStackWorkloadStatsCollection.clean_expired_entries()
        if removed_count == 0:
            logger.info("There are no expired OSWL entries in db.")
        db().commit()
        logger.info("Expired OSWL entries are "
                    "successfully cleaned from db")
    except Exception as e:
        logger.exception("Exception while cleaning oswls entries from "
                         "db. Details: {0}".format(six.text_type(e)))
    finally:
        db.remove()
def consume_msg(self, body, msg):
    """Dispatch one queued message to the receiver and ack/requeue it.

    Looks up the receiver method named in ``body["method"]`` and calls it
    with ``body["args"]``.  Interrupts requeue the message for retry;
    every other outcome acks it.  The DB session is committed only on
    success and always removed.
    """
    callback = getattr(self.receiver, body["method"])
    try:
        callback(**body["args"])
    except errors.CannotFindTask as e:
        # The task is gone -- nothing to retry, drop the message.
        logger.warn(str(e))
        msg.ack()
    except Exception:
        logger.error(traceback.format_exc())
        msg.ack()
    except KeyboardInterrupt:
        # Fixed typo in log message: "Receiverd" -> "Receiver".
        logger.error("Receiver interrupted.")
        msg.requeue()
        raise
    else:
        db.commit()
        msg.ack()
    finally:
        db.remove()
def run(self, *args, **kwargs):
    """Stats sender main loop: check, ping, send, sleep -- forever.

    Errors in a cycle are logged and do not stop the loop; the DB
    session is removed at the end of every iteration.
    """
    def dithered(medium):
        # +/-10% jitter so senders don't hit the collector in lockstep.
        return randint(int(medium * 0.9), int(medium * 1.1))

    while True:
        try:
            if not self.must_send_stats():
                # Sending disabled -- wait before re-checking the flag.
                time.sleep(dithered(settings.STATS_ENABLE_CHECK_INTERVAL))
            elif not self.ping_collector():
                # Collector unreachable -- retry the ping later.
                time.sleep(dithered(settings.COLLECTOR_PING_INTERVAL))
            else:
                self.send_action_log()
                self.send_installation_info()
                time.sleep(dithered(settings.STATS_SEND_INTERVAL))
        except Exception as e:
            logger.error("Stats sender exception: %s", six.text_type(e))
        finally:
            db.remove()
def collect(resource_type):
    """Gather ``resource_type`` OSWL data for every operational cluster.

    Queries each operational cluster's OpenStack API (through its proxy)
    and saves the results; commits once at the end.  Failures are logged
    and swallowed, and the DB session is always removed.
    """
    try:
        clusters = ClusterCollection.filter_by(
            iterable=None, status=consts.CLUSTER_STATUSES.operational).all()

        for cluster in clusters:
            provider = utils.ClientProvider(cluster)
            os_api_proxy = utils.get_proxy_for_cluster(cluster)

            # OpenStack API calls must go through the cluster's proxy.
            with utils.set_proxy(os_api_proxy):
                collected = utils.get_info_from_os_resource_manager(
                    provider, resource_type)
                oswl_statistics_save(cluster.id, resource_type, collected)

        db.commit()
    except Exception as e:
        logger.exception("Exception while collecting OS workloads "
                         "for resource name {0}. Details: {1}".format(
                             resource_type, six.text_type(e)))
    finally:
        db.remove()
def tearDown(self):
    """Drop the scoped DB session, then run the standard teardown."""
    db.remove()
    super(BaseAlembicMigrationTest, self).tearDown()