Example #1
def check_and_send_restart_signal() -> None:
    try:
        if not connection.is_usable():
            logging.warning("*** Sending self SIGUSR1 to trigger a restart.")
            os.kill(os.getpid(), signal.SIGUSR1)
    except Exception:
        pass
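The function above only sends the signal; it assumes the process has a SIGUSR1 handler (or an external supervisor) that performs the restart. A hypothetical companion handler, not part of the original example, might look like this:

import os
import signal
import sys

def restart_on_sigusr1(signum, frame):
    # Hypothetical: re-exec the current process with the same interpreter
    # and arguments, effectively restarting it in place.
    os.execv(sys.executable, [sys.executable] + sys.argv)

signal.signal(signal.SIGUSR1, restart_on_sigusr1)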
Example #2
    def run(self):
        # get the vehicle record
        try:
            vehicle = Vehicle.objects.get(veh_name=self.vehicle_name)
        except Vehicle.DoesNotExist:
            logger.error("%s: Vehicle '%s' does not exist in database. Add it first.", MY_NAME, self.vehicle_name)
            sys.exit(2)

        # start GPS polling thread
        self.gps_poller.start()

        # catch signals for proper shutdown
        for sig in (SIGABRT, SIGTERM, SIGINT):
            signal(sig, self.cleanup)

        # main execution loop
        while True:
            try:
                time.sleep(self.interval)
                # If we are idle too long the database server may
                # close the connection on us, ping the server to check if
                # the connection is still up.
                if connection.connection is not None:
                    if connection.is_usable():
                        logger.debug("%s: Database connection is up.", MY_NAME)
                    else:
                        logger.error("%s: Database connection is down.", MY_NAME)
                        connection.close()
                else:
                    logger.error("%s: Database connection is closed.", MY_NAME)

                # process GPS data
                session = self.gps_poller.session
                if (session.fix.mode == MODE_NO_FIX) and not self.nofix:
                    logger.info("%s: Waiting for GPS to fix...", MY_NAME)
                    continue

                if not isnan(session.fix.time):
                    if (session.fix.speed < 0.1) and (self.last_speed < 0.1):
                        continue
                    self.last_speed = session.fix.speed
                    # if the time is valid the data record is valid
                    location = Location()
                    location.loc_vehicle = vehicle
                    location.loc_time = session.utc
                    location.loc_latitude = session.fix.latitude
                    location.loc_longitude = session.fix.longitude
                    if session.fix.mode == MODE_3D:
                        location.loc_altitude = session.fix.altitude
                    location.loc_speed = session.fix.speed
                    location.loc_climb = session.fix.climb
                    location.loc_track = session.fix.track
                    location.save()
                    logger.info("%s: Valid location: %s", MY_NAME, location)
                else:
                    logger.debug("%s: Invalid location.", MY_NAME)

            except KeyboardInterrupt:
                print ("\n")
                break
Example #3
def check_db_connection():
    from django.db import connection

    if connection.connection:
        #NOTE: (zacky, 2016.MAR.21st) IF CONNECTION IS CLOSED BY BACKEND, CLOSE IT AT DJANGO, WHICH WILL BE SETUP AFTERWARDS.
        if not connection.is_usable():
            connection.close()
Example #4
def make_sure_mysql_usable():
    # mysql is lazily connected to in django.
    # connection.connection is None means
    # you have not connected to mysql before
    if connection.connection and not connection.is_usable():
        # destroy the default mysql connection
        # after this line, when you use ORM methods
        # django will reconnect to the default mysql
        del connections._connections.default
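Deleting connections._connections.default relies on a private attribute. A minimal alternative sketch, assuming Django 1.6 or later, is to let Django itself discard connections that are unusable or past CONN_MAX_AGE:

from django.db import close_old_connections

def make_sure_mysql_usable():
    # close_old_connections() calls close_if_unusable_or_obsolete() on each
    # connection; the ORM then reconnects lazily on the next query.
    close_old_connections()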
Example #5
def make_sure_mysql_usable():
    # mysql is lazily connected to in django.
    # connection.connection is None means
    # you have not connected to mysql before
    if connection.connection and not connection.is_usable():
        # destroy the default mysql connection
        # after this line, when you use ORM methods
        # django will reconnect to the default mysql
        del connections._connections.default
Example #6
    def BookFilter(self, query):
        if connection.connection and not connection.is_usable():
            del(connections._connections.default)

        q_objects = Q()
        q_objects.add(Q(search_title__contains=query.upper()), Q.OR)
        q_objects.add( Q(authors__search_full_name__contains=query.upper()), Q.OR)
        books = Book.objects.filter(q_objects).order_by('search_title', '-docdate').distinct()

        return books
Example #7
def execute_callback(task):
    """异步任务的回调, 将结果填入数据库等等
    使用django-q的hook, 传入参数为整个task
    task.result 是真正的结果
    """
    # https://stackoverflow.com/questions/7835272/django-operationalerror-2006-mysql-server-has-gone-away
    if connection.connection and not connection.is_usable():
        close_old_connections()
    workflow_id = task.args[0]
    workflow = SqlWorkflow.objects.get(id=workflow_id)
    workflow.finish_time = task.stopped

    if not task.success:
        # On failure the task returns a stack trace; build an error result from it
        workflow.status = 'workflow_exception'
        execute_result = ReviewSet(
            full_sql=workflow.sqlworkflowcontent.sql_content)
        execute_result.rows = [
            ReviewResult(stage='Execute failed',
                         errlevel=2,
                         stagestatus='异常终止',
                         errormessage=task.result,
                         sql=workflow.sqlworkflowcontent.sql_content)
        ]
    elif task.result.warning or task.result.error:
        execute_result = task.result
        workflow.status = 'workflow_exception'
    else:
        execute_result = task.result
        workflow.status = 'workflow_finish'
    # Save the execution result
    workflow.sqlworkflowcontent.execute_result = execute_result.json()
    workflow.sqlworkflowcontent.save()
    workflow.save()

    # Add a workflow audit log entry
    audit_id = Audit.detail_by_workflow_id(
        workflow_id=workflow_id,
        workflow_type=WorkflowDict.workflow_type['sqlreview']).audit_id
    Audit.add_log(audit_id=audit_id,
                  operation_type=6,
                  operation_type_desc='执行结束',
                  operation_info='执行结果:{}'.format(
                      workflow.get_status_display()),
                  operator='',
                  operator_display='系统')

    # Clear the instance resource cache after a DDL workflow finishes
    if workflow.syntax_type == 1:
        r = get_redis_connection("default")
        for key in r.scan_iter(match='*insRes*', count=2000):
            r.delete(key)

    # Send a notification
    notify_for_execute(workflow)
Example #8
    def BookFilter(self, query):
        if connection.connection and not connection.is_usable():
            del connections._connections.default

        q_objects = Q()
        q_objects.add(Q(search_title__contains=query.upper()), Q.OR)
        q_objects.add(Q(authors__search_full_name__contains=query.upper()),
                      Q.OR)
        books = Book.objects.filter(q_objects).order_by(
            'search_title', '-docdate').distinct()

        return books
Example #9
def handler(event, context):
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "regcore.settings")
    import django
    django.setup()

    from django.db import connection
    connection.ensure_connection()
    if not connection.is_usable():
        raise Exception("database is unreachable")

    from django.core.management import call_command
    call_command('migrate')
Example #10
def make_sure_database_is_usable():
    """
    https://github.com/speedocjx/db_platform/blob/e626a12edf8aceb299686fe19377cd6ff331b530/myapp/include/inception.py#L14
    """
    if connection.connection and not connection.is_usable():
        """
        Database might be lazily connected to in django.
        When connection.connection is None means you have not connected to mysql before.        
        Destroy the default mysql connection after this line, 
        when you use ORM methods django will reconnect to the default database
        """
        del connections._connections.default
Example #11
def database_connected(app_configs, **kwargs):
    errors = []

    try:
        connection.ensure_connection()
    except OperationalError:
        errors.append(Error('Could not connect to database', id=ERROR_CANNOT_CONNECT_DATABASE))
    else:
        if not connection.is_usable():
            errors.append(Error('Database connection is not usable', id=ERROR_UNUSABLE_DATABASE))

    return errors
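How a check like this is typically registered (a sketch based on Django's system check framework; the original registration code is not shown here):

from django.core import checks

# Register the check under the database tag so Django's check framework runs it.
checks.register(database_connected, checks.Tags.database)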
Example #12
 def check_settings(self):
     if connection.connection and not connection.is_usable():
         del(connections._connections.default)        
     settings.constance_update_all()
     if not (self.SCAN_SHED_MIN==config.SOPDS_SCAN_SHED_MIN and \
        self.SCAN_SHED_HOUR==config.SOPDS_SCAN_SHED_HOUR and \
        self.SCAN_SHED_DOW==config.SOPDS_SCAN_SHED_DOW and \
        self.SCAN_SHED_DAY==config.SOPDS_SCAN_SHED_DAY):
         self.update_shedule()
     if config.SOPDS_SCAN_START_DIRECTLY:
         config.SOPDS_SCAN_START_DIRECTLY = False
         self.stdout.write('Startup scanning directly by SOPDS_SCAN_START_DIRECTLY flag.')
         self.sched.add_job(self.scan, id='scan_directly')
Example #13
def make_sure_mysql_usable():
    """
    Credit:
        https://github.com/speedocjx/db_platform/blob/master/myapp/include/inception.py#L14
    """
    # mysql is lazily connected to in django.
    # connection.connection is None means
    # you have not connected to mysql before
    if connection.connection and not connection.is_usable():
        # destroy the default mysql connection
        # after this line, when you use ORM methods
        # django will reconnect to the default mysql
        del connections._connections.default
Example #14
 def test_posthook(self) -> None:
     cb = Mock(name='post_reconnect_hook')
     ddr.pre_reconnect.connect(fix_connection)
     ddr.post_reconnect.connect(cb)
     from django.db import connection
     connection.close()
     connection.s_connect = connection.connect
     connection.connect = Mock(side_effect=OperationalError('reconnect testing'))
     connection.ensure_connection()
     ReconnectTests.cls_atomics['default'] = transaction.atomic(using='default')
     ReconnectTests.cls_atomics['default'].__enter__()
     self.assertTrue(cb.called)
     self.assertTrue(connection.is_usable())
Example #15
def check_db_connection():
    '''
    from: https://stackoverflow.com/questions/7835272/django-operationalerror-2006-mysql-server-has-gone-away
    '''
    # mysql is lazily connected to in django.
    # connection.connection is None means
    # you have not connected to mysql before
    if connection.connection and not connection.is_usable():
        # destroy the default mysql connection
        # after this line, when you use ORM methods
        # django will reconnect to the default mysql
        logger.debug('deleted default connection')
        del connections._connections.default
Example #16
def check_db_connection():
    '''
    from: https://stackoverflow.com/questions/7835272/django-operationalerror-2006-mysql-server-has-gone-away
    '''
    # mysql is lazily connected to in django.
    # connection.connection is None means
    # you have not connected to mysql before
    if connection.connection and not connection.is_usable():
        # destroy the default mysql connection
        # after this line, when you use ORM methods
        # django will reconnect to the default mysql
        logger.debug('deleted default connection')
        del connections._connections.default
Example #17
    def test_posthook(self):
        cb = Mock(name='post_reconnect_hook')
        ddr.pre_reconnect.connect(fix_connection)
        ddr.post_reconnect.connect(cb)
        from django.db import connection
        connection.close()
        self.assertIsNone(connection.connection)

        connection.s_connect = connection.connect
        connection.connect = Mock(side_effect=OperationalError('reconnect testing'))
        connection.ensure_connection()

        self.assertTrue(cb.called)
        self.assertTrue(connection.is_usable())
Example #18
def ensure_mysql_connection_usable():
    """Ensure that MySQL connection is usable

    From: http://stackoverflow.com/questions/7835272/django-operationalerror-2006-mysql-server-has-gone-away
    """
    from django.db import connection, connections
    # MySQL is lazily connected to in Django.
    # connection.connection is None means
    # you have not connected to MySQL before
    if connection.connection and not connection.is_usable():
        # destroy the default MySQL connection
        # after this line, when you use ORM methods
        # Django will reconnect to the default MySQL
        del connections._connections.default
Example #19
 def scan(self):
     if self.scan_is_active:
         self.stdout.write('Scan process already active. Skip current job.')
         return
     
     self.scan_is_active = True
     
     if connection.connection and not connection.is_usable():
         del(connections._connections.default)
             
     scanner=opdsScanner(self.logger)
     with transaction.atomic():
         scanner.scan_all()
     Counter.objects.update_known_counters()  
     self.scan_is_active = False
Example #20
    def on_finish(self, *args, **kargs):
        self.session.close()
        for c in connections.all():
            try:
                c._commit()
            except:
                pass

        if connection.connection and not connection.is_usable():
            # destroy the default mysql connection
            # after this line, when you use ORM methods
            # django will reconnect to the default mysql
            del connections._connections.default

        return
Example #21
    def scan(self):
        if self.scan_is_active:
            self.stdout.write('Scan process already active. Skip current job.')
            return

        self.scan_is_active = True

        if connection.connection and not connection.is_usable():
            del (connections._connections.default)

        scanner = opdsScanner(self.logger)
        with transaction.atomic():
            scanner.scan_all()
        Counter.objects.update_known_counters()
        self.scan_is_active = False
Example #22
    def on_finish(self, *args, **kargs):
        self.session.close()
        for c in connections.all():
            try:
                c._commit()
            except:
                pass

        if connection.connection and not connection.is_usable():
            # destroy the default mysql connection
            # after this line, when you use ORM methods
            # django will reconnect to the default mysql
            del connections._connections.default

        return
Example #23
def update_index(arguments):
    # the spooler is forked so the db connection might be closed
    if not connection.is_usable():
        connection.close()

    category_id = arguments.get("category_id")
    product_id = arguments.get("product_id")

    if category_id is not None:
        products = ProductProxy.objects.filter(categories__id=category_id)
        for product in products:
            index.insert_or_update_object(product)

    if product_id is not None:
        product = ProductProxy.objects.get(pk=product_id)
        index.insert_or_update_object(product)
Example #24
def database_connected(app_configs, **kwargs):
    errors = []

    try:
        connection.ensure_connection()
    except OperationalError as e:
        msg = 'Could not connect to database: {!s}'.format(e)
        errors.append(Error(msg, id=ERROR_CANNOT_CONNECT_DATABASE))
    except ImproperlyConfigured as e:
        msg = 'Database misconfigured: "{!s}"'.format(e)
        errors.append(Error(msg, id=ERROR_MISCONFIGURED_DATABASE))
    else:
        if not connection.is_usable():
            errors.append(Error('Database connection is not usable', id=ERROR_UNUSABLE_DATABASE))

    return errors
Example #25
def database_connected(app_configs, **kwargs):
    errors = []

    try:
        connection.ensure_connection()
    except OperationalError:
        errors.append(
            Error('Could not connect to database',
                  id=ERROR_CANNOT_CONNECT_DATABASE))
    else:
        if not connection.is_usable():
            errors.append(
                Error('Database connection is not usable',
                      id=ERROR_UNUSABLE_DATABASE))

    return errors
Example #26
 def handle(self, *args, **options):
     """
     Performs a default database ping test. Prints result to stdout
     2019-12-05
     :param args: not used
     :param options: ['wait_interval']
     :return: None
     """
     if len(options):
         sleep(options['wait_interval'])
     if connection.is_usable():
         self.stdout.write(self.style.SUCCESS("database ping success"))
         exit(0)
     else:
         self.stderr.write(self.style.ERROR("database ping failed"))
         exit(1)
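Since the command reads options['wait_interval'], the class presumably declares that argument as well; a plausible add_arguments sketch (an assumption, it is not part of the example):

 def add_arguments(self, parser):
     # Hypothetical argument declaration for the ping command.
     parser.add_argument('wait_interval', nargs='?', type=int, default=0,
                         help='seconds to sleep before the database ping')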
Example #27
    def prepare(self):

        if connection.connection and not connection.is_usable():
            # destroy the default mysql connection

            # after this line, when you use ORM methods
            # django will reconnect to the default mysql
            del connections._connections.default


        for c in connections.all():
            try:
                c._commit()
            except:
                pass

        return
Example #28
    def prepare(self):

        if connection.connection and not connection.is_usable():
            # destroy the default mysql connection

            # after this line, when you use ORM methods
            # django will reconnect to the default mysql
            del connections._connections.default


        for c in connections.all():
            try:
                c._commit()
            except:
                pass

        return
Example #29
def database_connected(app_configs, **kwargs):
    errors = []

    try:
        connection.ensure_connection()
    except OperationalError as e:
        msg = 'Could not connect to database: {!s}'.format(e)
        errors.append(Error(msg, id=ERROR_CANNOT_CONNECT_DATABASE))
    except ImproperlyConfigured as e:
        msg = 'Database misconfigured: "{!s}"'.format(e)
        errors.append(Error(msg, id=ERROR_MISCONFIGURED_DATABASE))
    else:
        if not connection.is_usable():
            errors.append(
                Error('Database connection is not usable',
                      id=ERROR_UNUSABLE_DATABASE))

    return errors
Example #30
    def wrapper(self, bot, update):
        if not config.SOPDS_TELEBOT_AUTH:
            return func(self, bot, update)

        if connection.connection and not connection.is_usable():
            del(connections._connections.default)

        query = update.message if update.message else update.callback_query.message
        username = update.message.from_user.username if update.message else update.callback_query.from_user.username
        users = User.objects.filter(username__iexact=username)

        if users and users[0].is_active:
            return func(self, bot, update)

        bot.sendMessage(chat_id=query.chat_id,
                        text=_("Hello %s!\nUnfortunately you do not have access to information. Please contact the bot administrator.") % username)
        self.logger.info(_("Denied access for user: %s") % username)

        return
Example #31
 def test_is_usable_after_database_disconnects(self):
     """
     is_usable() doesn't crash when the database disconnects (#21553).
     """
     # Open a connection to the database.
     with connection.cursor():
         pass
     # Emulate a connection close by the database.
     connection._close()
     # Even then is_usable() should not raise an exception.
     try:
         self.assertFalse(connection.is_usable())
     finally:
         # Clean up the mess created by connection._close(). Since the
         # connection is already closed, this crashes on some backends.
         try:
             connection.close()
         except Exception:
             pass
Example #32
 def test_is_usable_after_database_disconnects(self):
     """
     is_usable() doesn't crash when the database disconnects (#21553).
     """
     # Open a connection to the database.
     with connection.cursor():
         pass
     # Emulate a connection close by the database.
     connection._close()
     # Even then is_usable() should not raise an exception.
     try:
         self.assertFalse(connection.is_usable())
     finally:
         # Clean up the mess created by connection._close(). Since the
         # connection is already closed, this crashes on some backends.
         try:
             connection.close()
         except Exception:
             pass
Example #33
def ensure_mysql_connection_usable():
    """Ensure that MySQL connection is usable

    From: http://stackoverflow.com/questions/7835272/django-operationalerror-2006-mysql-server-has-gone-away
    """
    from django.db import connection, connections
    # MySQL is lazily connected to in Django.
    # connection.connection is None means
    # you have not connected to MySQL before
    if connection.connection and not connection.is_usable():
        # destroy the default MySQL connection
        # after this line, when you use ORM methods
        # Django will reconnect to the default MySQL
        #
        # Delete one database connection:
        # del connections._connections.default
        #
        # Delete all database connections
        databases = connections._connections.__dict__.keys()
        for database in databases:
            del connections._connections.__dict__[database]
Example #34
    def wrapper(self, bot, update):
        if not config.SOPDS_TELEBOT_AUTH:
            return func(self, bot, update)

        if connection.connection and not connection.is_usable():
            del (connections._connections.default)

        query = update.message if update.message else update.callback_query.message
        username = update.message.from_user.username if update.message else update.callback_query.from_user.username
        users = User.objects.filter(username__iexact=username)

        if users and users[0].is_active:
            return func(self, bot, update)

        bot.sendMessage(
            chat_id=query.chat_id,
            text=
            _("Hello %s!\nUnfortunately you do not have access to information. Please contact the bot administrator."
              ) % username)
        self.logger.info(_("Denied access for user: %s") % username)

        return
Example #35
def check_database_connected(app_configs, **kwargs):
    """
    A Django check to see if connecting to the configured default
    database backend succeeds.
    """
    errors = []

    try:
        connection.ensure_connection()
    except OperationalError as e:
        msg = 'Could not connect to database: {!s}'.format(e)
        errors.append(checks.Error(msg, id=ERROR_CANNOT_CONNECT_DATABASE))
    except ImproperlyConfigured as e:
        msg = 'Database misconfigured: "{!s}"'.format(e)
        errors.append(checks.Error(msg, id=ERROR_MISCONFIGURED_DATABASE))
    else:
        if not connection.is_usable():
            errors.append(
                checks.Error('Database connection is not usable',
                             id=ERROR_UNUSABLE_DATABASE))

    return errors
Example #36
def handler(event, context):
    os.environ.setdefault("DJANGO_SETTINGS_MODULE",
                          "cmcs_regulations.settings")
    import django
    django.setup()

    from django.db import connection
    connection.ensure_connection()
    if not connection.is_usable():
        raise Exception("database is unreachable")

    from django.apps import apps
    installed_apps = []
    for app in apps.get_app_configs():
        installed_apps.append(app.label)

    from django.core.management import call_command
    from django.core.management import CommandError
    for app in installed_apps:
        try:
            call_command("migrate", app)
        except CommandError:
            pass
Example #37
    def __init__(self, opts=0, protos=0):

        """ Initialize the milter

        :param opts: SMFIF-options for this milter
        :param protos: SMFIP-options for this milter
        :return: The milter
        """

        lm.MilterProtocol.__init__(self, opts, protos)
        lm.ForkMixin.__init__(self)
            
        self.helper = MilterHelper(configuration)

        logging.debug("Initialising Milter Fork")

        # Test whether the django database connection is still
        # usable and if not, close it to spawn a new connection
        if connection.connection:
            if not connection.is_usable():
                logging.debug("Found dead database connection, "
                              "closing it now...")
                connection.close()
Example #38
 def save(self,
          force_insert=False,
          force_update=False,
          using=None,
          update_fields=None):
     """
     This function saves the instance of the model, or creates it
     :param force_insert:
     :param force_update:
     :param using:
     :param update_fields:
     :return: None
     """
     if self.is_saved(raise_exception=False):
         return
     if not self.file_type:
         self.file_type = self.file.file.content_type
     OCRedFile.is_valid_file_type(file_type=self.file_type,
                                  raise_exception=True)
     content = self.file.file.read()  # read content of the 'file' field
     self.file.file.seek(
         0
     )  # return the reading pointer of the 'file' file to start position
     # calculate md5 of 'file' field if it does not exist
     if not self.md5:
         self.md5 = md5(content)
     OCRedFile.is_valid_ocr_md5(md5_value=self.md5, raise_exception=True)
     # extract or OCR the content of the 'file' field if 'text' does not exist
     if not self.text:
         print('OCRedFile->save start OCR ' + self.md5)
         ocr_started_datetime = datetime.now()
         if 'image' in self.file_type:
             pdf_content = ocr_img2pdf(content)
             self.text = pdf2text(pdf_content)
             if len(self.text):
                 # create ocred_pdf only for an image that contains a text
                 self.ocred_pdf_md5 = md5(pdf_content)
                 if getattr(settings, 'OCR_STORE_PDF',
                            ocr_default_settings.STORE_PDF):
                     self.ocred_pdf.save(set_pdffile_name(self),
                                         BytesIO(pdf_content), False)
                 else:
                     self.ocred_pdf.name = set_pdffile_name(self)
             self.ocred = timezone.now()
         elif 'pdf' in self.file_type:
             info = pdf_info(content)
             self.pdf_num_pages = info['numPages']
             self.pdf_author = info['Author']
             if info['CreationDate']:
                 self.pdf_creation_date = info['CreationDate']
             self.pdf_creator = info['Creator']
             if info['ModDate']:
                 self.pdf_mod_date = info['ModDate']
             self.pdf_producer = info['Producer']
             self.pdf_title = info['Title']
             pdf_text = pdf2text(content)
             # check that loaded PDF file contains text
             if pdf_need_ocr(pdf_text):
                 print('OCRedFile PDF OCR processing via OCRmyPDF ' +
                       self.md5)
                 filename = set_pdffile_name(self)
                 self.text = ocr_pdf(content, filename)
                 self.ocred = timezone.now(
                 )  # save datetime when uploaded PDF was ocred
                 if len(self.text):
                     # create ocred_pdf only for a pdf file that contains images with text
                     self.ocred_pdf.name = filename
                     self.ocred_pdf_md5 = md5(read_binary_file(filename))
                     if not getattr(settings, 'OCR_STORE_PDF',
                                    ocr_default_settings.STORE_PDF):
                         if os.path.isfile(filename):
                             os.remove(filename)
                 else:
                     # remove created by ocr_pdf(content, filename) pdf file
                     if os.path.isfile(filename):
                         os.remove(filename)
             else:
                 print('OCRedFile->save use text from loaded pdf ' +
                       self.md5)
                 self.text = pdf_text
         ocr_finished_datetime = datetime.now()
         print(
             f"OCRedFile->save finished OCR '{str(ocr_finished_datetime-ocr_started_datetime)}' {self.md5}"
         )
     if not connection.is_usable():
         try:
             connection.connect()
         except Exception as e:
             print(f"database reconnection exception {self.md5}")
     super(OCRedFile, self).save(force_insert=force_insert,
                                 force_update=force_update,
                                 using=using,
                                 update_fields=update_fields)
     if not getattr(settings, 'OCR_STORE_FILES',
                    ocr_default_settings.STORE_FILES):
         os.remove(self.file.path)
     OCRedFile.Counters.num_created_instances += 1
Example #39
def archive(archive_id):
    """
    Run the database archiving task.
    :return:
    """
    archive_info = ArchiveConfig.objects.get(id=archive_id)
    s_ins = archive_info.src_instance
    src_db_name = archive_info.src_db_name
    src_table_name = archive_info.src_table_name
    condition = archive_info.condition
    no_delete = archive_info.no_delete
    sleep = archive_info.sleep
    mode = archive_info.mode

    # Get the character set of the table being archived
    s_engine = get_engine(s_ins)
    db = s_engine.schema_object.databases[src_db_name]
    tb = db.tables[src_table_name]
    charset = tb.options['charset'].value
    if charset is None:
        charset = db.options['charset'].value

    pt_archiver = PtArchiver()
    # Prepare arguments
    source = fr"h={s_ins.host},u={s_ins.user},p={s_ins.password},P={s_ins.port},D={src_db_name},t={src_table_name}"
    args = {
        "no-version-check": True,
        "source": source,
        "where": condition,
        "progress": 5000,
        "statistics": True,
        "charset": charset,
        "limit": 10000,
        "txn-size": 1000,
        "sleep": sleep
    }

    # Archive to the destination instance
    if mode == 'dest':
        d_ins = archive_info.dest_instance
        dest_db_name = archive_info.dest_db_name
        dest_table_name = archive_info.dest_table_name
        dest = fr"h={d_ins.host},u={d_ins.user},p={d_ins.password},P={d_ins.port},D={dest_db_name},t={dest_table_name}"
        args['dest'] = dest
        args['bulk-insert'] = True
        if no_delete:
            args['no-delete'] = True
        else:
            args['bulk-delete'] = True
    elif mode == 'file':
        output_directory = os.path.join(settings.BASE_DIR,
                                        'downloads/archiver')
        os.makedirs(output_directory, exist_ok=True)
        args[
            'file'] = f'{output_directory}/{s_ins.instance_name}-{src_db_name}-{src_table_name}.txt'
        if no_delete:
            args['no-delete'] = True
        else:
            args['bulk-delete'] = True
    elif mode == 'purge':
        args['purge'] = True

    # Validate arguments
    args_check_result = pt_archiver.check_args(args)
    if args_check_result['status'] == 1:
        return JsonResponse(args_check_result)
    # Convert arguments to a command line
    cmd_args = pt_archiver.generate_args2cmd(args, shell=True)
    # Run the command and capture the output
    select_cnt = 0
    insert_cnt = 0
    delete_cnt = 0
    with FuncTimer() as t:
        p = pt_archiver.execute_cmd(cmd_args, shell=True)
        stdout = ''
        for line in iter(p.stdout.readline, ''):
            if re.match(r'^SELECT\s(\d+)$', line, re.I):
                select_cnt = re.findall(r'^SELECT\s(\d+)$', line)
            elif re.match(r'^INSERT\s(\d+)$', line, re.I):
                insert_cnt = re.findall(r'^INSERT\s(\d+)$', line)
            elif re.match(r'^DELETE\s(\d+)$', line, re.I):
                delete_cnt = re.findall(r'^DELETE\s(\d+)$', line)
            stdout += f'{line}\n'
    statistics = stdout
    # Capture error output
    stderr = p.stderr.read()
    if stderr:
        statistics = stdout + stderr

    # Evaluate the archive result
    select_cnt = int(select_cnt[0]) if select_cnt else 0
    insert_cnt = int(insert_cnt[0]) if insert_cnt else 0
    delete_cnt = int(delete_cnt[0]) if delete_cnt else 0
    error_info = ''
    success = True
    if stderr:
        error_info = f'命令执行报错:{stderr}'
        success = False
    if mode == 'dest':
        # Source rows are deleted; compare the delete count with the insert count
        if not no_delete and (insert_cnt != delete_cnt):
            error_info = f"删除和写入数量不一致:{insert_cnt}!={delete_cnt}"
            success = False
    elif mode == 'file':
        # Source rows are deleted; compare the select count with the delete count
        if not no_delete and (select_cnt != delete_cnt):
            error_info = f"查询和删除数量不一致:{select_cnt}!={delete_cnt}"
            success = False
    elif mode == 'purge':
        # Rows are purged directly; compare the select count with the delete count
        if select_cnt != delete_cnt:
            error_info = f"查询和删除数量不一致:{select_cnt}!={delete_cnt}"
            success = False

    # Save the execution info to the database
    if connection.connection and not connection.is_usable():
        close_old_connections()
    # Update the last archive time
    ArchiveConfig(
        id=archive_id,
        last_archive_time=t.end).save(update_fields=['last_archive_time'])
    # Replace password info before saving
    ArchiveLog.objects.create(
        archive=archive_info,
        cmd=cmd_args.replace(s_ins.password, '***').replace(
            d_ins.password, '***') if mode == 'dest' else cmd_args.replace(
                s_ins.password, '***'),
        condition=condition,
        mode=mode,
        no_delete=no_delete,
        sleep=sleep,
        select_cnt=select_cnt,
        insert_cnt=insert_cnt,
        delete_cnt=delete_cnt,
        statistics=statistics,
        success=success,
        error_info=error_info,
        start_time=t.start,
        end_time=t.end)
    if not success:
        raise Exception(f'{error_info}\n{statistics}')
Example #40
def check_status(request):
    if connection.is_usable():
        database_status = "working"
    else:
        database_status = "down"
    return render(request, "status.html", {"database_status": database_status})
Example #41
def execute_callback(task):
    """异步任务的回调, 将结果填入数据库等等
    使用django-q的hook, 传入参数为整个task
    task.result 是真正的结果
    """
    # https://stackoverflow.com/questions/7835272/django-operationalerror-2006-mysql-server-has-gone-away
    if connection.connection and not connection.is_usable():
        close_old_connections()
    workflow_id = task.args[0]
    # Check the workflow status; if it is not executing, refuse to update it and raise an error for the log
    with transaction.atomic():
        workflow = SqlWorkflow.objects.get(id=workflow_id)
        if workflow.status != 'workflow_executing':
            raise Exception(f'工单{workflow.id}状态不正确,禁止重复更新执行结果!')

    workflow.finish_time = task.stopped

    if not task.success:
        # On failure the task returns a stack trace; build an error result from it
        workflow.status = 'workflow_exception'
        execute_result = ReviewSet(
            full_sql=workflow.sqlworkflowcontent.sql_content)
        execute_result.rows = [
            ReviewResult(stage='Execute failed',
                         errlevel=2,
                         stagestatus='异常终止',
                         errormessage=task.result,
                         sql=workflow.sqlworkflowcontent.sql_content)
        ]
    elif task.result.warning or task.result.error:
        execute_result = task.result
        workflow.status = 'workflow_exception'
    else:
        execute_result = task.result
        workflow.status = 'workflow_finish'
    try:
        # Save the execution result
        workflow.sqlworkflowcontent.execute_result = execute_result.json()
        workflow.sqlworkflowcontent.save()
        workflow.save()
    except Exception as e:
        logger.error(f'SQL工单回调异常: {workflow_id} {traceback.format_exc()}')
        SqlWorkflow.objects.filter(id=workflow_id).update(
            finish_time=task.stopped,
            status='workflow_exception',
        )
        workflow.sqlworkflowcontent.execute_result = {f'{e}'}
        workflow.sqlworkflowcontent.save()
    # Add a workflow audit log entry
    audit_id = Audit.detail_by_workflow_id(
        workflow_id=workflow_id,
        workflow_type=WorkflowDict.workflow_type['sqlreview']).audit_id
    Audit.add_log(audit_id=audit_id,
                  operation_type=6,
                  operation_type_desc='执行结束',
                  operation_info='执行结果:{}'.format(
                      workflow.get_status_display()),
                  operator='',
                  operator_display='系统')

    # Clear the instance resource cache after a DDL workflow finishes
    if workflow.syntax_type == 1:
        r = get_redis_connection("default")
        for key in r.scan_iter(match='*insRes*', count=2000):
            r.delete(key)

    # Only send a notification if the Execute phase is enabled in the notify settings
    sys_config = SysConfig()
    is_notified = 'Execute' in sys_config.get('notify_phase_control').split(',') \
        if sys_config.get('notify_phase_control') else True
    if is_notified:
        notify_for_execute(workflow)
Example #42
def check_db_connection():
    from django.db import connection
    if connection.connection:
        if not connection.is_usable():
            connection.close()
Example #43
def query(request):
    """
    Get SQL query results.
    :param request:
    :return:
    """
    instance_name = request.POST.get('instance_name')
    sql_content = request.POST.get('sql_content')
    db_name = request.POST.get('db_name')
    tb_name = request.POST.get('tb_name')
    limit_num = int(request.POST.get('limit_num', 0))
    schema_name = request.POST.get('schema_name', None)
    user = request.user

    result = {'status': 0, 'msg': 'ok', 'data': {}}
    try:
        instance = user_instances(request.user).get(instance_name=instance_name)
    except Instance.DoesNotExist:
        result['status'] = 1
        result['msg'] = '你所在组未关联该实例'
        return HttpResponse(json.dumps(result), content_type='application/json')

    # Server-side parameter validation
    if None in [sql_content, db_name, instance_name, limit_num]:
        result['status'] = 1
        result['msg'] = '页面提交参数可能为空'
        return HttpResponse(json.dumps(result), content_type='application/json')

    try:
        config = SysConfig()
        # Pre-query checks: forbidden-statement check and statement splitting
        query_engine = get_engine(instance=instance)
        query_check_info = query_engine.query_check(db_name=db_name, sql=sql_content)
        if query_check_info.get('bad_query'):
            # The engine flagged this as a bad_query
            result['status'] = 1
            result['msg'] = query_check_info.get('msg')
            return HttpResponse(json.dumps(result), content_type='application/json')
        if query_check_info.get('has_star') and config.get('disable_star') is True:
            # The engine detected a SELECT * and the disable_star option is on
            result['status'] = 1
            result['msg'] = query_check_info.get('msg')
            return HttpResponse(json.dumps(result), content_type='application/json')
        sql_content = query_check_info['filtered_sql']

        # Query privilege check; also obtains limit_num
        priv_check_info = query_priv_check(user, instance, db_name, sql_content, limit_num)
        if priv_check_info['status'] == 0:
            limit_num = priv_check_info['data']['limit_num']
            priv_check = priv_check_info['data']['priv_check']
        else:
            result['status'] = priv_check_info['status']
            result['msg'] = priv_check_info['msg']
            return HttpResponse(json.dumps(result), content_type='application/json')
        # Set limit_num to 0 for EXPLAIN statements
        limit_num = 0 if re.match(r"^explain", sql_content.lower()) else limit_num

        # Add a LIMIT to the query SQL or rewrite the statement
        sql_content = query_engine.filter_sql(sql=sql_content, limit_num=limit_num)

        # Get the query connection first, so it can be reused later and the session can be killed
        query_engine.get_connection(db_name=db_name)
        thread_id = query_engine.thread_id
        max_execution_time = int(config.get('max_execution_time', 60))
        # Execute the query and add a scheduled job that kills the statement after max_execution_time
        if thread_id:
            schedule_name = f'query-{time.time()}'
            run_date = (datetime.datetime.now() + datetime.timedelta(seconds=max_execution_time))
            add_kill_conn_schedule(schedule_name, run_date, instance.id, thread_id)
        with FuncTimer() as t:
            # Get the master-slave replication lag
            seconds_behind_master = query_engine.seconds_behind_master
            query_result = query_engine.query(db_name, sql_content, limit_num,
                                              schema_name=schema_name,
                                              tb_name=tb_name,
                                              max_execution_time=max_execution_time * 1000)
        query_result.query_time = t.cost
        # Remove the scheduled kill job after the query returns
        if thread_id:
            del_schedule(schedule_name)

        # Query error
        if query_result.error:
            result['status'] = 1
            result['msg'] = query_result.error
        # Data masking: only mask error-free result sets; whether to return data follows the query_check setting
        elif config.get('data_masking'):
            try:
                with FuncTimer() as t:
                    masking_result = query_engine.query_masking(db_name, sql_content, query_result)
                masking_result.mask_time = t.cost
                # Masking failed
                if masking_result.error:
                    # With query_check enabled, return the error and block the query
                    if config.get('query_check'):
                        result['status'] = 1
                        result['msg'] = f'数据脱敏异常:{masking_result.error}'
                    # With query_check disabled, ignore the error and return unmasked data; mark the privilege check as skipped
                    else:
                        logger.warning(f'数据脱敏异常,按照配置放行,查询语句:{sql_content},错误信息:{masking_result.error}')
                        query_result.error = None
                        result['data'] = query_result.__dict__
                # Masking succeeded
                else:
                    result['data'] = masking_result.__dict__
            except Exception as msg:
                # Unexpected exception: with query_check enabled, return the error and block the query
                if config.get('query_check'):
                    result['status'] = 1
                    result['msg'] = f'数据脱敏异常,请联系管理员,错误信息:{msg}'
                # With query_check disabled, ignore the error and return unmasked data; mark the privilege check as skipped
                else:
                    logger.warning(f'数据脱敏异常,按照配置放行,查询语句:{sql_content},错误信息:{msg}')
                    query_result.error = None
                    result['data'] = query_result.__dict__
        # Statements that need no masking
        else:
            result['data'] = query_result.__dict__

        # Only successful queries are logged to the database
        if not query_result.error:
            result['data']['seconds_behind_master'] = seconds_behind_master
            if int(limit_num) == 0:
                limit_num = int(query_result.affected_rows)
            else:
                limit_num = min(int(limit_num), int(query_result.affected_rows))
            query_log = QueryLog(
                username=user.username,
                user_display=user.display,
                db_name=db_name,
                instance_name=instance.instance_name,
                sqllog=sql_content,
                effect_row=limit_num,
                cost_time=query_result.query_time,
                priv_check=priv_check,
                hit_rule=query_result.mask_rule_hit,
                masking=query_result.is_masked
            )
            # Guard against the DB connection having timed out during the query
            if connection.connection and not connection.is_usable():
                close_old_connections()
            query_log.save()
    except Exception as e:
        logger.error(f'查询异常报错,查询语句:{sql_content}\n,错误信息:{traceback.format_exc()}')
        result['status'] = 1
        result['msg'] = f'查询异常报错,错误信息:{e}'
        return HttpResponse(json.dumps(result), content_type='application/json')
    # Return the query result
    try:
        return HttpResponse(json.dumps(result, use_decimal=False, cls=ExtendJSONEncoderFTime, bigint_as_string=True),
                            content_type='application/json')
    # The response is returned, but the content may still be garbled
    except UnicodeDecodeError:
        return HttpResponse(json.dumps(result, default=str, bigint_as_string=True, encoding='latin1'),
                            content_type='application/json')
Example #44
    def run(self):
        # get the vehicle record
        try:
            vehicle = Vehicle.objects.get(veh_name=self.vehicle_name)
        except Vehicle.DoesNotExist:
            logger.error(
                "%s: Vehicle '%s' does not exist in database. Add it first.",
                MY_NAME, self.vehicle_name)
            sys.exit(2)

        # start GPS polling thread
        self.gps_poller.start()

        # catch signals for proper shutdown
        for sig in (SIGABRT, SIGTERM, SIGINT):
            signal(sig, self.cleanup)

        # main execution loop
        while True:
            try:
                time.sleep(self.interval)
                # If we are idle too long the database server may
                # close the connection on us, ping the server to check if
                # the connection is still up.
                if (connection.connection is not None):
                    if (connection.is_usable()):
                        logger.debug('%s: Database connection is up.', MY_NAME)
                    else:
                        logger.error('%s: Database connection is down.',
                                     MY_NAME)
                        connection.close()
                else:
                    logger.error('%s: Database connection is closed.', MY_NAME)

                # process GPS data
                session = self.gps_poller.session
                if (session.fix.mode == MODE_NO_FIX) and not self.nofix:
                    logger.info("%s: Waiting for GPS to fix...", MY_NAME)
                    continue

                if not isnan(session.fix.time):
                    if (session.fix.speed < 0.1) and (self.last_speed < 0.1):
                        continue
                    self.last_speed = session.fix.speed
                    # if the time is valid the data record is valid
                    location = Location()
                    location.loc_vehicle = vehicle
                    location.loc_time = session.utc
                    location.loc_latitude = session.fix.latitude
                    location.loc_longitude = session.fix.longitude
                    if (session.fix.mode == MODE_3D):
                        location.loc_altitude = session.fix.altitude
                    location.loc_speed = session.fix.speed
                    location.loc_climb = session.fix.climb
                    location.loc_track = session.fix.track
                    location.save()
                    logger.info("%s: Valid location: %s", MY_NAME, location)
                else:
                    logger.debug("%s: Invalid location.", MY_NAME)

            except KeyboardInterrupt:
                print('\n')
                break
Example #45
 def save(self,
          force_insert=False,
          force_update=False,
          using=None,
          update_fields=None):
     """
     This function saves the instance of the model, or creates it
     :param force_insert:
     :param force_update:
     :param using:
     :param update_fields:
     :return: None
     """
     if self.is_saved(raise_exception=False):
         return
     if not self.file_type:
         self.file_type = self.file.file.content_type
     OCRedFile.is_valid_file_type(file_type=self.file_type,
                                  raise_exception=True)
     # read content of the 'file' field
     content = self.file.file.read()
     # return the reading pointer of the 'file' file to start position
     self.file.file.seek(0)
     # calculate md5 of 'file' field if it does not exist
     if not self.md5:
         self.md5 = md5(content)
     OCRedFile.is_valid_ocr_md5(md5_value=self.md5, raise_exception=True)
     # extract or OCR the content of the 'file' field if 'text' does not exist
     if not self.text:
         print(f'OCRedFile->save start OCR {self.md5}')
         ocr_started_datetime = timezone.now()
         if 'image' in self.file_type:
             pdf_content = ocr_img2pdf(content)
             self.text = pdf2text(pdf_content)
             if len(self.text):
                 # create ocred_pdf only for an image that contains a text
                 self.ocred_pdf_md5 = md5(pdf_content)
                 if ocr_settings.OCR_STORE_PDF:
                     self.ocred_pdf.save(set_pdffile_name(self),
                                         BytesIO(pdf_content), False)
                 else:
                     self.ocred_pdf.name = set_pdffile_name(self)
             self.ocred = timezone.now()
         elif 'pdf' in self.file_type:
             pdf_info: PdfInfo = get_pdf_info(content)
             self.pdf_num_pages = pdf_info.num_pages
             self.pdf_author = pdf_info.author
             if pdf_info.creation_date:
                 self.pdf_creation_date = pdf_info.creation_date
             self.pdf_creator = pdf_info.creator
             if pdf_info.mod_date:
                 self.pdf_mod_date = pdf_info.mod_date
             self.pdf_producer = pdf_info.producer
             self.pdf_title = pdf_info.title
             pdf_text = pdf2text(content)
             # check that loaded PDF file contains text
             if pdf_need_ocr(pdf_text):
                 print(
                     f'OCRedFile PDF OCR processing via OCRmyPDF {self.md5}'
                 )
                 pdf_filename = set_pdffile_name(self)
                 self.text = ocr_pdf(content, pdf_filename)
                 self.ocred = timezone.now(
                 )  # save datetime when uploaded PDF was ocred
                 if len(self.text):
                     # create ocred_pdf only for a pdf file that contains images with text
                     self.ocred_pdf.name = pdf_filename
                     self.ocred_pdf_md5 = md5(
                         read_binary_file(pdf_filename))
                     if not ocr_settings.OCR_STORE_PDF:
                         if os.path.isfile(pdf_filename):
                             os.remove(pdf_filename)
                 else:
                     # remove the PDF file created by ocr_pdf(content, pdf_filename)
                     if os.path.isfile(pdf_filename):
                         os.remove(pdf_filename)
             else:
                 print(
                     f'OCRedFile->save use text from loaded pdf {self.md5}')
                 self.text = pdf_text
         ocr_finished_datetime = timezone.now()
         ocr_duration: timedelta = ocr_finished_datetime - ocr_started_datetime
         print(
             f"OCRedFile->save finished OCR '{ocr_duration.seconds}.{ocr_duration.microseconds}' ms {self.md5}"
         )
     if not ocr_settings.OCR_STORE_FILES:
         os.remove(self.file.path)
     # update counters
     OCRedFile.Counters.num_created_instances += 1
     # checking database connection
     if not connection.is_usable():
         try:
             connection.connect()
         except Exception as e:
             print(f"database reconnection exception {self.md5}")
     # parent method
     super(OCRedFile, self).save(force_insert=force_insert,
                                 force_update=force_update,
                                 using=using,
                                 update_fields=update_fields)
Example #46
    def run(self):
        # Execution starts here
        rvi_logger.info('RVI Server: Starting...')

        conf = get_settings()

        rvi_logger.info('RVI Server: General Configuration: ' + 
            'RVI_SERVICE_EDGE_URL: '  + conf['SERVICE_EDGE_URL']  + ', ' +
            'MEDIA_ROOT: '            + conf['MEDIA_ROOT']
            )

        # setup RVI Service Edge
        rvi_logger.info('RVI Server: Setting up outbound connection to RVI Service Edge at %s', conf['SERVICE_EDGE_URL'])
        self.rvi_service_edge = jsonrpclib.Server(conf['SERVICE_EDGE_URL'])

        # SOTA Startup
        if conf['SOTA_ENABLE'] == True:
            # log SOTA configuration
            rvi_logger.info('RVI Server: SOTA Configuration: ' + 
                'RVI_SOTA_CALLBACK_URL: ' + conf['SOTA_CALLBACK_URL'] + ', ' +
                'RVI_SOTA_SERVICE_ID: '   + conf['SOTA_SERVICE_ID']   + ', ' +
                'RVI_SOTA_CHUNK_SIZE: '   + str(conf['SOTA_CHUNK_SIZE'])
                )
            # start the SOTA callback server
            try:
                rvi_logger.info('RVI Server: Starting SOTA Callback Server on %s with service id %s.', conf['SOTA_CALLBACK_URL'], conf['SOTA_SERVICE_ID'])
                self.sota_cb_server = SOTACallbackServer(self.rvi_service_edge, conf['SOTA_CALLBACK_URL'], conf['SOTA_SERVICE_ID'])
                self.sota_cb_server.start()
                rvi_logger.info('RVI Server: SOTA Callback Server started.')
            except Exception as e:
                rvi_logger.error('RVI Server: Cannot start SOTA Callback Server: %s', e)
                sys.exit(1)

            # wait for SOTA callback server to come up    
            time.sleep(0.5)

            # start SOTA Transmission Server
            try:
                rvi_logger.info('RVI Server: Starting SOTA Transmission Server.')
                self.sota_tx_server = SOTATransmissionServer(self.rvi_service_edge, conf['SOTA_SERVICE_ID'], conf['SOTA_CHUNK_SIZE'])
                self.sota_tx_server.start()
                rvi_logger.info('RVI Server: SOTA Transmission Server started.')
            except Exception as e:
                rvi_logger.error('RVI Server: Cannot start SOTA Transmission Server: %s', e)
                sys.exit(1)
    
            # wait for SOTA transmission server to come up    
            time.sleep(0.5)
            
        # Tracking Startup
        if conf['TRACKING_ENABLE'] == True:
            # log Tracking configuration
            rvi_logger.info('RVI Server: Tracking Configuration: ' + 
                'RVI_TRACKING_CALLBACK_URL: ' + conf['TRACKING_CALLBACK_URL'] + ', ' +
                'RVI_TRACKING_SERVICE_ID: '   + conf['TRACKING_SERVICE_ID']
                )
            # start the Tracking callback server
            try:
                rvi_logger.info('RVI Server: Starting Tracking Callback Server on %s with service id %s.', conf['TRACKING_CALLBACK_URL'], conf['TRACKING_SERVICE_ID'])
                self.tracking_cb_server = TrackingCallbackServer(self.rvi_service_edge, conf['TRACKING_CALLBACK_URL'], conf['TRACKING_SERVICE_ID'])
                self.tracking_cb_server.start()
                rvi_logger.info('RVI Server: Tracking Callback Server started.')
            except Exception as e:
                rvi_logger.error('RVI Server: Cannot start Tracking Callback Server: %s', e)
                sys.exit(1)

            # wait for Tracking callback server to come up
            time.sleep(0.5)

        else:
            rvi_logger.info('RVI Server: Tracking not enabled')
        
        # Publish to Kafka Message Queue
        if conf['TRACKING_MQ_PUBLISH'] == True:
            #log kafka configuration
            rvi_logger.info('RVI Server: Publishing to Kafka Message Queue: ' + conf['TRACKING_MQ_URL'] + ' , with topic: ' + conf['TRACKING_MQ_TOPIC'])

            #Start the Kafka message queue forwarding server
            try:
                rvi_logger.info('%s: Publishing to message queue enabled.', self.__class__.__name__)
                self.mq_sink_server = MQSinkServer(self.rvi_service_edge, conf['TRACKING_CALLBACK_URL'], conf['TRACKING_SERVICE_ID'])
                self.mq_sink_server.start()
                rvi_logger.info('RVI Server: Message Queue Server started.')
            except Exception as e:
                rvi_logger.error('RVI Server: Cannot start Message Queue Server: %s', e)
                sys.exit(1)

        else:
            rvi_logger.info('RVI Server: MQ Publish not enabled')

        # Save message Queue contents into HBase
        if conf['TRACKING_MQ_HBASE'] == True:
            rvi_logger.info('RVI Server: Saving to HBase: ' + conf['TRACKING_MQ_HBASE_URL'])
           
            #Start HBase Server thread
            try:
                rvi_logger.info('%s: Saving messages to HBase enabled.', self.__class__.__name__)
                self.hbase_server = HBaseServer(conf['TRACKING_MQ_URL'],conf['TRACKING_MQ_TOPIC'],conf['TRACKING_MQ_HBASE_URL'], conf['TRACKING_MQ_HBASE_PORT'], conf['TRACKING_MQ_HBASE_TABLE']) 
                self.hbase_server.start()
                rvi_logger.info('RVI Server: Kafka -> HBase consumer started.')
            except Exception as e:
                rvi_logger.error('RVI Server: Cannot start HBase Server: %s', e)
                sys.exit(1)
        else:
            rvi_logger.info('RVI Server: HBase server storage not enabled')
        
        # catch signals for proper shutdown
        for sig in (SIGABRT, SIGTERM, SIGINT):
            signal(sig, self.cleanup)

        # main execution loop
        timeout = conf['DB_CLOSE_TIMEOUT']
        while True:
            try:
                time.sleep(conf['DB_PING_INTERVAL'])
                # If we are idle too long the database server may
                # close the connection on us, ping the server to check if
                # the connection is still up.
                if (connection.connection is not None):
                    if (connection.is_usable() == True): 
                        rvi_logger.debug('RVI Server: Database connection is up.')
                        # Close connection if open longer than the timeout
                        timeout -= conf['DB_PING_INTERVAL']
                        if (timeout <= 0):
                            connection.close()
                            timeout = conf['DB_CLOSE_TIMEOUT']
                            rvi_logger.info('RVI Server: Idle Timeout: closed database connection.')
                    else:
                        rvi_logger.error('RVI Server: Database connection is down.')
                        connection.close()
                elif (conf['TRACKING_MQ_PUBLISH'] == True and conf['TRACKING_ENABLE'] == False):
                    pass
                else:    
                    rvi_logger.error('RVI Server: Database connection is closed.')
                    # As long as the connection is closed reset the timeout
                    timeout = conf['DB_CLOSE_TIMEOUT']
                    
            except KeyboardInterrupt:
                print ('\n')
                break
Example #47
def check_status(request):
    if connection.is_usable():
        database_status = 'working'
    else:
        database_status = 'down'
    return render(request, 'status.html', {'database_status': database_status})