Example #1
    def __init__(self,
                 userId,
                 passwd,
                 sid,
                 encoding="utf-8",
                 nencoding="utf-8"):
        DBWrapper.__init__(self)

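        # Open the Oracle connection; failures are logged with a traceback
        # and re-raised.  Note that the debug line below also writes the
        # password to the log.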
        try:
            log.debug("Oracle connection info. (%s/%s@%s)" %
                      (userId, passwd, sid))
            self.connector = cx_Oracle.connect(userId,
                                               passwd,
                                               sid,
                                               encoding=encoding,
                                               nencoding=nencoding)
        except Exception:
            log.exception("Failed to connect to Oracle (%s@%s)" % (userId, sid))
            raise

        try:
            self.cursor = self.connector.cursor()
        except Exception:
            log.exception("Failed to create a database cursor")
            raise
Example #2
def save_file(fname, fobj, replace=False, buffer_size=4096):
    assert hasattr(fobj, "read"), "fobj parameter should be a file-like object"
    path = os.path.dirname(fname)
    if path and not os.path.exists(path):
        try:
            os.makedirs(path)
        except Exception as e:
            log.exception(e)
            raise Exception("Can't create %s directory" % path)

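    # If not replacing, probe "name(1)", "name(2)", ... until a free name is found.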
    if not replace:
        ff, ext = os.path.splitext(fname)
        i = 1
        while os.path.exists(fname):
            fname = ff + "(" + str(i) + ")" + ext
            i += 1

    with open(fname, "wb") as out:
        while True:
            chunk = fobj.read(buffer_size)
            if not chunk:
                break
            out.write(chunk)
    return os.path.basename(fname)
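
A minimal usage sketch for save_file (the target path and in-memory payload
are illustrative, not from the source project):

import io

# Saves to docs/report.txt, or docs/report(1).txt etc. if the name is taken,
# and returns the basename that was actually used.
saved_name = save_file("docs/report.txt", io.BytesIO(b"hello world"))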
Example #3
    def run(self):
        log.debug('Start thread.')

        try:
            # Query vk to get the list of audio tracks
            rs = self.vk.method('audio.get')

            # TODO: vk may not return all audio tracks in a single request
            audio_list = rs['items']

            self.about_range_progress.emit(0, len(audio_list))

            for i, audio in enumerate(audio_list):
                if not self._is_run:
                    break

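                # Build "Artist - Title" and hand it, with the download URL,
                # to the UI thread via Qt-style signals.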
                try:
                    artist = audio['artist'].strip().title()
                    title = audio['title'].strip().capitalize()
                    title = artist + ' - ' + title
                    url = audio['url']

                    self.about_progress.emit(i)
                    self.about_add_audio.emit(title, url)

                except Exception as e:
                    log.exception('Error: {}, audio id={} owner_id={}'.format(
                        e, audio['id'], audio['owner_id']))

        finally:
            log.debug('Finish thread.')
Example #4
def save_file(fname, fobj, replace=False, buffer_size=4096):
    assert hasattr(fobj, 'read'), "fobj parameter should be a file-like object"
    path = os.path.dirname(fname)
    if path and not os.path.exists(path):
        try:
            os.makedirs(path)
        except Exception as e:
            log.exception(e)
            raise Exception("Can't create %s directory" % path)
    
    if not replace:
        ff, ext = os.path.splitext(fname)
        i = 1
        while os.path.exists(fname):
            fname = ff + '(' + str(i) + ')' + ext
            i += 1

    with open(fname, 'wb') as out:
        while True:
            chunk = fobj.read(buffer_size)
            if not chunk:
                break
            out.write(chunk)
    return os.path.basename(fname)
Example #5
 def get_necessary_info(self):
     log.info('begin get_necessary_info!!!')
     split_result = '|awk -F \'[=]\' \'{print $2}\''
     get_instance = '|grep TINSTANCE{}'.format(split_result)
     dir_instance = '|grep DIR_INSTANCE{}'.format(split_result)
     get_instance_cmd = "su - {} -c 'env'{}".format(self.hana_adm,
                                                    get_instance)
     get_dir_instance_cmd = "su - {} -c 'env'{}".format(
         self.hana_adm, dir_instance)
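     # Read TINSTANCE and DIR_INSTANCE from the HANA admin user's environment
     # (su -c 'env'), extracting the value after '=' with awk.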
     try:
         log.info('get_instance_cmd:{}'.format(get_instance_cmd))
         log.info('get_dir_instance_cmd:{}'.format(get_dir_instance_cmd))
         instance_result = exec_cmd2(get_instance_cmd)
         if instance_result['ret'] != 0:
             raise DbfenError(
                 20120073,
                 instance_result['msg'] + instance_result['errmsg'])
         dir_instance_result = exec_cmd2(get_dir_instance_cmd)
         if dir_instance_result['ret'] != 0:
             raise DbfenError(
                 20120074,
                 dir_instance_result['msg'] + dir_instance_result['errmsg'])
     except Exception as ex:
         log.exception(ex)
         raise
     return {
         'instance': instance_result['msg'],
         'dir_instance': dir_instance_result['msg']
     }
Example #6
def save_file(fname, fobj, replace=False, buffer_size=4096):
    assert hasattr(fobj, 'read'), "fobj parameter should be a file-like object"
    path = os.path.dirname(fname)
    if path and not os.path.exists(path):
        try:
            os.makedirs(path)
        except Exception as e:
            log.exception(e)
            raise Exception("Can't create %s directory" % path)
Example #7
 def connect_gracc_url(self, gracc_url):
     try:
         self.es = elasticsearch.Elasticsearch(
             [gracc_url], timeout=300, use_ssl=True, verify_certs=True,
             ca_certs='/etc/ssl/certs/ca-bundle.crt')
     except Exception as e:
         log.exception(e)
         log.error("Unable to connect to GRACC database")
         raise
Example #8
def save_file(fname, fobj, replace=False, buffer_size=4096):
    assert hasattr(fobj, 'read'), "fobj parameter should be a file-like object"
    path = os.path.dirname(fname)
    if path and not os.path.exists(path):
        try:
            os.makedirs(path)
        except Exception as e:
            log.exception(e)
            raise Exception("Can't create %s directory" % path)
Example #9
def main():

    try:
        main_unwrapped()
    except SystemExit:
        raise
    except (Exception, KeyboardInterrupt) as e:
        log.error(str(e))
        log.exception(e)
        raise
Example #11
    def connect(self):
        gracc_url = self.cp.get("GRACC Transfer", "Url")
        #gracc_url = 'https://gracc.opensciencegrid.org/q'

        try:
            self.es = elasticsearch.Elasticsearch(
                [gracc_url], timeout=300, use_ssl=True, verify_certs=True,
                ca_certs='/etc/ssl/certs/ca-bundle.crt')
        except Exception as e:
            log.exception(e)
            log.error("Unable to connect to GRACC database")
            raise
Example #12
 def connect_transfer(self):
     user = self.cp.get("Gratia Transfer", "User")
     password = self.cp.get("Gratia Transfer", "Password")
     host = self.cp.get("Gratia Transfer", "Host")
     database = self.cp.get("Gratia Transfer", "Database")
     port = int(self.cp.get("Gratia Transfer", "Port"))
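     # MySQLdb raises on bad credentials or an unreachable host; log the
     # traceback and re-raise so the caller can handle it.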
     try:
         self.conn = MySQLdb.connect(user=user, passwd=password, host=host,
             port=port, db=database)
     except Exception as e:
         log.exception(e)
         log.error("Unable to connect to Gratia Transfer DB")
         raise
Example #13
 def connect(self):
     user = self.cp.get("Gratia", "User")
     password = self.cp.get("Gratia", "Password")
     host = self.cp.get("Gratia", "Host")
     database = self.cp.get("Gratia", "Database")
     port = int(self.cp.get("Gratia", "Port"))
     try:
         self.conn = MySQLdb.connect(user=user, passwd=password, host=host,
             port=port, db=database)
         log.info("Successfully connected to Gratia database")
     except Exception as e:
         log.exception(e)
         log.error("Unable to connect to Gratia database")
         raise
Example #14
 def show_hana_databases(self, is_show=True):
     log.info('begin show_hana_databases!!!')
     sql_cmd = r"select DATABASE_NAME from SYS.M_DATABASES where ACTIVE_STATUS=\'YES\'"
     exec_command = self.system_db_exec_command_str(sql_cmd)
     exec_command_log = exec_command.replace(
         r'-p \"{}\"'.format(self.system_db_pwd), '-p ******')
     log.info('check_system_db_cmd:{}'.format(exec_command_log))
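     # List the active databases as SYSTEMDB first; when is_show is False,
     # re-run the query against the tenant itself to verify its status.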
     try:
         result = exec_cmd2(exec_command)
         log.info('result is:{}'.format(result))
         status = result['ret']
         output = result['msg'].strip()
         if status != 0 and self.tenant_user == '':
             log.error('SYSTEMDB is abnormal (the password may be '
                       'incorrect); please check it.')
             raise DbfenError(20120082, result['msg'] + result['errmsg'])
         databases = self.split_result_database_str(output)
         if len(databases) == 1:
             log.error('SYSTEMDB is abnormal (the password may be '
                       'incorrect); please check it.')
             raise DbfenError(20120082, result['msg'] + result['errmsg'])
         if is_show:
             return databases
         exec_command = self.system_db_exec_command_str(sql_cmd, False)
         exec_command_log = exec_command.replace(
             r'-p \"{}\"'.format(self.tenant_passwd), '-p ******')
         log.info('check target db {} is whether normal!!!'.format(
             exec_command_log))
         result = exec_cmd2(exec_command)
         status = result['ret']
         output = result['msg'].strip()
         tenant = self.split_result_database_str(output)
         if len(tenant) == 0:
             log.error('tenant db {} is not active; please check its '
                       'status.'.format(self.target_db))
             raise DbfenError(20120083, result['msg'] + result['errmsg'])
         if status != 0:
             log.error('the password for db {} may be incorrect.'.format(
                 self.target_db))
             raise DbfenError(20120076, result['msg'] + result['errmsg'])
         return tenant
     except Exception as ex:
         log.exception(ex)
         raise
Example #15
def main():
    while True:
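        # Retry every 10 seconds until Postgres is reachable and the schemas
        # and tables have been created.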
        try:
            # initialize postgres database connection
            pg = PgHandler(dbname,
                           username=pg_username,
                           password=pg_password,
                           host=pg_server)
            pg.create_schemas()
            pg.create_tables()
            log(POSTGRES_TYPE, log_pb2.INFO,
                "Postgres database has been created and is available for "
                "connections")
            break
        except Exception:
            exception(msg="Unexpected error while initializing Postgres; "
                          "retrying.")
            time.sleep(10)
Example #16
def run_docker(address, interval, host, port, debug=False):
    prev_cpu, prev_system = {}, {}
    prev_tx_bytes, prev_rx_bytes, prev_timer = {}, {}, {}
    client = statsd.StatsClient(host, port)
    MEM_USAGE = jmespath.compile('memory_stats.usage')
    MEM_LIMIT = jmespath.compile('memory_stats.limit')
    TOTAL_USAGE = jmespath.compile('cpu_stats.cpu_usage.total_usage')
    SYSTEM_USAGE = jmespath.compile('cpu_stats.system_cpu_usage')
    NUM_CPUS = jmespath.compile('length(cpu_stats.cpu_usage.percpu_usage)')
    TX_BYTES = jmespath.compile('networks.eth0.tx_bytes')  # TODO: Always eth0??? (likely not...)
    RX_BYTES = jmespath.compile('networks.eth0.rx_bytes')
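    # Poll the Docker remote API once per interval and push per-container
    # memory/CPU/network gauges to StatsD; metric names carry an
    # InfluxDB-style ",service=<name>" tag.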
    try:
        while True:
            with client.pipeline() as pipe:
                start = time.time()
                containers = get(address, '/containers/json?all=1', debug)
                for container in containers:
                    name = container.get('Names')[0].strip('/')
                    status = container.get('Status')
                    id_ = container.get('Id')
                    log.debug("{}: {}".format(name, status))
                    stats = get(address, '/containers/{}/stats?stream=0'.format(id_), debug)  # Very slow call...

                    mem_usage = MEM_USAGE.search(stats) or 0
                    mem_limit = MEM_LIMIT.search(stats) or 1
                    mem_percent = 100.0 * (mem_usage / mem_limit) if mem_limit > 0 else 0

                    if debug:
                        log.debug("{}: Mem: {:,} {:,} {}%".format(name, mem_usage, mem_limit, mem_percent))

                    pipe.gauge('system.memory.virtual.percent,service={}'.format(name), mem_percent)

                    # http://stackoverflow.com/questions/30271942/get-docker-container-cpu-usage-as-percentage
                    cpu_percent = 0

                    total_usage = TOTAL_USAGE.search(stats) or 0
                    cpu_delta = total_usage - prev_cpu.get(name, 0)

                    system_usage = SYSTEM_USAGE.search(stats) or 0
                    system_delta = system_usage - prev_system.get(name, 0)

                    num_cpus = NUM_CPUS.search(stats) or 1

                    if system_delta > 0 and cpu_delta > 0:
                        cpu_percent = (cpu_delta / system_delta) * num_cpus * 100.0

                    if debug:
                        log.debug("{}: Cpu: {}, {}: {}%".format(name, cpu_delta, system_delta, cpu_percent))

                    prev_cpu[name], prev_system[name] = total_usage, system_usage

                    pipe.gauge('system.cpu.percent,service={}'.format(name), cpu_percent)

                    tx_bytes = TX_BYTES.search(stats) or 0
                    rx_bytes = RX_BYTES.search(stats) or 0

                    tx = tx_bytes - prev_tx_bytes.setdefault(name, 0)  # B
                    rx = rx_bytes - prev_rx_bytes.setdefault(name, 0)

                    timer = time.time()
                    elapsed = timer - prev_timer.get(name, 0)  # s
                    prev_timer[name] = timer

                    tx_rate = tx / elapsed if tx > 0 and elapsed > 0 else 0  # B/s
                    rx_rate = rx / elapsed if rx > 0 and elapsed > 0 else 0

                    pipe.gauge('system.network.send_rate,service={}'.format(name), tx_rate)
                    pipe.gauge('system.network.recv_rate,service={}'.format(name), rx_rate)

                    if debug:
                        log.debug("{}: Net Tx: {:,} -> {:,} ({}B/s)".format(name, tx_bytes, prev_tx_bytes[name], tx_rate))
                        log.debug("{}: Net Rx: {:,} -> {:,} ({}B/s)".format(name, rx_bytes, prev_rx_bytes[name], rx_rate))

                    prev_tx_bytes[name] = tx_bytes
                    prev_rx_bytes[name] = rx_bytes

                    pipe.gauge('system.disk.root.percent,service={}'.format(name), 0)

            elapsed = time.time() - start
            log.debug("docker: {}ms".format(int(elapsed * 1000)))
            time.sleep(max(0, interval - elapsed))  # never pass a negative delay

    except Exception as e:
        log.exception(e)
Example #17
    log.debug(f'Bot name {bot.first_name!r} ({bot.name})')

    common.BOT = bot

    commands.setup(updater)

    updater.start_polling()
    updater.idle()

    log.debug('Finish')


if __name__ == '__main__':
    Thread(target=download_main_page_quotes, args=[log, DIR_COMICS]).start()
    Thread(target=download_seq_page_quotes, args=[log, DIR_COMICS]).start()
    Thread(target=download_random_quotes, args=[log, DIR_COMICS]).start()
    Thread(target=do_backup, args=[log_backup]).start()
    Thread(target=run_parser_health_check, args=[log]).start()

    while True:
        try:
            main()
        except Exception as e:
            log.exception('')

            db.Error.create_from(main, e)

            timeout = 15
            log.info(f'Restarting the bot after {timeout} seconds')
            time.sleep(timeout)
Example #18
    # Increment the current read count.
    pickle_f_handle = open(self.cache_count_file_name, "w")
    cPickle.dump(num_time_cach_read, pickle_f_handle)
    pickle_f_handle.close()

    # Load the cache if needed, i.e. when num_time_cach_read > 0.
    cachedresultslist = []
    try:
        if num_time_cach_read > 0:
            pickle_f_handle = open(self.cache_data_file_name)
            cachedresultslist = cPickle.load(pickle_f_handle)
            pickle_f_handle.close()
            if len(cachedresultslist) < self.refreshwindowperiod:
                log.info("Existing cache size: <%s> is less than refresh "
                         "window size: <%s>" % (len(cachedresultslist),
                                                self.refreshwindowperiod))
                cachedresultslist = []
    except Exception as e:
        log.exception(e)
        log.info("Unable to find cache file: <%s>" % self.cache_data_file_name)

    # Adjust the params to be sent to the DB query.
    param = self.get_params()
    end = param['endtime']
    log.debug("Default dates received in getcache are start: <%s> and "
              "end: <%s>" % (param['starttime'], param['endtime']))
    start = self.apply_delta(end)

    # Remove the cache elements that will be refreshed.
    if len(cachedresultslist) > 0:
        cachedresultslist = cachedresultslist[:len(cachedresultslist) -
                                              self.refreshwindowperiod]
    else:
        start = param['starttime']
        log.debug("Setting date back to start: <%s>" % param['starttime'])
    return cachedresultslist, {'starttime': start, 'endtime': end}
Example #19
    # Increment the current read count.
    pickle_f_handle = open(self.cache_count_file_name, "w")
    cPickle.dump(num_time_cach_read, pickle_f_handle)
    pickle_f_handle.close()

    # Load the cache if needed, i.e. when num_time_cach_read > 0.
    cachedresultslist = []
    try:
        if num_time_cach_read > 0:
            pickle_f_handle = open(self.cache_data_file_name)
            cachedresultslist = cPickle.load(pickle_f_handle)
            pickle_f_handle.close()
            if len(cachedresultslist) < self.refreshwindowperiod:
                log.info("Existing cache size: <%s> is less than refresh "
                         "window size: <%s>" % (len(cachedresultslist),
                                                self.refreshwindowperiod))
                cachedresultslist = []
    except Exception as e:
        log.exception(e)
        log.info("Unable to find cache file: <%s>" % self.cache_data_file_name)

    # Adjust the params to be sent to the DB query.
    param = self.get_params()
    end = param['endtime']
    log.debug("Default dates received in getcache are start: <%s> and "
              "end: <%s>" % (param['starttime'], param['endtime']))
    start = self.apply_delta(end)

    # Remove the cache elements that will be refreshed.
    if len(cachedresultslist) > 0:
        cachedresultslist = cachedresultslist[:len(cachedresultslist) -
                                              self.refreshwindowperiod]
        param['starttime'] = start
    else:
        log.debug("Setting date back to start: <%s>" % param['starttime'])
    return cachedresultslist, param
Example #20
def run_docker(address, interval, host, port, debug=False):
    prev_cpu, prev_system = {}, {}
    prev_tx_bytes, prev_rx_bytes, prev_timer = {}, {}, {}
    client = statsd.StatsClient(host, port)
    MEM_USAGE = jmespath.compile('memory_stats.usage')
    MEM_LIMIT = jmespath.compile('memory_stats.limit')
    TOTAL_USAGE = jmespath.compile('cpu_stats.cpu_usage.total_usage')
    SYSTEM_USAGE = jmespath.compile('cpu_stats.system_cpu_usage')
    NUM_CPUS = jmespath.compile('length(cpu_stats.cpu_usage.percpu_usage)')
    TX_BYTES = jmespath.compile(
        'networks.eth0.tx_bytes')  # TODO: Always eth0??? (likely not...)
    RX_BYTES = jmespath.compile('networks.eth0.rx_bytes')
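    # Same collection loop as Example #16, wrapped to a narrower line width.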
    try:
        while True:
            with client.pipeline() as pipe:
                start = time.time()
                containers = get(address, '/containers/json?all=1', debug)
                for container in containers:
                    name = container.get('Names')[0].strip('/')
                    status = container.get('Status')
                    id_ = container.get('Id')
                    log.debug("{}: {}".format(name, status))
                    stats = get(address,
                                '/containers/{}/stats?stream=0'.format(id_),
                                debug)  # Very slow call...

                    mem_usage = MEM_USAGE.search(stats) or 0
                    mem_limit = MEM_LIMIT.search(stats) or 1
                    mem_percent = 100.0 * (mem_usage /
                                           mem_limit) if mem_limit > 0 else 0

                    if debug:
                        log.debug("{}: Mem: {:,} {:,} {}%".format(
                            name, mem_usage, mem_limit, mem_percent))

                    pipe.gauge(
                        'system.memory.virtual.percent,service={}'.format(
                            name), mem_percent)

                    # http://stackoverflow.com/questions/30271942/get-docker-container-cpu-usage-as-percentage
                    cpu_percent = 0

                    total_usage = TOTAL_USAGE.search(stats) or 0
                    cpu_delta = total_usage - prev_cpu.get(name, 0)

                    system_usage = SYSTEM_USAGE.search(stats) or 0
                    system_delta = system_usage - prev_system.get(name, 0)

                    num_cpus = NUM_CPUS.search(stats) or 1

                    if system_delta > 0 and cpu_delta > 0:
                        cpu_percent = (cpu_delta /
                                       system_delta) * num_cpus * 100.0

                    if debug:
                        log.debug("{}: Cpu: {}, {}: {}%".format(
                            name, cpu_delta, system_delta, cpu_percent))

                    prev_cpu[name], prev_system[
                        name] = total_usage, system_usage

                    pipe.gauge('system.cpu.percent,service={}'.format(name),
                               cpu_percent)

                    tx_bytes = TX_BYTES.search(stats) or 0
                    rx_bytes = RX_BYTES.search(stats) or 0

                    tx = tx_bytes - prev_tx_bytes.setdefault(name, 0)  # B
                    rx = rx_bytes - prev_rx_bytes.setdefault(name, 0)

                    timer = time.time()
                    elapsed = timer - prev_timer.get(name, 0)  # s
                    prev_timer[name] = timer

                    tx_rate = tx / elapsed if tx > 0 and elapsed > 0 else 0  # B/s
                    rx_rate = rx / elapsed if rx > 0 and elapsed > 0 else 0

                    pipe.gauge(
                        'system.network.send_rate,service={}'.format(name),
                        tx_rate)
                    pipe.gauge(
                        'system.network.recv_rate,service={}'.format(name),
                        rx_rate)

                    if debug:
                        log.debug("{}: Net Tx: {:,} -> {:,} ({}B/s)".format(
                            name, tx_bytes, prev_tx_bytes[name], tx_rate))
                        log.debug("{}: Net Rx: {:,} -> {:,} ({}B/s)".format(
                            name, rx_bytes, prev_rx_bytes[name], rx_rate))

                    prev_tx_bytes[name] = tx_bytes
                    prev_rx_bytes[name] = rx_bytes

                    pipe.gauge(
                        'system.disk.root.percent,service={}'.format(name), 0)

            elapsed = time.time() - start
            log.debug("docker: {}ms".format(int(elapsed * 1000)))
            time.sleep(max(0, interval - elapsed))  # never pass a negative delay

    except Exception as e:
        log.exception(e)
Example #21
 def real_recovery_db(self):
     try:
         config = getconf()
         db_recovery_idx_file = os.path.join(self.backup_dir,
                                             config.client.db_idx_name)
         if self.transfer_method != 'tcp':
             mount_result = apply_source_and_mount(
                 re_id=self.re_id, mount_path=self.backup_dir)
             self.mount_path = self.backup_dir
             if not mount_result:
                  log.info('mount {} failed!'.format(self.recv_file_dir))
                 return False, 0
         if not os.path.exists(db_recovery_idx_file):
              log.error(
                  'TASKID:{}, db recovery index file does not exist; '
                  'recovery failed!'.format(self.task_id))
             return False, 0
         log.info('db_recovery_idx_file:{},recv_file_dir:{}'.format(
             db_recovery_idx_file, self.recv_file_dir))
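          # The backup ships a small sqlite index file; the db-info record's
          # third field encodes the backup type (full vs. differential).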
         sqlite_conn = get_sqlite_conn(db_recovery_idx_file)
         task_info = get_db_info_record(sqlite_conn)
         backup_type = int(task_info[2])
         sqlite_conn.close()
         log.info('recovery backup_type is:{}'.format(backup_type))
          if not self.check_source_db_target_db():
              log.error('invalid relationship between target_db and to_db!')
              return False, 0
         hdb_setting = os.path.join(self.dir_instance, 'HDBSettings.sh')
         recover_sys = os.path.join(self.dir_instance,
                                    'exe/python_support/recoverSys.py')
         is_create_db = False
         if self.to_db != 'systemdb':
             if self.to_db not in self.show_hana_databases():
                 self.create_tenant_db()
                 is_create_db = True
             self.stop_tenant_database(self.to_db)
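          # Full backups restore with RECOVER DATA; differential backups
          # replay catalog/log/data from the received directory by backup id.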
         if backup_type == config.DB_BACKUP_TYPE_FULL:  # full recovery
             log.info(
                 'begin login hana recovery!!!,recv_file_dir:{}'.format(
                     self.recv_file_dir))
             file_path = os.path.join(self.recv_file_dir, 'full')
             if self.target_db == 'systemdb':
                 recovery_command = r"\"RECOVER DATA ALL USING FILE ('{}') " \
                                    r"CLEAR LOG\"".format(file_path)
                 exec_command = 'su - {} -c "{} {} --command={}"'.format(
                     self.hana_adm, hdb_setting, recover_sys,
                     recovery_command)
                 result = exec_cmd2(exec_command)
             else:
                 recovery_command = r"\"RECOVER DATA for {} ALL USING FILE ('{}') CLEAR LOG\"".format(
                     self.to_db, file_path)
                 exec_command = self.system_db_exec_command_str(
                     recovery_command)
                 result = exec_cmd2(exec_command)
             if result['ret'] != 0:
                  log.error('recovery of {} failed!'.format(self.target_db))
                 self.check_customize_recovery(is_create_db)
                 return False, 0
             return True, 1
         else:  # diff recovery
             future_time = (
                 datetime.datetime.now() +
                 datetime.timedelta(days=365)).strftime("%Y-%m-%d %H:%M:%S")
             full_backup_id = self.obtain_full_backup_id_from_full_backup_file(
             )
             if self.target_db == 'systemdb':
                 recovery_command = r"\"RECOVER DATABASE UNTIL TIMESTAMP '{future_time}'  clear log USING CATALOG PATH " \
                                    r"('{recv_file_path}') USING LOG PATH ('{recv_file_path}') USING DATA PATH " \
                                    r"('{recv_file_path}') USING BACKUP_ID {full_backup_id} CHECK ACCESS USING FILE\"". \
                     format(future_time=future_time, full_backup_id=full_backup_id,
                            recv_file_path=self.recv_file_dir)
                 exec_command = 'su - {} -c "{} {} --command={}"'.format(
                     self.hana_adm, hdb_setting, recover_sys,
                     recovery_command)
                 exec_command_log = exec_command
             else:
                 recovery_command = r"\"RECOVER DATABASE for {new_db_name} UNTIL TIMESTAMP '{future_time}'  clear log " \
                                    r"USING CATALOG PATH ('{recv_file_path}') USING LOG PATH ('{recv_file_path}') " \
                                    r"USING DATA PATH ('{recv_file_path}') USING BACKUP_ID {full_backup_id} CHECK " \
                                    r"ACCESS USING FILE\"".format(new_db_name=self.to_db,
                                                                  future_time=future_time,
                                                                  full_backup_id=full_backup_id,
                                                                  recv_file_path=self.recv_file_dir)
                 exec_command = self.system_db_exec_command_str(
                     recovery_command)
                 exec_command_log = exec_command.replace(
                     r'-p \"{}\"'.format(self.system_db_pwd), '-p ******')
             log.info('exec_command is:{}'.format(exec_command_log))
             result = exec_cmd2(exec_command)
             if result['ret'] != 0:
                  log.error('recovery of {} failed!'.format(self.target_db))
                 self.check_customize_recovery(is_create_db)
                 return False, 0
             return True, 1
      except Exception as ex:
          log.exception(ex)
          raise
Example #22
def index(request):
    log.debug('ddddd {} {name}', 'sss', name='haha')
    log.exception('hhohohoho---')
    return HttpResponse("Hello, world. You're at the polls index.")