def test_2(self):
    """Log one message at every severity through the default logger.

    Uses log.get_logger() with no arguments, i.e. whatever logger the
    project's log module hands back by default.
    """
    # FIX: the describe line previously said 'test_1 Describe: ...'
    # (copy-paste from test_1); it now correctly names test_2.
    # Level 21 sits just above INFO (20) so the describe line is
    # emitted even when INFO-level noise is filtered.
    test_logger.log(21, 'test_2 Describe: Test Case for log.py')
    logger = log.get_logger()
    logger.info('test_2 info')
    logger.debug('test_2 debug')
    logger.warning('test_2 warning')
    logger.error('test_2 error')
    logger.critical('test_2 critical')
    sleep_progressbar(2)
def test_cleanup(self):
    """cleanup exist index"""
    # NOTE: the docstring above is logged verbatim as the test
    # description on the next line, so it is part of the test output.
    logger.info(self.test_cleanup.__doc__)
    # Connect to the first ES node; TLS verification left on
    # (no_verify=False) with no custom CA bundle.
    es_client = ElasticsearchObj(
        ES_ADDRESS[0], ES_USERNAME, ES_PASSWORD, ES_PORT,
        cafile="", no_verify=False)
    existing_indices = es_client.es_indices_names
    if existing_indices:
        es_client.multi_delete_indices(existing_indices, INDEX_NAME)
    # Give the cluster a minute to settle after the deletions.
    util.sleep_progressbar(60)
def test_3(self):
    """Log through a dedicated 'test3' logger, freshly reset, at every severity."""
    test3_logger = log.get_logger(
        logfile='test_3.log', logger_name='test3',
        debug=True, reset_logger=True)
    test3_logger.info('test_3 start ...')
    # Emit the same marker line at each remaining severity.
    for emit in (test3_logger.warning, test3_logger.debug,
                 test3_logger.error, test3_logger.critical):
        emit('test_3 hello,world')
    sleep_progressbar(3)
def test_1(self):
    """Configure file logging via log.basic_config and log at every severity."""
    # Level 21 sits just above INFO so the describe line survives filtering.
    test_logger.log(21, 'test_1 Describe: Test Case for log.py')
    log.basic_config("test_1.log")
    module_logger = logging.getLogger(__name__)
    module_logger.info('test_1 start ...')
    # Same marker line at each remaining severity.
    for emit in (module_logger.warning, module_logger.debug,
                 module_logger.error, module_logger.critical):
        emit('test_1 hello,world')
    sleep_progressbar(1)
def test_index(self):
    """Elasticsearch index Test: multi-thread index"""
    # NOTE: the docstring above is logged verbatim as the test
    # description on the next line, so it is part of the test output.
    logger.info(self.test_index.__doc__)
    indexer = ElasticsearchIndex(
        ES_ADDRESS, ES_USERNAME, ES_PASSWORD, ES_PORT,
        NUMBER_OF_INDICES, NUMBER_OF_DOCUMENTS, BULK_SIZE, INDEX_NAME)
    # Index random documents across threads, then wait for each
    # created index to reach green health.
    for created_index in indexer.multi_index_random():
        indexer.is_index_green(created_index)
    logger.info("Sleep 600s after the iteration es index complete...")
    util.sleep_progressbar(600)
def wait_data_sync(self, drive1, drive2, check_data=False, check_acls=False, retry=360):
    """
    wait for data sync between drive1 and drive2

    Polls up to `retry` times (30s apart) until the two directory
    trees agree, applying up to three checks per attempt:
      1. name sync  -- no entries present on only one side
      2. data sync  -- (check_data) no differing common files
      3. acls sync  -- (check_acls) ACL listings match per self.get_acls()

    filecmp.dircmp compares only a single directory level, so common
    subdirectories are compared by the recursion at the bottom.

    @params:
        (char) drive1
        (char) drive2
        (bool) check_data   also require common files to compare equal
        (bool) check_acls   also require ACLs to match
        (int)  retry        max polling attempts
    @output:
        (bool) True / False  -- result for THIS level only; recursive
        results for subdirectories are not folded into the return value
        (NOTE(review): possibly intentional best-effort, confirm)
    """
    for x in range(1, retry + 1):
        sync_flag = False
        try:
            # Shallow, single-level comparison of the two trees.
            obj_filecmp = filecmp.dircmp(drive1, drive2)
            # Entries that exist on only one side => names not yet synced.
            left_right_only = obj_filecmp.left_only + obj_filecmp.right_only
            if left_right_only:
                sync_flag = False
                logger.warning('FAIL: Check meta sync between %s and %s ' % (drive1, drive2))
                logger.warning(
                    "Wait to meta sync between %s and %s --> \n %s" %
                    (drive1, drive2, json.dumps(left_right_only, indent=4)))
                util.sleep_progressbar(30)
                continue
            else:
                # Names agree; optionally require file contents to match.
                # NOTE: diff_files comes from filecmp's default shallow
                # compare (stat signature), not a byte-level diff.
                if check_data and obj_filecmp.diff_files:
                    # logger.warning('FAIL: Check data sync(all file the same) between %s and %s ' % (drive1, drive2))
                    logger.warning(
                        "Wait to data sync between %s and %s --> \n %s" %
                        (drive1, drive2, json.dumps(obj_filecmp.diff_files, indent=4)))
                    util.sleep_progressbar(30)
                    continue
                # Optionally require the ACL listings to match as well.
                if check_acls:
                    drive1_acls = self.get_acls(drive1)
                    drive2_acls = self.get_acls(drive2)
                    not_sync_acls_list = util.get_list_difference(
                        drive1_acls, drive2_acls)
                    if not_sync_acls_list:
                        sync_flag = False
                        # logger.warning('FAIL: Check acls sync between %s and %s ' % (drive1, drive2))
                        logger.warning(
                            "Wait to acls sync between %s and %s --> \n %s" %
                            (drive1, drive2, json.dumps(not_sync_acls_list, indent=4)))
                        util.sleep_progressbar(30)
                        continue
                # All requested checks passed for this level.
                sync_flag = True
                logger.info(
                    'PASS: Check data sync between %s and %s (check_data=%s, check_acls=%s)' %
                    (drive1, drive2, check_data, check_acls))
                break
        except Exception as e:
            logger.error(
                "ERROR: run wait_data_sync, Will retry %d/%d \n %s" %
                (x, retry, str(e)))
            # A permission error will not heal with retries; bail out.
            if 'Access is denied' in str(e):
                break
            util.sleep_progressbar(30)
            continue
    else:
        # for-else: loop exhausted all attempts without a break => timeout.
        sync_flag = False
        logger.error(
            'Check data sync timeout between %s and %s (check_data=%s, check_acls=%s)...'
            % (drive1, drive2, check_data, check_acls))
    # Recurse into shared subdirectories (dircmp is single-level).
    # NOTE(review): if dircmp raised on the very first attempt and the
    # loop exited via the 'Access is denied' break, obj_filecmp is
    # unbound here and this raises NameError -- confirm intended.
    if len(obj_filecmp.common_dirs) > 0:
        for item in obj_filecmp.common_dirs:
            self.wait_data_sync(
                os.path.join(drive1, item),
                os.path.abspath(os.path.join(drive2, item)),
                check_data, check_acls, retry)
    return sync_flag