def img_save(self, rd, out_flag):
    """Crop each region described in *rd* out of the workbench screenshot
    and save it as <icon_path>/<index>.png.

    rd       -- list of region descriptors; rd[i][0][0] is (w, h, x, y, _)
    out_flag -- 'OI'  : save only regions with no detected text
                (td.text_detector(...) == 0);
                'ALL' : save every region unconditionally.
    """
    img_org = cv2.imread(gl.get_value('scratch_path') + 'workbench.png')
    path = gl.get_value('icon_path')
    for i, region in enumerate(rd):
        w, h, x, y, _ = region[0][0]
        # numpy 2-D slicing replaces the original row-by-row copy loop
        img = np.array(img_org[y:y + h, x:x + w])
        if out_flag == 'OI':
            # save only icon-like (text-free) regions
            if td.text_detector(img, 320, 320, 0.5) == 0:
                cv2.imwrite(path + str(i) + '.png', img)
        elif out_flag == 'ALL':
            cv2.imwrite(path + str(i) + '.png', img)
def statistics():
    """Summarize the whole test run: total / passed / failed testcase
    counts plus start, end and elapsed time.

    Reads counters accumulated in global_variables and writes the summary
    to summary.log and to the regular record log.
    """
    import basic_class
    import global_variables
    import datetime
    test_starttime = global_variables.get_value('test_starttime')
    test_endtime = datetime.datetime.now()
    basic_class.mylogger_record.debug('tests end at: ' + str(test_endtime))
    time_costs = (test_endtime - test_starttime).seconds / 60
    total_testcases_num = global_variables.get_value('total_testcases_num')
    passed_testcases_num = global_variables.get_value('passed_testcases_num')
    failed_testcases_num = global_variables.get_value('failed_testcases_num')
    # print statistics to summary.log
    basic_class.mylogger_summary.yes('\nTotal number of Test Cases: ' + str(total_testcases_num))
    # fixed: the PASS/FAIL lines were mangled into "'PASS: '******'FAIL: '..."
    # (invalid code); emit one summary line per counter
    basic_class.mylogger_summary.yes('PASS: ' + str(passed_testcases_num))
    basic_class.mylogger_summary.yes('FAIL: ' + str(failed_testcases_num))
    # print time info to summary.log
    # fixed: '{:.2}' formats to 2 significant digits (e.g. '1.2e+02');
    # '{:.2f}' gives the intended 2 decimal places
    basic_class.mylogger_summary.yes('\n\n=============================================')
    basic_class.mylogger_summary.yes('Test started at: ' + str(test_starttime))
    basic_class.mylogger_summary.yes('Test endded at: ' + str(test_endtime))
    basic_class.mylogger_summary.yes('Total time is: {:.2f} minutes'.format(time_costs))
    basic_class.mylogger_summary.yes('=============================================')
    # print time info to screen and log
    basic_class.mylogger_record.info('Test started at: ' + str(test_starttime))
    basic_class.mylogger_record.info('Test endded at: ' + str(test_endtime))
    basic_class.mylogger_record.info('Total time is: {:.2f} minutes'.format(time_costs))
def create_log_folders():
    """Fetch the owm version from the remote host and create per-run
    logs/ and summary/ folders named <owm_version>-<timestamp>.

    Stores 'owm_version', 'logpath' and 'summarypath' in global_variables.
    Exits with status 1 if the remote rpm query writes to stderr.
    """
    import remote_operations
    import global_variables
    import time
    import os
    import paramiko
    mx1_host1_ip = global_variables.get_value('mx1_host1_ip')
    root_account = global_variables.get_value('root_account')  # root by default
    root_passwd = global_variables.get_value('root_passwd')
    # fixed: this lookup was commented out although sshport is used by
    # ssh0.connect below, which raised NameError at runtime
    sshport = global_variables.get_value('sshport')
    ssh0 = paramiko.SSHClient()
    ssh0.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh0.connect(hostname=mx1_host1_ip, port=sshport,
                 username=root_account, password=root_passwd)
    cmds = 'rpm -qa|grep owm|grep owm-common'
    stdin, stdout, stderr = ssh0.exec_command(cmds)
    okout = stdout.read()
    errout = stderr.read()
    ssh0.close()
    if len(errout) == 0:
        out = str(okout, 'utf-8')
    else:
        out = str(errout, 'utf-8')
        print("Some error seems happened:\n" + out)
        exit(1)
    # version string is whatever follows 'owm-common-' in the rpm name
    owm_version = out.split('owm-common-')[1].strip()
    global_variables.set_value('owm_version', owm_version)
    initialpath = global_variables.get_value('initialpath')
    currenttime = time.strftime("%Y-%m-%d-%H-%M")
    foldername = owm_version + '-' + '{}'.format(currenttime)
    # reuse an existing folder (clearing its old log) or create a new one
    if os.path.exists('logs/' + foldername):
        try:
            os.remove('logs/' + foldername + '/alltestcases.log')
        except FileNotFoundError:
            pass
    else:
        os.makedirs('logs/' + foldername)
    if os.path.exists('summary/' + foldername):
        try:
            os.remove('summary/' + foldername + '/summary.log')
        except FileNotFoundError:
            pass
    else:
        os.makedirs('summary/' + foldername)
    global_variables.set_value('logpath', initialpath + '/logs/' + foldername)
    global_variables.set_value('summarypath', initialpath + '/summary/' + foldername)
def main():
    """Entry point: publish the initial path, parse the requested testcase
    locations, execute them, then emit the run statistics."""
    import global_variables
    global_variables.set_value('initialpath', initialpath)
    testcaselocation = global_variables.get_value('argvlist')
    chloglevel = global_variables.get_value('chloglevel')
    # normalize the given locations into a list of testcase paths
    tclocation = basic_function.parse_testcaselocation(testcaselocation)
    # run every testcase, then summarize the results
    basic_function.execute(tclocation, initialpath)
    basic_function.statistics()
def summary(result_lists, tc_name=''):
    """Decide whether a testcase passed and record the verdict.

    The testcase passes only when every entry of *result_lists* contains
    the word 'success' (case-insensitive).  Updates the pass/fail/total
    counters kept in global_variables and writes a PASS/FAIL line to
    summary.log.
    """
    import basic_class
    import global_variables
    basic_class.mylogger_record.debug('result_lists= ' + str(result_lists))
    total_testcases_num = global_variables.get_value('total_testcases_num')
    passed_testcases_num = global_variables.get_value('passed_testcases_num')
    failed_testcases_num = global_variables.get_value('failed_testcases_num')
    # number of 'success' entries; must reach len(result_lists) to pass
    success_flag = 0
    target = len(result_lists)
    if tc_name != '':
        # caller-supplied testcase name wins over the stored default
        testcase_name = tc_name
    else:
        testcase_name = global_variables.get_value('testcase_name')
    summary_print_length = int(global_variables.get_value('summary_print_length'))
    for result in result_lists:
        if 'success' in result.lower():
            success_flag += 1
    # dot padding so the [PASS]/[FAIL] column lines up in summary.log
    dummy_length2 = int(summary_print_length - len(testcase_name) - 7)
    basic_class.mylogger_record.debug('success_flag= ' + str(success_flag))
    if success_flag == target:
        basic_class.mylogger_recordnf.yes('----------Testcase: ' + testcase_name + ' passed.----------\n')
        basic_class.mylogger_summary.yes(testcase_name + ' ' + '.' * dummy_length2 + ' [PASS]')
        passed_testcases_num += 1
        global_variables.set_value('passed_testcases_num', passed_testcases_num)
    else:
        basic_class.mylogger_recordnf.no('----------Testcase: ' + testcase_name + ' failed.----------\n')
        basic_class.mylogger_summary.no(testcase_name + ' ' + '.' * dummy_length2 + ' [FAIL]')
        failed_testcases_num += 1
        global_variables.set_value('failed_testcases_num', failed_testcases_num)
    total_testcases_num += 1
    global_variables.set_value('total_testcases_num', total_testcases_num)
def add_run_time():
    """Print how long the current testcase took, in whole seconds,
    based on the start/stop timestamps stored in global_variables."""
    import global_variables
    import datetime
    testcase_stoptime = global_variables.get_value('testcase_stoptime')
    testcase_starttime = global_variables.get_value('testcase_starttime')
    print('testcase_starttime= ' + str(testcase_starttime))
    print('testcase_stoptime= ' + str(testcase_stoptime))
    testcases_cost_time = (testcase_stoptime - testcase_starttime).seconds
    print('testcases_cost_time=' + str(testcases_cost_time))
def remote_operation(sshhost, cmds,
                     username='', passwd='', confirmflag=1, confirmobj='',
                     confirmobjcount=1, sshport=22, keyfile='',
                     outlog='sshout.log', errorlog='ssherror.log',
                     paramikologenable=0):
    """Dispatch an ssh command to the pubkey- or password-based
    implementation, selected by the per-host auth flag stored in
    global_variables ('sshnonpassauth_flag_<host>': '1' pubkey,
    '0' password).  Exits if the flag is neither.
    """
    import global_variables
    import basic_class
    ssh_authtype_flag = global_variables.get_value('sshnonpassauth_flag_' + sshhost)
    if ssh_authtype_flag == str(1):
        basic_class.mylogger_record.debug('eatablishing ssh connection with pubkey to {} ...'.format(sshhost))
        return remote_operation_with_sshpubkeyauth(
            sshhost, cmds, username, passwd, confirmflag, confirmobj,
            confirmobjcount, sshport, keyfile, outlog, errorlog,
            paramikologenable)
    if ssh_authtype_flag == str(0):
        basic_class.mylogger_record.debug('eatablishing ssh connection with password to {} ...'.format(sshhost))
        return remote_operation_with_sshpasswordauth(
            sshhost, cmds, username, passwd, confirmflag, confirmobj,
            confirmobjcount, sshport, keyfile, outlog, errorlog,
            paramikologenable)
    basic_class.mylogger_record.error('SSH establish failed,please check manually!!')
    exit(1)
def welcome():
    """Write the summary.log header for this WuKong test-suite run and
    reset the total/passed/failed counters in global_variables."""
    import basic_class
    import global_variables
    summary_print_length = 100
    global_variables.set_value('summary_print_length', summary_print_length)
    owm_version = global_variables.get_value('owm_version')
    summary_title = ' MX TestCases Summary for ' + owm_version + ' '
    # pad to an even length so the '=' borders are symmetric
    if len(summary_title) % 2 != 0:
        summary_title += ' '
    dummy_length1 = int((summary_print_length - len(summary_title)) / 2)
    basic_class.mylogger_summary.yes('=' * dummy_length1 + summary_title + '=' * dummy_length1 + '\n')
    # counters consumed later by summary() and statistics()
    for counter_key in ('total_testcases_num', 'passed_testcases_num',
                        'failed_testcases_num'):
        global_variables.set_value(counter_key, 0)
def __init__(self, screen_shot):
    """Wrap a captured screen region.

    screen_shot -- pair of (metadata, pixels); assumes metadata is
    (width, height, pos_x, pos_y, cflag) and pixels is a flat buffer of
    height*width values -- TODO confirm against the capture code.
    """
    self.origin = screen_shot
    self.x_axis = screen_shot[0][0]   # region width (used as reshape cols)
    self.y_axis = screen_shot[0][1]   # region height (used as reshape rows)
    self.positionx = screen_shot[0][2]
    self.positiony = screen_shot[0][3]
    self.cflag = screen_shot[0][4]
    # restore the flat pixel buffer to 2-D for filtering
    img = screen_shot[1].reshape(self.y_axis, self.x_axis)
    # NOTE(review): kernel size comes from config via eval(); the stored
    # value must be a tuple literal such as '(5, 5)'
    img_blur = cv2.GaussianBlur(img, eval(gl.get_value('gaussian_blur')), 5)
    #cv2.imshow('', img_blur)
    #cv2.waitKey()
    self.pic_blur = np.array(img_blur).ravel()    # blurred, flattened copy
    self.pic_original = np.array(screen_shot[1])  # untouched pixel copy
def CURSOR_REST(self):
    """Park the mouse cursor 20px inside the top-left corner of the main
    window so it does not sit over content being captured."""
    window_position = eval(gl.get_value('main_window_position'))
    rest_point = np.array([window_position[0], window_position[1]]) + np.array([20, 20])
    pyautogui.moveTo(rest_point[0], rest_point[1])
def __init__(self, chloglevel):
    """Configure the 'WK-record' logger: a console handler whose level
    follows *chloglevel*, plus a DEBUG file handler writing to
    <logpath>/alltestcases.log."""
    import global_variables
    self.logger = logging.getLogger('WK-record')
    self.logger.setLevel(logging.DEBUG)  # logger itself passes everything
    self.chloglevel = chloglevel
    self.ch = logging.StreamHandler()
    # '-v' (WARNING) and '-vv' (DEBUG) both show DEBUG information on screen
    if 'WARNING' in self.chloglevel or 'DEBUG' in self.chloglevel:
        self.ch.setLevel(logging.DEBUG)
    else:
        self.ch.setLevel(logging.INFO)  # default 'INFO'
    self.logpath = global_variables.get_value('logpath')
    self.fh = logging.FileHandler(self.logpath + '/alltestcases.log')
    self.fh.setLevel(logging.DEBUG)
    self.formatter = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s')
    self.ch.setFormatter(self.formatter)
    self.fh.setFormatter(self.formatter)
    self.logger.addHandler(self.ch)
    self.logger.addHandler(self.fh)
def parse_chloglevel():
    """Derive the console log level from the parsed argv list.

    '-v' -> 'WARNING', '-vv' -> 'DEBUG', neither -> 'ERROR'; duplicated
    flags abort the run.  The chosen level is stored under 'chloglevel'
    in global_variables and returned.
    """
    parse_args()  # populate 'argvlist' first
    import global_variables
    import basic_class
    argvlist = global_variables.get_value('argvlist')
    # reject duplicated verbosity flags outright
    if argvlist.count('-v') > 1 or argvlist.count('-vv') > 1:
        basic_class.mylogger_record.error(
            "multiple '-v' or '-vv' detected,please make sure only one entered!"
        )
        exit()
    if '-v' in argvlist:
        chloglevel = 'WARNING'
        argvlist.remove('-v')
    elif '-vv' in argvlist:
        chloglevel = 'DEBUG'
        argvlist.remove('-vv')
    else:
        chloglevel = 'ERROR'
    global_variables.set_value('chloglevel', chloglevel)
    return chloglevel
def string_ocr(self, rd):
    """Run OCR over sub-regions of *rd* at several block sizes in parallel.

    rd -- region descriptor; assumes rd[0][0] holds (width, height, ...)
    and rd[0][1] a flat pixel buffer -- TODO confirm upstream format.
    Returns one entry per region produced by ImgTree.next_depth:
    [per-block-size OCR results, [center_x, center_y]].
    """
    futures = []
    string_loc = []
    tree = ImgTree()
    temp = rd
    # both values come from config as python literals
    end_depth = eval(gl.get_value('end_depth'))
    block_size_iter = eval(gl.get_value('block_size_iter'))
    # split the region tree down to the configured depth
    RD4 = tree.next_depth([rd], end_depth)
    Whole_Region1 = temp[0][1].reshape(temp[0][0][1], temp[0][0][0])
    #cv2.imshow('', np.array(Whole_Region1))
    #cv2.waitKey()
    # one OCR pass per block size, submitted concurrently
    with concurrent.futures.ThreadPoolExecutor() as executor:
        for i in range(block_size_iter[0], block_size_iter[1]):
            future = executor.submit(self.ocr_concurrent_wrap, i, rd, RD4, Whole_Region1)
            futures.append(future)
        # NOTE(review): as_completed yields in completion order, so the
        # ordering of string_loc across block sizes is nondeterministic --
        # confirm downstream code does not rely on block-size order
        for future in concurrent.futures.as_completed(futures):
            string_loc.append(future.result())
    strings_all = []
    # regroup: for each region, gather the result of every block size,
    # then append the region's center coordinates
    for i in range(len(RD4)):
        string_cl = []
        for j in range(len(string_loc)):
            string_cl.append(string_loc[j][i])
        strings_all.append([string_cl,
                            [RD4[i][0][0][0]/2 + RD4[i][0][0][2],
                             RD4[i][0][0][1]/2 + RD4[i][0][0][3]]])
    return strings_all
def print_mx_version():
    """Log the owm version previously stored by create_log_folders()."""
    import global_variables
    import basic_class
    owm_version = global_variables.get_value('owm_version')
    basic_class.mylogger_record.info('owm_version = ' + owm_version)
def region_sep_main(self, act_type):
    """Capture the application's main window and return its regions."""
    capture = ImageCapture(gl.get_value('version').lower())
    regions = capture.icapture('main', act_type)
    return regions
def BINPUT(self, coordinates, act_input):
    """Double-click just below *coordinates* (shifted down by the
    configured input_ygap), clear the field with backspace, then type
    *act_input*."""
    target_x = coordinates[0]
    target_y = coordinates[1] + eval(gl.get_value('input_ygap'))
    pyautogui.click([target_x, target_y], clicks=2)
    pyautogui.press('backspace')
    pyautogui.typewrite(act_input)
def decide_import_or_reload(case_name, count_type, tmp_type):
    """Import the testcase module on first use, reload it afterwards.

    count_type -- key of the global counter; 1 means 'never imported yet'.
    tmp_type   -- global key under which the module object is cached.
    """
    import basic_class
    import global_variables
    import importlib
    import time
    basic_class.mylogger_recordnf.title('\n[-->Executing ' + case_name + '.py ...]')
    count_num = int(global_variables.get_value('{}'.format(count_type)))
    if count_num == 1:
        # first execution: import and cache the module object
        module = importlib.import_module(case_name)
        global_variables.set_value('{}'.format(tmp_type), module)
        global_variables.set_value('{}'.format(count_type), count_num + 1)
    else:
        # later executions: reload the cached module so it runs again
        module = global_variables.get_value(tmp_type)
        importlib.reload(module)
def smtp_set_debuglevel(self):
    """Apply the configured SMTP debug output level.

    smtp_debuglevel is defined in etc global.vars: 1/True enables debug
    messages for the connection and all traffic with the server; 2 also
    timestamps those messages.
    """
    smtp_debuglevel = global_variables.get_value('smtp_debuglevel')
    basic_class.mylogger_record.debug('command:<set_debuglevel ' + str(smtp_debuglevel) + '>')
    self.smtp.set_debuglevel(int(smtp_debuglevel))
def __init__(self):
    """Configure the 'WK-summary' logger with one DEBUG-level file
    handler writing bare messages to <summarypath>/summary.log."""
    import global_variables
    self.loggerrr = logging.getLogger('WK-summary')
    self.loggerrr.setLevel(logging.INFO)  # default 'INFO'
    self.summarypath = global_variables.get_value('summarypath')
    self.fh = logging.FileHandler(self.summarypath + '/summary.log')
    self.fh.setLevel(logging.DEBUG)
    # bare messages only: summary.log has no timestamps or levels
    self.formatter = logging.Formatter('%(message)s')
    self.fh.setFormatter(self.formatter)
    self.loggerrr.addHandler(self.fh)
def region_init_sep(self):
    """Capture the main window, split it into UI regions, dump each region
    to scratch\\<i>.png, and publish the seven named region geometries
    (top_border .. main_window) via global variables.

    The >= 30 checks presumably filter out regions whose second metadata
    field is below 30 (thin title-bar noise) -- TODO confirm; the 4/7
    limits cap how many regions each pass keeps.
    """
    test_window = ImageCapture(gl.get_value('version').lower())
    region = test_window.icapture('main', False)
    tree = ImgTree()
    # first split: depth-1 children of the whole capture
    RD0 = tree.next_depth(region, 1)
    regions_up = []
    for i in range(len(RD0)):
        if RD0[i][0][0][1] >= 30:
            regions_up.append(RD0[i])
        if len(regions_up) > 4:
            break
    # split the last kept region one level further
    RD1 = tree.next_depth([regions_up[-1]], 1)
    regions = regions_up[0:len(regions_up)-1]
    for i in range(3):
        if len(regions) > 7:
            break
        if RD1[i][0][0][1] >= 30:
            regions.append(RD1[i])
    # dump every region for inspection/debugging (Windows-style path)
    for i in range(len(regions)):
        cv2.imwrite('scratch\\' + str(i) + '.png',
                    regions[i][0][1].reshape(regions[i][0][0][1],
                                             regions[i][0][0][0]))
    # publish the geometry of each named region; assumes exactly 7 regions
    # survived the filtering above (IndexError otherwise)
    gl.set_value('top_border', regions[0][0][0])
    gl.set_value('ribbon_up', regions[1][0][0])
    gl.set_value('ribbon_down', regions[2][0][0])
    gl.set_value('quick_access', regions[3][0][0])
    gl.set_value('left_border', regions[4][0][0])
    gl.set_value('navigator', regions[5][0][0])
    gl.set_value('main_window', regions[6][0][0])
def pop_set_debuglevel(self):
    """Apply the configured POP3 debug output level.

    pop_debuglevel: 0 (default) is silent; 1 prints roughly one line per
    request; 2 or higher logs each line sent and received on the control
    connection.

    example: instance.pop_set_debuglevel()
    """
    pop_debuglevel = global_variables.get_value('pop_debuglevel')
    basic_class.mylogger_record.debug('command:<set_debuglevel ' + str(pop_debuglevel) + '>')
    self.pop3.set_debuglevel(int(pop_debuglevel))
def set_global_variables():
    """Build an Args object from the darknet .cfg/.data paths stored in
    the global store, create the result/backup directories, and publish
    the derived paths and file names back into the globals.

    Returns the populated Args object.
    """
    args = gv.Args()
    cfg_fp = gv.get_value('cfg_fp')
    data_fp = gv.get_value('data_fp')
    # the .data file holds key=value lines (train=..., valid=..., names=...)
    with open(data_fp, 'r') as f:
        options = f.readlines()
    options = [op.strip('\n').split('=') for op in options]
    options = dict(options)
    # base names and darknet root parsed out of the paths; assumes both
    # paths contain a '/cfg/' component (IndexError otherwise)
    args.cfg_fn = re.findall('/cfg/(.*?)\.cfg', cfg_fp)[0]
    args.data_fn = re.findall('/cfg/(.*?)\.data', data_fp)[0]
    args.darknet_p = re.findall('(.*)/cfg/', cfg_fp)[0]
    args.gpus = gv.get_value('gpus')
    args.order = gv.get_value('order')
    args.draw_option = gv.get_value('draw_option')
    args.compute_step = gv.get_value('compute_step')
    args.valid_step = gv.get_value('valid_step')
    args.thresh = gv.get_value('thresh')
    key_name = args.data_fn
    train_fn = options['train'].split('/')[-1]
    valid_fn = options['valid'].split('/')[-1]
    cls_fp = options['names']
    result_p = options['results']
    backup_p = options['backup']
    # make sure output directories exist before training/validation runs
    filetools.check_makedir(result_p)
    filetools.check_makedir(backup_p)
    filetools.check_makedir(result_p + '/cache')
    # dataset root = train list path with the /filelist/... suffix removed
    dataset_p = re.sub('/filelist/.*', '', options['train'])
    gv.set_value('key_name', key_name)
    gv.set_value('result_p', result_p)
    gv.set_value('backup_p', backup_p)
    gv.set_value('dataset_p', dataset_p)
    gv.set_value('train_fn', train_fn)
    gv.set_value('valid_fn', valid_fn)
    gv.set_value('cls_fp', cls_fp)
    show_gv()
    return args
def text_location(self, region):
    """Locate text character boxes inside *region* using the configured
    OCR backend (self.method).

    Returns a list of per-character rows; when nothing is found (or the
    method is unrecognized) a single empty sentinel row.
    """
    if self.method == 'tesseract':
        h, w = region.shape
        OCR_rlt = pytesseract.image_to_boxes(region)
        if len(OCR_rlt) > 0:
            OCR_sort = OCR_rlt.split('\n')
            for i in range(len(OCR_sort)):
                OCR_sort[i] = OCR_sort[i].split(' ')
                OCR_sort[i][1] = int(OCR_sort[i][1])
                # tesseract boxes use a bottom-left origin; flip y into
                # top-left image coordinates
                OCR_sort[i][2] = h - int(OCR_sort[i][2])
                OCR_sort[i][3] = int(OCR_sort[i][3])
                OCR_sort[i][4] = h - int(OCR_sort[i][4])
                OCR_sort[i][5] = int(OCR_sort[i][5])
        else:
            OCR_sort = [['', 0, 0, 0, 0]]
    elif self.method == 'OCR_Space':
        # write the region to a uniquely named scratch file for upload
        path = gl.get_value('scratch_path')+str(time.time())+'.png'
        time.sleep(0.5)
        cv2.imwrite(path, region)
        # NOTE(review): API key is hard-coded; should be moved to config
        OCR_sort = ocr_space.image_to_boxes(path, api_key='0d5c85b2d388957')
    else:
        OCR_sort = [['', 0, 0, 0, 0]]
    return OCR_sort
# -*- coding: utf-8 -*- import xml.etree.ElementTree as ET import os from PIL import Image, ImageDraw import global_variables as gv cfg_fp = gv.get_value('cfg_fp') data_fp = gv.get_value('data_fp') cls_fp = gv.get_value('cls_fp') result_p = gv.get_value('result_p') weight_p = gv.get_value('weight_p') dataset_p = gv.get_value('dataset_p') train_fn = gv.get_value('train_fn') valid_fn = gv.get_value('valid_fn') def draw(args): with open(cls_fp, 'r') as f: classes = f.readlines() classes = [cls.strip('\n') for cls in classes] if len(classes) > 5: print("The number of the class is too large!") exit() colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 255), (0, 0, 0)] xmldir = '/home/anngic/dataset/data_coco/val2014/Annotations/' imgdir = '/home/anngic/dataset/data_coco/val2014/JPEGImages/' image_id = 'COCO_val2014_000000000872' in_xmlfile = open(xmldir + '%s.xml' % (image_id)) tree = ET.parse(in_xmlfile) root = tree.getroot()
# coding:utf-8 import os import commands import os import global_variables as gv from compute import compute_mAP from draw import draw_loss, draw_single_xml, draw_xmls, draw_mAP import filetools cfg_fp = gv.get_value('cfg_fp') data_fp = gv.get_value('data_fp') cls_fp = gv.get_value('cls_fp') result_p = gv.get_value('result_p') backup_p = gv.get_value('backup_p') weight_fp = gv.get_value('weight_fp') dataset_p = gv.get_value('dataset_p') train_fn = gv.get_value('train_fn') valid_fn = gv.get_value('valid_fn') key_name = gv.get_value('key_name') video_fp = gv.get_value('video_fp') def train(args): log_file = result_p + '/cache/avgloss.log' cmd = 'cd ' + args.darknet_p + ' && ' \ + args.darknet_p+'/darknet detector train ' + data_fp + ' ' + cfg_fp + ' ' + weight_fp + ' -gpus ' + args.gpus #commands.getoutput('script -a ' + log_file + ' -c ' + cmd) os.system('script -a ' + log_file + ' -c ' + cmd)
def traverse_judge(casename, currentlists):
    """Copy the testcase script into the temp dir and import-or-reload it.

    casename     -- script base name; executed when it contains 'setup',
                    'run' or 'teardown' (case-insensitive).
    currentlists -- file names present in the current testcase directory.

    On the first execution of each phase the module is imported and
    cached in global_variables; afterwards the cached module object is
    reloaded so its top-level code runs again.
    """
    import os
    import sys
    import global_variables
    import time
    import basic_class
    import importlib
    import shutil

    def _import_or_reload(module_name, count_key, cache_key):
        # one phase (setup/run/teardown): import on first use, reload after.
        # This replaces three copy-pasted stanzas from the original.
        count = int(global_variables.get_value(count_key))
        if count == 1:
            module = importlib.import_module(module_name)
            global_variables.set_value(cache_key, module)
            global_variables.set_value(count_key, count + 1)
        else:
            importlib.reload(global_variables.get_value(cache_key))

    testcasename = casename + '.py'
    temppath = global_variables.get_value('temppath')
    if testcasename in currentlists:
        shutil.copyfile(testcasename, temppath + '/' + testcasename)
        # drop stale bytecode so the reload picks up the fresh copy
        try:
            shutil.rmtree(temppath + '/__pycache__')
            time.sleep(0.01)
        except FileNotFoundError:
            basic_class.mylogger_record.debug('__pycache__ not exists')
        if 'setup' in casename.lower():
            basic_class.mylogger_recordnf.title('[-->Executing setup.py ...]')
            _import_or_reload('setup', 'setup_num', 'tmp_module_setup')
        if 'run' in casename.lower():
            basic_class.mylogger_recordnf.title('[-->Executing run.py ...]')
            _import_or_reload('run', 'run_num', 'tmp_module_run')
        if 'teardown' in casename.lower():
            basic_class.mylogger_recordnf.title('[-->Executing teardown.py ...]')
            _import_or_reload('teardown', 'teardowm_num', 'tmp_module_teardown')
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import basic_class import global_variables import time import shutil import subprocess # run basic_class.mylogger_record.info('Begin running IMAPCollider testing ...') imap_collider = global_variables.get_value('imap_collider') outcode = subprocess.run(['{}'.format(imap_collider)]) basic_class.mylogger_record.info('the outcode of running IAMP collider is:' + str(outcode)) # copy summary.log summarypath = global_variables.get_value('summarypath') shutil.copy2('IC_Summary.log', '{}/IC_Summary.log'.format(summarypath))
import commands import matplotlib.pyplot as plt import numpy as np import global_variables as gv cfg_fp = gv.get_value('cfg_fp') data_fp = gv.get_value('data_fp') cls_fp = gv.get_value('cls_fp') result_p = gv.get_value('result_p') dataset_p = gv.get_value('dataset_p') train_fn = gv.get_value('train_fn') valid_fn = gv.get_value('valid_fn') key_name = gv.get_value('key_name') def draw(args): loss_fp = result_p + '/cache/' + 'avgloss.log' # mAP_fp = result_fp + '/' + 'mAP.log' display = 10 # solver test_interval = 100 # solver train_output = commands.getoutput( "cat " + loss_fp + " | grep 'avg,' | awk '{print $3}'") # train loss train_loss = train_output.split("\n") for i in range(len(train_loss)): if float(train_loss[i]) > 16 : train_loss[i] = str(16.0) train_output = commands.getoutput(
def icon_match(target_string, path):
    """Template-match the icon file named *target_string* (any extension)
    found under *path* against the previous screenshot.

    Returns the (x, y) center of the best match as a float array, or the
    empty list when no icon file matches the name.
    """
    coordinates = []
    for i in os.walk(path):
        for j in range(len(i[2])):
            a = i[2][j]
            # match on the file's base name, case-insensitively
            if target_string == a.split('.')[0].lower():
                gray_org = cv2.imread(
                    gl.get_value('scratch_path') + 'previous.png')
                icon = cv2.imread(path + a)
                gray_icon = cv2.cvtColor(icon, cv2.COLOR_BGR2GRAY)
                # scale factor comes from config as a python literal
                gray_icon = cv2.resize(
                    gray_icon, (gray_icon.shape[1] * eval(gl.get_value('scale')),
                                gray_icon.shape[0] * eval(gl.get_value('scale'))))
                minloc_list = []
                minval_list = []
                # NOTE(review): the *1 lists and the thresholded match
                # result (res1) below are computed but never used
                minloc_list1 = []
                minval_list1 = []
                # try several crop margins of the icon; keep the best score
                for u in range(eval(gl.get_value('thresh_hold_iteration'))):
                    gray_icon_edged = []
                    margin = int(gray_icon.shape[0] * u * 0.025)
                    for k in range(margin, gray_icon.shape[0] - margin):
                        gray_icon_edged.append(
                            gray_icon[k][margin:gray_icon.shape[1] - margin])
                    gray_icon_edged = np.array(gray_icon_edged)
                    gray_org_processed = cv2.cvtColor(gray_org, cv2.COLOR_BGR2GRAY)
                    thresh_org = cv2.adaptiveThreshold(
                        gray_org_processed, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                        cv2.THRESH_BINARY, 3, 3)
                    thresh_icon = cv2.adaptiveThreshold(
                        gray_icon_edged, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                        cv2.THRESH_BINARY, 3, 3)
                    # TM_SQDIFF_NORMED: lower score means better match
                    res = cv2.matchTemplate(gray_icon_edged, gray_org_processed,
                                            cv2.TM_SQDIFF_NORMED)
                    res1 = cv2.matchTemplate(thresh_icon, thresh_org,
                                             cv2.TM_SQDIFF_NORMED)
                    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
                    minloc_list.append(min_loc)
                    minval_list.append(min_val)
                minloc_list = np.array(minloc_list)
                minval_list = np.array(minval_list)
                w, h = gray_icon_edged.shape[::-1]
                # pick the margin iteration with the lowest (best) score
                index = np.where(minval_list == min(minval_list))
                top_left = minloc_list[index][0]
                bottom_right = (top_left[0] + w, top_left[1] + h)
                # center of the matched rectangle
                coordinates = (np.array(top_left) + np.array(bottom_right)) / 2
                break
    return coordinates
def remote_operation_with_sshpasswordauth(sshhost, cmds,
                                          username='', passwd='',
                                          confirmflag=1, confirmobj='',
                                          confirmobjcount=1, sshport=22,
                                          keyfile='', outlog='sshout.log',
                                          errorlog='ssherror.log',
                                          paramikologenable=0):
    """Run *cmds* on *sshhost* over ssh with password authentication.

    Returns (execute_flag, sshout):
    1  -- ssh succeeded and, when confirmflag == 1, *confirmobj* appeared
          exactly *confirmobjcount* times in the output;
    0  -- ssh succeeded but the confirmation check failed;
    -1 -- the remote command produced stderr output.
    Exits with status 1 if the connection cannot be established.
    """
    import paramiko
    import basic_class
    import global_variables  # fixed: was used below without being imported here
    import os
    # fall back to the globally configured root credentials
    if username == '':
        username = global_variables.get_value('root_account')
    if passwd == '':
        passwd = global_variables.get_value('root_passwd')
    if paramikologenable == 1:
        paramiko.util.log_to_file('ssh.log')  # paramiko logging, off by default
    # execute flag: 1 success, 0 confirmation failed, -1 remote error
    execute_flag = ''
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    basic_class.mylogger_record.debug('Establishing ssh connection with password ...')
    try:
        ssh.connect(hostname=sshhost, port=sshport, username=username, password=passwd)
    except Exception:
        # fixed: bare 'except:' also swallowed SystemExit/KeyboardInterrupt
        basic_class.mylogger_record.warning(
            'Establishing ssh connection with password failed! Please check manually! ')
        exit(1)
    else:
        basic_class.mylogger_record.debug('Established ssh connection with password success!')
        stdin, stdout, stderr = ssh.exec_command(cmds)
        okout = stdout.read()
        errout = stderr.read()
        if len(errout) == 0:
            sshout = str(okout, 'utf-8')
            if confirmflag == 1:
                basic_class.mylogger_record.debug('confirmobj_count=' + str(sshout.count(confirmobj)))
                if sshout.count(confirmobj) == confirmobjcount:
                    basic_class.mylogger_record.debug('ssh success and target match')
                    execute_flag = 1
                else:
                    basic_class.mylogger_record.error('ssh success but target mismatch')
                    execute_flag = 0
            else:
                basic_class.mylogger_record.debug('ssh success and no need check target')
                execute_flag = 1
        else:
            # merge stdout and stderr so the caller sees the full output
            sshout = str(okout, 'utf-8') + str(errout, 'utf-8')
            basic_class.mylogger_record.error('ssh operation fail')
            execute_flag = -1
        basic_class.mylogger_record.debug("sshout=")
        basic_class.mylogger_recordnf.debug(sshout)
        return execute_flag, sshout
    # NOTE(review): kept from the original -- placed after the return, so
    # the connection is never explicitly closed on the success path
    ssh.close()