def ActivityHandler(activity):
    """Emit one activity record as JSON and bump the global counter.

    If the module-level ``Output`` path is set, (re)open it in append mode
    and write the record there (one JSON object per line); otherwise fall
    back to stdout when no file handle exists.
    NOTE(review): the stdout path writes no trailing newline — presumably
    intentional, but records will run together; confirm with the caller.
    """
    global ActivityCount
    global FileHandle
    global Output
    payload = json.dumps(activity)
    if Output is not None:
        # Reopened per call so each record is flushed and the file can be
        # rotated externally between calls.
        FileHandle = open(Output, "a")
    if FileHandle is None:
        sys.stdout.write(payload)
    else:
        locate( activity['id'], 1, 10)
        FileHandle.write(payload + "\n")
        FileHandle.close()
    ActivityCount += 1
def action(self, *data): # casestep
    """Run every spreadsheet-defined test step whose case id matches data[0].

    Steps are read from the workbook at module-level *filepath*; each step is
    turned into a driver-call string by self.buildStep and executed via eval().
    Only data[0] (case id) and data[4] (value for 'input' steps) are read here;
    the rest of *data*'s layout is the caller's business.
    """
    # Row layout (as indexed below): [0]=case id, [2]=description,
    # [3]=keyword, [4]=locator sign, [5]=parameter, [7]=expected text.
    steps = Action.readxls(filepath, 1)
    A = Action()
    # NOTE(review): k is incremented only for 'input' steps and never read —
    # looks vestigial; confirm before removing.
    k = 4
    for step in steps:
        if step[0] == data[0]:
            desc = step[2]
            key_word, sign, param, wish = step[3], step[4], step[5], step[
                7]
            if sign != "":
                # A locator sign is present: resolve it to (tag, locator).
                ele = locate.locate(sign, filepath, 3)
                tag, loc = ele[0], ele[1]
                if key_word.lower() == 'input':
                    # 'input' steps take their value from the call, not the sheet.
                    param = data[4]
                    step = self.buildStep(key_word, tag, loc, param)
                    print desc + ':' + step
                    k += 1
                    # SECURITY: eval() on spreadsheet-derived text — only safe
                    # while the workbook is fully trusted.
                    eval(step)
                elif key_word.lower() == 'gettext':
                    step = self.buildStep(key_word, tag, loc)
                    text = eval(step)
                    # Compare fetched text against the expected value and
                    # record a screenshot-tagged verdict.
                    if text == wish:
                        A.action_sign('Screenshot', data[0], 'PASS')
                    else:
                        A.action_sign('Screenshot', data[0], 'FAIL')
                else:
                    step = self.buildStep(key_word, tag, loc, param)
                    print desc + ':' + step
                    eval(step)
            else:
                # No locator: the keyword acts directly on the parameter.
                step = self.buildStep(key_word, param)
                print desc + ':' + step
                eval(step)
def callback():
    """Look up the value typed into *e1*, populate the tree, and redraw."""
    result = locate.locate(e1.get())
    first_angle, second_angle = result[0], result[1]
    # NOTE(review): each pass inserts the FULL result row (values=result),
    # not the individual element — one row per element of result. Preserved
    # as-is; confirm whether values=<element> was intended.
    for _ in result:
        tree.insert('', 'end', values=result)
    redraw(first_angle, second_angle)
def dispatch(values=None):
    """Validate *values* and route it to the operation named by values['op'].

    Returns the selected handler's result, or *values* with an 'error' key
    describing the validation failure. A dict arriving with a pre-existing
    'error' key has that key stripped and is returned unrouted.
    """
    # Fixed: 'values == None' -> 'is None' (PEP 8 identity comparison),
    # dropped redundant parentheses, 'not (x in d)' -> 'x not in d'.
    if values is None:
        return {'error': 'parameter is missing'}
    if not isinstance(values, dict):
        return {'error': 'parameter is not a dictionary'}
    if 'error' in values:
        # Stale error from an earlier stage: clear it and short-circuit.
        values.pop('error')
        return values
    if 'op' not in values or values['op'] == '':
        values['error'] = 'no op is specified'
        return values
    # Perform designated function
    op = values['op']
    if op == 'adjust':
        return adjust.adjust(values)
    elif op == 'predict':
        return predict.predict(values)
    elif op == 'correct':
        return correct.correct(values)
    elif op == 'locate':
        return locate.locate(values)
    values['error'] = 'op is not a legal operation'
    return values
def request(self,name,pandora,*args,**kwargs):
    """Send the HTTP call registered under *name* in self.m.

    The host is resolved via locate.locate(pandora, self.model); the
    registered address template is filled from **kwargs, which are also
    passed along as query parameters. Returns the requests Response.
    """
    host = locate.locate(pandora, self.model)
    method, addr_template = self.m[name]
    path = addr_template % kwargs
    full_url = 'https://%s%s' % (host, path)
    prepared = requests.Request(method, full_url, params=kwargs).prepare()
    session = requests.Session()
    return session.send(prepared)
def get_points(in_dir):
    '''Get points latlngs'''
    # Collect one (lat, lng) float pair per geotagged image in *in_dir*;
    # non-directories yield an empty list.
    points = []
    accepted = ['jpg', 'jpeg', 'png']
    if not os.path.isdir(in_dir):
        return points
    for entry in os.scandir(in_dir):
        if entry.name.split('.')[-1] in accepted and entry.is_file():
            # locate() is expected to return an object with a
            # 'google_latlng' "lat,lng" string, or a falsy value when the
            # image carries no GPS info.
            address = locate(entry.path)
            if address:
                lat, lng = address.google_latlng.split(',')
                points.append((float(lat), float(lng)))
    return points
def only_show_address(imgpath, detail=True):
    '''Print the resolved address of one image and plot it on a map.

    detail: bool, also passed through to locate(); when True the printout
    includes the semantic description alongside the formatted address.
    Side effect: writes the map to 'imgmap.html'.
    '''
    address = locate(imgpath, detail)
    # Coordinates used for drawing on the map.
    lat, lng = address.google_latlng.split(',')
    lat_f = float(lat)
    lng_f = float(lng)
    gmap = gmplot.GoogleMapPlotter(lat_f, lng_f, zoom=15)
    gmap.heatmap([lat_f], [lng_f], radius=15, opacity=1)
    gmap.draw('imgmap.html')
    logging.debug('address: {}, detail: {}'.format(address, detail))
    if detail:
        print(imgpath, address.formatted_address, address.sematic_description)
    else:
        print(imgpath, address.formatted_address)
def classify(class_, imgpath, showmap):
    """Sort geotagged images under *imgpath* into per-place folders and plot them.

    class_: truthy -> classify a directory (requires showmap truthy too);
            falsy  -> just show the address of a single image file.
    Side effects: moves files via os.rename, creates directories, and writes
    the heatmap to 'imgmap.html'.
    NOTE(review): if no image yields coordinates, zip(*latlng_set) below
    raises — verify callers never hit that case.
    """
    if class_ and os.path.isdir(imgpath) and showmap:
        logging.debug('class_: {}, isdir: {}'.format(class_, os.path.isdir(imgpath)))
        base_path = imgpath
        fileformats = ['jpg', 'jpeg']
        # Coordinates to draw on the map.
        lats = []
        lngs = []
        # os.scandir() is more efficient than os.listdir().
        it = os.scandir(base_path)
        for entry in it:
            if entry.name.split('.')[-1] in fileformats and entry.is_file():
                logging.info(entry.name)
                address = locate(entry.path)
                # No GPS info: move the file into the "unknown place" folder.
                if address is None:
                    logging.warning('{} 未知地点'.format(entry.name))
                    unknown_path = os.path.join(base_path, '未知地点')
                    os.makedirs(unknown_path, exist_ok=True)
                    os.rename(entry.path, os.path.join(unknown_path, entry.name))
                    continue
                logging.debug(address)
                lat, lng = address.google_latlng.split(',')
                lats.append(float(lat))
                lngs.append(float(lng))
                if address.province == address.city:
                    # Classify by country/city/district and create the folder.
                    # NOTE(review): the province==city branch inserts the
                    # country while the other branch does not — confirm this
                    # asymmetry is intended (municipality special case?).
                    path = os.path.join(base_path, address.country, address.city, address.district)
                else:
                    path = os.path.join(base_path, address.city, address.district)
                os.makedirs(path, exist_ok=True)
                os.rename(entry.path, os.path.join(path, entry.name))
                logging.info('entry name: {}, address: {}, path: {}'.format(
                    entry.name, address, path))
        # De-duplicate the collected coordinates.
        latlng_set = set()
        for i, j in zip(lats, lngs):
            latlng_set.add((i, j))
        lats, lngs = zip(*latlng_set)
        # Center the map on the bounding box of all points.
        center_lat = (max(lats) + min(lats)) / 2
        center_lng = (max(lngs) + min(lngs)) / 2
        logging.debug('center_lat: {}, center_lng: {}'.format(
            center_lat, center_lng))
        logging.debug('lats: {}, lngs: {}'.format(lats, lngs))
        gmap = gmplot.GoogleMapPlotter(center_lat, center_lng, zoom=10)
        gmap.heatmap(lats, lngs, radius=15, opacity=1, dissipating=True)
        gmap.draw('imgmap.html')
    elif not class_ and os.path.isfile(imgpath):
        logging.debug('class_: {}, isdir: {}'.format(class_, os.path.isdir(imgpath)))
        only_show_address(imgpath, detail=True)
    else:
        print('请检查输入的 class_ 和 imgpath 是否匹配!class_ 是 {},而 imgpath 是 {}'.format(
            class_, imgpath))
import numpy as np
import cv2
import math
from transformations import *
import os
from os import listdir
from os.path import isfile, join
from locate import locate


if __name__ == '__main__':
    # Run locate() over every file in the images/ directory, saving each
    # result to disk via save_result=True.
    for image_name in listdir('images'):
        image_path = 'images/' + image_name
        image = cv2.imread(image_path)
        locate(image, save_result=True)
def connect(meet_id, password, audio, video, name, fixdropdown, keytimeout, starttimeout, jointimeout, passtimeout, cronlauncher): """Connect to meeting with the options above""" #Only start zoom if not started by cronlauncher script if not cronlauncher: restart_zoom() time.sleep(starttimeout) x, y = locate.locate('btnJoin') pyautogui.click(x, y) time.sleep(jointimeout) #Enter meeting id x, y = locate.locate('txtId') pyautogui.click(x, y) time.sleep(keytimeout) pyautogui.write(meet_id) time.sleep(keytimeout) #This is for if the V is not there, users must manually tell it if not fixdropdown: pyautogui.press('tab', interval=0.5) pyautogui.press('tab', interval=0.5) if name: logging.info('Using custom name') pyautogui.press('backspace', 50) pyautogui.write(name) else: logging.info('Ignoring custom name') pyautogui.press('tab', interval=0.5) #Set audio enable if not audio: logging.info('Enabling audio') pyautogui.press('enter', interval=0.5) else: logging.info('Disabling audio') pyautogui.press('tab', interval=0.5) #Set video enable if not video: logging.info('Enabling video') pyautogui.press('enter', interval=0.5) else: logging.info('Disabling video') #Navigate to the next screen pyautogui.press('tab', interval=0.5) logging.info('Submitting') pyautogui.press('enter', interval=1) #Password if password: logging.info('Using password. Waiting...') time.sleep(passtimeout) #Check for error message result = locate.locate('errorid') #If there is an error message, remove it if result: x, y = locate.locate('btnLeave') pyautogui.click(x, y) time.sleep(keytimeout) result = locate.locate('txtPass') #If the error was actuall fake, and password thing is there, enter the password if result: x, y = result pyautogui.click(x, y) time.sleep(keytimeout) pyautogui.write(password) time.sleep(keytimeout) pyautogui.press('tab', interval=0.5) pyautogui.press('enter', interval=1) else: logging.error('Fatal error occured. Invalid meeting id.') else: logging.info('Ignoring password')
def single_test(filepath, facePath, bottomPath):
    """Run the full defect-detection pipeline on a single image.

    Extracts ROIs via getROI, computes Canny/Harris feature maps, fuses them
    with locate.locate(), crops each detected rectangle into a cache folder,
    and classifies the crops with predict.predict_().
    Returns (res1, res2, recs, labels).
    Relies on module-level state: outPath, resOutPath, canny_params,
    harris_params, merge_dist, Ui_record.save_param.
    NOTE(review): cache path 'D:/dataset0511/output/cache/scratched/' is
    hard-coded — consider making it configurable.
    """
    filename = filepath.split('/')[-1]
    curname = filename
    RES = getROI.getROI_(filepath, facePath, 1, bottomPath)
    bottom = RES[0]  # base / foundation region
    ROI = RES[1]  # region of interest
    border = RES[2]  # outer-contour mask
    faceROI = RES[3]  # face region
    faceBorderMask = RES[4]  # face-contour mask
    # Optionally persist each intermediate, gated by the save_param flags.
    if outPath != '' and Ui_record.save_param[0] == True:
        cv2.imwrite(outPath + '/roi/' + filename, ROI)
    if outPath != '' and Ui_record.save_param[1] == True:
        cv2.imwrite(outPath + '/foundation/' + filename, bottom)
    if outPath != '' and Ui_record.save_param[2] == True:
        cv2.imwrite(outPath + '/face/' + filename, faceROI)
    if outPath != '' and Ui_record.save_param[3] == True:
        cv2.imwrite(outPath + '/faceMask/' + filename, faceBorderMask)
    if outPath != '' and Ui_record.save_param[4] == True:
        cv2.imwrite(outPath + '/borderMask/' + filename, border)
    sobel = locate.getLapliacian(ROI)
    canny_res = canny.canny_(ROI, faceROI, canny_params[0], canny_params[1],
                             canny_params[2], canny_params[3],
                             canny_params[4], canny_params[5])  # Canny binary map
    harris_res = canny.HarrisDetect(ROI, border, faceBorderMask,
                                    harris_params[0], harris_params[1])  # Harris binary map
    if outPath != '' and Ui_record.save_param[5] == True:
        cv2.imwrite(outPath + '/harris/' + filename, harris_res)
    if outPath != '' and Ui_record.save_param[6] == True:
        cv2.imwrite(outPath + '/canny/' + filename, canny_res)
    locate.th = merge_dist
    res1, res2, recs, feature = locate.locate(canny_res, ROI, border,
                                              faceBorderMask, harris_res,
                                              merge_dist, sobel)  # fused features
    if outPath != '' and Ui_record.save_param[7] == True:
        cv2.imwrite(outPath + '/res/' + filename, feature)
    if resOutPath != '':
        cv2.imwrite(resOutPath + '/res1_' + filename, res1)
        cv2.imwrite(resOutPath + '/res2_' + filename, res2)
    # Crop names start at 10 so they sort lexicographically.
    count = 10
    # Clear the crop cache before writing this image's rectangles.
    for filename in os.listdir('D:/dataset0511/output/cache/scratched/'):
        os.remove('D:/dataset0511/output/cache/scratched/' + filename)
    for rec in recs:
        block = ROI[rec.y1:rec.y2 + 1, rec.x1:rec.x2 + 1]
        cv2.imwrite(
            'D:/dataset0511/output/cache/scratched/' + str(count) + '.bmp',
            block)
        count += 1
        if outPath != '' and Ui_record.save_param[8] == True:
            print("filename = ", curname)
            cv2.imwrite(
                outPath + '/blocks/' + curname.split('.')[0] + '_' +
                str(count - 10) + '.bmp', block)  # result sub-image
    if len(recs) == 0:
        labels = []
    else:
        labels = predict.predict_()
    # cv2.imshow('res1', res1)
    # cv2.imshow('res2', res2)
    # cv2.waitKey()
    return res1, res2, recs, labels
from textsaveload import *
from pic_scale import picscale
from locate import locate
import os
import cv2
import mido

# scaling picture
(loc_pic_orig, loc_pic_scaled) = locate()  # define the location of the picture
infile = loc_pic_orig
outfile = loc_pic_scaled
height = input("set picture height(1~127):")  # input picture height
# NOTE(review): input() returns a str; picscale presumably converts it —
# confirm it does not expect an int here.
stat_pic = picscale(loc_pic_orig, loc_pic_scaled, height)

# main: build a MIDI file from the scaled, grayscaled picture.
mid = mido.MidiFile()
track = mido.MidiTrack()
mid.tracks.append(track)
img = cv2.imread(loc_pic_scaled)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# initialize counters
scan_x = 1  # columns