def analyse_video(self):
    """Analyse video."""
    self.save_threshold_json()
    namespath = self.analyser_config['namesPath']
    try:
        self.append_text("Starting video analysis...")
        detect.main("video", self.dlg, self.saveLocationEntry.get(),
                    float(self.iouEntry.get()), float(self.confidenceEntry.get()),
                    namespath, self.createCsv.get())
        self.append_text("Video analysis ended successfully")
    except Exception as err:
        self.append_text("Error in video analysis")
        self.append_text(str(err))
def analyse_images(self):
    """Analyse images."""
    self.save_threshold_json()
    namespath = self.analyser_config['namesPath']
    try:
        self.append_text("Starting image analysis...")
        detect.main("images", self.dlg, self.saveLocationEntry.get(),
                    float(self.iouEntry.get()), float(self.confidenceEntry.get()),
                    namespath)
        self.append_text("Image analysis ended successfully")
    except Exception as err:
        self.append_text("Error in image analysis")
        print(err)
def main():
    ker = filter()
    # Images and their respective classes
    roi_images, face_type, file_names = detect.main()
    order = ['DI', 'NE', 'SU', 'AN', 'FE', 'SA', 'HA', 'CO']  # order of arrangement
    # Wavelet images of specific expressions, keyed by expression label;
    # classification needs this wavelets dictionary.
    wavelets = {'HA': [], 'SA': [], 'SU': [], 'AN': [], 'DI': [], 'FE': [],
                'NE': [], 'CO': []}

    # Convolve each ROI image with the kernel and save the flattened wavelet
    i = 0
    print("saving wavelets")
    for img in roi_images:
        total = convolute(img, ker)
        # wavelets[face_type[i]].append(total.reshape((1, total.shape[0] * total.shape[1])))
        np.savez('wavelets(roi-789-ck)/' + str(file_names[i][:10]),
                 total.reshape((1, total.shape[0] * total.shape[1])))
        i += 1
        print(i)
    print("wavelets loaded")
def uploadfile():
    if request.method == 'POST':
        os.makedirs(videos_path, exist_ok=True)
        os.makedirs(images_path, exist_ok=True)

        file = request.files['file']
        dataset = request.form['dataset']
        model = request.form['model']

        file_name = secure_filename(file.filename)
        file_base_name = file_name.split('.')[0]
        videos_dir = os.path.join(videos_path, file_base_name)
        images_dir = os.path.join(images_path, file_base_name)
        os.makedirs(videos_dir, exist_ok=True)
        os.makedirs(images_dir, exist_ok=True)
        file.save(os.path.join(videos_dir, file_name))

        main(file_name, dataset, model)
        (closest_dists_path,
         min_closest_dists_path,
         stats_vs_time_path,
         two_d_hist_density_vs_avg_dists_path,
         two_d_hist_density_vs_min_dists_path,
         two_d_hist_density_vs_violation_path,
         regression_density_vs_violations_path) = analyze_statistics(dataset, file_name)

        output_file_name = 'output_%s%s' % (file_base_name, constants.OUTPUT_FORMAT)
        output_file_path = os.path.join(videos_dir, output_file_name)
        return render_template('results.html',
                               data=output_file_path,
                               closest_dists_path=closest_dists_path,
                               min_closest_dists_path=min_closest_dists_path,
                               stats_vs_time_path=stats_vs_time_path,
                               two_d_hist_density_vs_avg_dists_path=two_d_hist_density_vs_avg_dists_path,
                               two_d_hist_density_vs_min_dists_path=two_d_hist_density_vs_min_dists_path,
                               two_d_hist_density_vs_violation_path=two_d_hist_density_vs_violation_path,
                               regression_density_vs_violations_path=regression_density_vs_violations_path)
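A minimal client-side sketch for exercising the upload route above, assuming it is registered as /uploadfile on a local Flask server; the URL, port, file name, and form values are illustrative rather than taken from the snippet:

import requests

# Hypothetical endpoint and form values; the route decorator is not shown above.
url = 'http://localhost:5000/uploadfile'
with open('walk.mp4', 'rb') as fh:
    resp = requests.post(url,
                         files={'file': fh},
                         data={'dataset': 'town_centre', 'model': 'yolo'})
print(resp.status_code)  # the route responds with the rendered results.html page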
def getYoloObjects():
    img = request.files["file"]
    imgname = secure_filename(img.filename)
    path = basedir + '/data/images/' + imgname
    print(path)
    img.save(path)
    lst = main(type="images", iou_threshold=0.5, confidence_threshold=0.5,
               input_names=[path])
    return jsonify(yoloRes=lst)
import sys

import lasagne
from lasagne import layers
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet
# from sklearn.metrics import classification_report
# from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.externals import joblib

import detect
from stratify import shuffle
# from nolearn.lasagne import TrainSplit

sys.setrecursionlimit(5000)

images, classes, filenames = detect.main()
# Map expression labels to class indices (DI and CO share class 0)
d = {'DI': 0, 'CO': 0, 'NE': 1, 'SU': 2, 'AN': 3, 'FE': 4, 'SA': 5, 'HA': 6}
X = images
y = [d[i] for i in classes]

# Split dataset into train and test using stratified sampling
train_index, test_index = shuffle(classes)
X_train, X_test, y_train, y_test = [], [], [], []
for i in train_index:
    X_train.append(X[i])
    y_train.append(y[i])
for i in test_index:
    X_test.append(X[i])
    y_test.append(y[i])
          'cluster \t... group similar faces into clusters\n'
          'train \t\t... train face recognition using faces in folders\n'
          'show \t\t... show face recognition results\n'
          'export \t\t... export face recognition results\n')


if __name__ == "__main__":
    if len(sys.argv) == 1:
        print_help()
        exit()

    task = sys.argv[1]
    sys.argv.pop(1)

    if task == '--help' or task == 'help' or task == '-h':
        print_help()
    elif task == 'detect':
        detect.main()
    elif task == 'predict':
        predict.main()
    elif task == 'cluster':
        cluster.main()
    elif task == 'train':
        train.main()
    elif task == 'show':
        show.main()
    elif task == 'export':
        export.main()
    else:
        print_help()
def hasPerson(img):
    # Runs detection on the given image(s); iou_threshold and
    # confidence_threshold are module-level settings.
    return detect_person.main("images", iou_threshold, confidence_threshold, img)
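A hedged usage sketch for hasPerson; it assumes img is a list of image paths and that detect_person.main returns a truthy value when a person is found, neither of which is confirmed by the snippet:

iou_threshold = 0.5          # module-level thresholds that hasPerson relies on
confidence_threshold = 0.5

if hasPerson(["frame_001.jpg"]):   # hypothetical input image
    print("person detected in frame_001.jpg")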
# -*- coding: utf-8 -*-
import os
import time

import tensorflow as tf
from tensorflow.python.saved_model import tag_constants

import detect  # main code - generates new images

# Set path_file to the folder that holds the target image class folders.
path_file = 'C:/Users/kdan/BigJob12/main_project/_db/data/crawling_data/[개]'
weights = './checkpoints/yolov4-416'
saved_model_loaded = tf.saved_model.load(weights, tags=[tag_constants.SERVING])

for count, foldername in enumerate(os.listdir(path_file)):
    print(count, foldername)
    for count, filename in enumerate(os.listdir(path_file + '/' + foldername)):
        print(count, filename)
        path_src_file = path_file + '/' + foldername + '/' + filename
        # dst_name = filename.split('/')[-1].split('.jpg')[0] + '_crop' + '.jpg'
        # path_dst_file = path_file + '/' + foldername + '/' + dst_name
        print(path_src_file)
        start = time.time()
        detect.main(path_src_file, saved_model_loaded)
        print('total time = ', time.time() - start)
from detect import main

if __name__ == "__main__":
    main()
def process_data(thread_name, q, iou, confidence, names, create_csv, save_location):
    """
    Analyse polled images and videos

    :param thread_name: Name of thread
    :param q: Queue of videos or images to be analysed
    :param iou: IoU threshold value
    :param confidence: Confidence threshold value
    :param names: Names file location
    :param create_csv: Whether a CSV file is created for each analysed file
    :param save_location: Save location for analysed files
    :return: None
    """
    root = Tk()
    root.title("Polling UI")
    root.protocol("WM_DELETE_WINDOW", disable_event)
    txt = Text(root, height=20, width=70)
    vsb = Scrollbar(root, orient="vertical", command=txt.yview)
    txt.configure(yscrollcommand=vsb.set)
    vsb.pack(side="right", fill="y")
    txt.pack(side="left", fill="both", expand=True)
    root.update()

    save_location = os.path.abspath(save_location + r"\analysed")
    counter = 0
    while not exitFlag:
        queueLock.acquire()
        print(".....Polling.....")
        append_text(txt, root, ".....Polling.....")
        if not workQueue.empty():
            data = q.get()
            queueLock.release()
            append_text(txt, root, "Watchdog received event - %s." % data)
            print("%s processing %s" % (thread_name, data))
            append_text(txt, root, "%s processing %s" % (thread_name, data))
            file_end = os.path.splitext(os.path.basename(data))[1]
            print(file_end)
            append_text(txt, root, file_end)
            append_text(txt, root, save_location)
            print(save_location)
            data_list = [data]
            if file_end.lower() in video_extensions:
                counter += 1
                try:
                    os.mkdir(save_location)
                except FileExistsError as error:
                    print(error)
                    append_text(txt, root, "Saved folder already exists")
                except OSError as error:
                    print(error)
                    append_text(txt, root, str(error))
                try:
                    print("Starting video analysis...")
                    append_text(txt, root, "Starting video analysis...")
                    detect.main("video", data_list, save_location, iou, confidence, names, create_csv)
                    print("Video analysis ended successfully")
                    append_text(txt, root, "Video analysis ended successfully")
                except Exception as err:
                    print("Error in video analysis")
                    print(err)
                    append_text(txt, root, "Error in video analysis")
                    append_text(txt, root, str(err))
            elif file_end.lower() in image_extensions:
                counter += 1
                try:
                    os.mkdir(save_location)
                except FileExistsError as error:
                    print(error)
                    append_text(txt, root, "Analysed folder already exists")
                except OSError as error:
                    print(error)
                    append_text(txt, root, str(error))
                try:
                    print("Starting image analysis...")
                    append_text(txt, root, "Starting image analysis...")
                    detect.main("images", data_list, save_location, iou, confidence, names, create_csv)
                    print("Image analysis ended successfully")
                    append_text(txt, root, "Image analysis ended successfully")
                except Exception as err:
                    print("Error in image analysis")
                    append_text(txt, root, "Error in image analysis")
                    print(err)
                    append_text(txt, root, str(err))
            queueLock.acquire()
            queueCheck.remove(data)
            print(queueCheck)
            queueLock.release()
            append_text(txt, root, str(counter) + " Files analysed")
        else:
            queueLock.release()
        time.sleep(0.5)
    print("EXITING ANALYSE THREAD")
    append_text(txt, root, "EXITING ANALYSE THREAD")
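A hedged launch sketch for the polling worker above; the thresholds, names file, and save location are illustrative, and it assumes the module-level workQueue, queueLock, and exitFlag used inside process_data are already set up:

import threading

worker = threading.Thread(
    target=process_data,
    args=("Thread-1", workQueue, 0.5, 0.5, "coco.names", True, r"C:\polled"),  # example values
    daemon=True,
)
worker.start()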
name = match_info[i]['name']
photos = match_info[i]['photos']
for j, photo in enumerate(photos):
    label = name + '_m' + str(i) + '_' + str(j)
    os.system('wget ' + photo + ' -q -O ' + match_dir + label + '.jpg')

# Pass images through YOLOv3
print('\nAnalyzing match images with YOLOv3...')
sys.path.append('../yolov3')
import detect

detect.opt.conf_thres = 0.60
detect.opt.image_folder = sys.path[0] + '/' + match_dir
detect.opt.output_folder = sys.path[0] + '/' + yolo_dir
detect.opt.txt_out = True
detect.main(detect.opt)

# Remove images with no people or with multiple people
txt_files = sorted(glob.glob('%s/*.txt' % yolo_dir))
for txt_file in txt_files:
    labels = np.loadtxt(txt_file, dtype=np.float32).reshape(-1, 6)
    labels = labels[labels[:, 4] == 0]
    if labels.shape[0] == 1:  # only one person in the image
        img_name = txt_file[:-4].split('/')[-1]
        box = labels[0].astype('int')
        h, w = box[3] - box[1], box[2] - box[0]
        area = w * h
        if (w > 150) and (h > 150) and (area > 30e3):
            img = cv2.imread(match_dir + img_name)
def main(**fits_file):
    """
    Run the SMART detection chain on an HMI magnetogram.

    :param fits_file: optional 'fits_file' keyword naming a fits file in data_dir;
                      if omitted, the latest HMI data file is downloaded.
    :return: None
    """
    # Load configuration file
    config = ConfigParser()
    config.read("config.ini")

    # Get data directory
    data_dir = "".join([os.path.expanduser('~'), config.get('paths', 'data_dir')])

    # Record start time to see how long this will take
    start_time = time.time()

    # First load the latest HMI data file
    if not fits_file:
        inmap = input_data.main(data_dir)
    else:
        inmap = sunpy.map.Map(data_dir + fits_file['fits_file'])

    # Downsample if 4096x4096 (generally shouldn't be if using near-real-time JSOC data)
    if inmap.dimensions[0].value != 1024:
        inmap = inmap.resample(u.Quantity([1024, 1024], u.pixel))
        inmap.meta['naxis1'] = 1024
        inmap.meta['naxis2'] = 1024

    # Now process the magnetogram
    print('Processing data')
    processedmap, cosmap, limbmask = process_magnetogram.main(inmap, medianfilter=False)

    # Create AR masks
    print('Making core detections')
    detectionmap = detect.main(processedmap, limbmask)
    coredetectionmap, pslmap = detect_core.main(processedmap, detectionmap.data)
    print('SMART detections found: ', np.max(np.unique(coredetectionmap.data)))

    # Get properties
    print('Calculating properties')
    posprop = position_properties.main(processedmap, coredetectionmap.data, cosmap)
    magprop = magnetic_properties.main(processedmap, coredetectionmap.data, cosmap)
    pslprop = psl_properties.main(processedmap, coredetectionmap.data,
                                  doproj=False, projmaxscale=1024)

    # Output to json
    smartdate = inmap.date.strftime('%Y%m%d_%H%M')
    out = {'meta': {'dateobs': smartdate,
                    'dimension': inmap.dimensions[0].value,
                    'instrument': inmap.instrument}}
    out['posprop'] = posprop
    out['magprop'] = magprop
    out['pslprop'] = pslprop
    out = pd.io.json.dumps(out, data_dir + smartdate + '_properties.json')
    with open(data_dir + smartdate + '_properties.json', 'w') as outfile:
        outfile.write(out)

    # Beautify the json output
    with open(data_dir + smartdate + '_properties.json') as infile:
        obj = json.load(infile)
    with open(data_dir + smartdate + '_properties.json', 'w') as outfile:
        json.dump(obj, outfile, indent=4, separators=(',', ': '))

    # Visualise
    plot_detections.main(processedmap, coredetectionmap, pslmap, data_dir, smartdate)

    # Delete fits files
    sys_call = 'rm -r {}'.format(data_dir + '*.fits')
    subprocess.call(sys_call, shell=True)

    # How long did that take?
    print('Runtime:', round(time.time() - start_time), 'seconds')
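A short invocation sketch for the SMART pipeline entry point above, assuming the module is run directly; the fits file name is illustrative and would need to exist in data_dir:

if __name__ == '__main__':
    # Process a specific magnetogram already present in data_dir...
    main(fits_file='hmi.m_45s.20230101_000000.magnetogram.fits')  # hypothetical file name
    # ...or call main() with no argument to fetch and process the latest HMI data.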
def python(ctx, config, script):
    '''Runs a python script in the cloud.'''
    make_rocket()
    click.echo('Generating Environment...')
    environment = detect.main()
    files = {'file': environment}
    r = requests.post(url + '/submit_project', files=files,
                      auth=(config.username, config.password), timeout=20)
    if r.status_code == 500:
        click.echo('No workers available!! System overloaded :(')
        return
    worker_id = r.json()['workerid']
    click.echo('Worker ID: ' + worker_id)

    print('Setting up container...', end='')
    spinner = itertools.cycle(['\\', '|', '/', '-'])
    while not container_ready(worker_id):
        sys.stdout.write(next(spinner))
        sys.stdout.flush()
        time.sleep(0.5)
        sys.stdout.write('\b')
    click.echo('')

    container_info = get_container_info(worker_id)
    click.echo(container_info)

    # Now that the container info is present, write the RSA key
    # into a temporary pem file.
    temp_key_file = tempfile.NamedTemporaryFile()
    temp_key_file.write(container_info['key'].encode('utf-8'))
    click.echo('temp file: ' + temp_key_file.name)

    # Now that the container is ready, rsync the project in.
    temp_key_file.seek(0)
    print(temp_key_file.read().decode('utf-8'))
    temp_key_file.seek(0)
    # '/Users/samcrane/Documents/GitHub/swcli/final_key.pem'
    key = temp_key_file.name
    host = container_info['ip_address']
    user = '******'  # container_info['username']
    port = container_info['port']

    # Once rsync is done, ssh in and run the commands.
    # command = 'su -S rootpass'
    # run_container_command(key, user, host, port, command)
    # run_container_command(key, user, host, port, 'virtualenv venv')
    # run_container_command(key, user, host, port, 'source venv/bin/activate')
    rsync_to_container(key, user, host, port)
    command = 'cd /home/; pip install -r .__spacewire__/swpiprequirements.txt'
    run_container_command(key, user, host, port, command)
    command = 'cd /home/; python3 {0}'.format(click.format_filename(script))
    run_container_command(key, user, host, port, command)
    command = 'cd /home/; ls'
    run_container_command(key, user, host, port, command)
    rsync_from_container(key, user, host, port)
    temp_key_file.close()
def PushButtonClicked2(self):
    from detect import main
    main()