def load_image(image_path):
    """Detect the largest car in *image_path* and return it as a cropped PIL image.

    Runs pyyolo on the file, keeps 'car' detections with prob >= 0.65 whose
    box covers at least 30% of the frame, picks the largest, pads it by 10%
    per side, and clamps to a 2px margin inside the image.

    Returns the cropped PIL image, or False when no suitable car is found
    or the image cannot be processed.
    """
    thresh = 0.24
    hier_thresh = 0.5
    areas = []
    coords = []
    try:
        img = pil_image.open(image_path)
        img = img.convert('RGB')
        width, height = img.size
        area = width * height
        outputs = pyyolo.test(image_path, thresh, hier_thresh, 0)
        print(outputs)
        for out in outputs:
            if out['class'] != 'car':
                continue
            if out['prob'] < 0.65:
                continue
            x1 = out['left']
            x2 = out['right']
            y1 = out['top']
            y2 = out['bottom']
            box_area = abs((x2 - x1) * (y2 - y1))
            # Ignore detections smaller than 30% of the whole frame.
            if box_area < 0.3 * area:
                continue
            coords.append([x1, y1, x2, y2])
            areas.append(box_area)
        if not coords:
            return False
        car_coord = coords[areas.index(max(areas))]
        # Pad the winning box by 10% of its size on every side.
        scale_width = int((car_coord[2] - car_coord[0]) * 0.1)
        scale_height = int((car_coord[3] - car_coord[1]) * 0.1)
        car_coord[0] = car_coord[0] - scale_width
        car_coord[1] = car_coord[1] - scale_height
        car_coord[2] = car_coord[2] + scale_width
        car_coord[3] = car_coord[3] + scale_height
        # Clamp to a 2px margin so the crop stays inside the image.
        car_coord = (max(int(car_coord[0]), 2),
                     max(int(car_coord[1]), 2),
                     min(int(car_coord[2]), width - 2),
                     min(int(car_coord[3]), height - 2))
        return img.crop(car_coord)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any decode/detection failure yields False.
        print("Load Error: %s" % image_path)
        return False
def detect_yolo_pyyolo(img_arr, url='', classes=constants.hls_yolo_categories):
    """Run pyyolo file-based detection on an image array.

    pyyolo.test wants a path on disk, so the array is written to a scratch
    JPEG under a time-hashed name (to avoid collisions between calls).

    :param img_arr: BGR image array (as produced by cv2).
    :param url: unused here; kept for interface compatibility with callers.
    :param classes: unused here; kept for interface compatibility.
    :return: list of dicts {'object', 'bbox':[xmin,ymin,xmax,ymax], 'confidence'}.
    """
    print('----- test original C using a file')
    digest = hashlib.sha1()  # renamed: `hash` shadowed the builtin
    digest.update(str(time.time()))
    img_filename = digest.hexdigest()[:10] + 'pyyolo.jpg'
    cv2.imwrite(img_filename, img_arr)
    relevant_bboxes = []
    try:
        outputs = pyyolo.test(img_filename, thresh, hier_thresh)
        for output in outputs:
            print(output)
            item = {
                'object': output['class'],
                'bbox': [output['left'], output['top'],
                         output['right'], output['bottom']],
                # pyyolo.test only guarantees the detection threshold here,
                # not a per-box probability.
                'confidence': '>' + str(thresh)
            }
            relevant_bboxes.append(item)
    finally:
        # Remove the scratch image (the original leaked one file per call).
        try:
            os.remove(img_filename)
        except OSError:
            pass
        # Free the model even if detection raised.
        pyyolo.cleanup()
    return relevant_bboxes
def predict_sample_image():
    """Run both pyyolo entry points on the module-level sample `filename`.

    First uses the file-based API (pyyolo.test), printing raw detections;
    then the in-memory API (pyyolo.detect) on prepared pixel data, draws the
    boxes, and writes the result to predicted.jpg.

    Relies on module globals: filename, thresh, hier_thresh, prepare_img,
    draw_bounding_boxes.
    """
    # From file
    print('----- test original C using a file')
    outputs = pyyolo.test(filename, thresh, hier_thresh, 0)
    for output in outputs:
        print(output)
    # In-memory API on the same file.
    print('----- test python API using a file')
    # The original wrapped this in `while i < 2`, which executed exactly
    # once — dead scaffolding left over from a camera-capture demo.
    orig_img = cv2.imread(filename)
    c, h, w, data = prepare_img(orig_img)
    outputs = pyyolo.detect(w, h, c, data, thresh, hier_thresh)
    draw_bounding_boxes(orig_img, outputs)
    cv2.imwrite("predicted.jpg", orig_img)
def crop_image(image_path):
    """Crop the file at *image_path* in place to the largest detected car.

    Keeps 'car' detections with prob >= 0.65 covering at least 30% of the
    frame, pads the largest by 5% per side, clamps the top-left corner to a
    2px margin, and overwrites the original file with the crop.

    If no qualifying car is found the image file is DELETED and False is
    returned; on success the file is rewritten and None is returned.
    """
    thresh = 0.24
    hier_thresh = 0.5
    im = cv2.imread(image_path)
    height, width, _ = im.shape
    frame_area = width * height
    boxes = []
    box_areas = []
    for detection in pyyolo.test(image_path, thresh, hier_thresh, 0):
        if detection['class'] != 'car' or detection['prob'] < 0.65:
            continue
        left, right = detection['left'], detection['right']
        top, bottom = detection['top'], detection['bottom']
        spanned = abs((right - left) * (bottom - top))
        # Discard boxes smaller than 30% of the whole frame.
        if spanned < 0.3 * frame_area:
            continue
        boxes.append([left, top, right, bottom])
        box_areas.append(spanned)
    if not boxes:
        # No usable car: remove the source image entirely.
        os.remove(image_path)
        return False
    best = boxes[box_areas.index(max(box_areas))]
    # Pad by 5% of the box size on each side.
    pad_w = int((best[2] - best[0]) * 0.05)
    pad_h = int((best[3] - best[1]) * 0.05)
    x1 = max(int(best[0] - pad_w), 2)
    y1 = max(int(best[1] - pad_h), 2)
    x2 = best[2] + pad_w
    y2 = best[3] + pad_h
    cv2.imwrite(image_path, im[y1:y2, x1:x2])
def serve():
    """Flask handler: download posted image URLs, classify each with pyyolo,
    and return the predictions as JSON.

    Expects request.data to be JSON understood by iof.download_images.
    Each output entry carries 'url', 'img_resize', and 'pred' (pyyolo output
    list, or None when the image was missing, empty, or failed).
    Downloaded files are deleted before returning.
    """
    data = json.loads(request.data.decode('utf-8'))
    print("\n")
    print("[TIME] " + time.strftime("%d/%m/%Y") + " " + time.strftime("%H:%M:%S"))
    path_to_client_data = iof.assert_client_data_path(request)
    image_data, num_valid_images = iof.download_images(data, path_to_client_data)
    out = list()
    err_count = 0
    for idx in image_data:
        img_data = image_data[idx]
        o = dict()
        out.append(o)
        o["url"] = img_data["url"]
        o["img_resize"] = img_data["img_resize"]
        # `is None` instead of the original `== None` comparison.
        if img_data["path"] is None:
            o["pred"] = None
            continue
        try:
            preds = pyyolo.test(img_data["path"], thresh, hier_thresh, 0)
            # Normalize "no detections" to None for the client.
            o["pred"] = preds if preds else None
        except Exception as e:
            o["pred"] = None
            err_count += 1
            print("[ERROR] while classifiying image\n\tsrc: %s \n\tError: %s"
                  % (o["url"], e))
    print("[+] Classified %s / %s images"
          % (num_valid_images - err_count, num_valid_images))
    # Best-effort cleanup of everything downloaded for this request.
    shutil.rmtree(path_to_client_data)
    print("[+] Deleted downloaded images.")
    print("[+] Returning predictions...")
    return jsonify(out)
def call_api(data):
    """Classify a batch of image URLs with pyyolo and return the predictions.

    Near-duplicate of the HTTP `serve` path but callable directly: images
    are downloaded into a timestamped directory under api_dir_path, each is
    classified, then the directory is removed.

    :param data: payload understood by download_images.
    :return: list of dicts with 'url', 'img_resize', and 'pred' (pyyolo
             output list, or None on missing/empty/failed images).
    """
    print(data)
    print("-" * 10)
    timestamp = str(int(time.time()))
    outdir = api_dir_path + "/" + timestamp
    os.makedirs(outdir)
    image_data, num_valid_images = download_images(data, outdir)
    print(image_data)
    print("num_valid_images %s" % num_valid_images)
    print("-" * 10)
    out = list()
    err_count = 0
    for idx in image_data:
        img_data = image_data[idx]
        o = dict()
        out.append(o)
        o["url"] = img_data["url"]
        o["img_resize"] = img_data["img_resize"]
        # `is None` instead of the original `== None` comparison.
        if img_data["path"] is None:
            o["pred"] = None
            continue
        try:
            preds = pyyolo.test(img_data["path"], thresh, hier_thresh, 0)
            # Normalize "no detections" to None for the caller.
            o["pred"] = preds if preds else None
        except Exception as e:
            o["pred"] = None
            err_count += 1
            print("[ERROR] while classifiying image\n\tsrc: %s \n\tError: %s"
                  % (o["url"], e))
    print("[+] Classified %s / %s images"
          % (num_valid_images - err_count, num_valid_images))
    shutil.rmtree(outdir)
    print("[+] Deleted downloaded images.")
    print("[+] Returning predictions...")
    return out
# Demo script: run pyyolo on a sample image via both APIs.
# Relies on darknet_path/datacfg/cfgfile/weightfile being defined earlier
# in the file (not visible in this chunk).
filename = '../annotation-tool/images/101323.jpg'
thresh = 0.24        # detection confidence threshold
hier_thresh = 0.5    # hierarchical-softmax threshold
# Camera capture path, disabled in favor of the fixed sample file:
#cam = cv2.VideoCapture(0)
#ret_val, img = cam.read()
#print(ret_val)
#if ret_val:
#    ret_val = cv2.imwrite(filename,img)
#    print(ret_val)
pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)
# from file
print('----- test original C using a file')
outputs = pyyolo.test(filename, thresh, hier_thresh, 0)
for output in outputs:
    print(output)
# camera
print('----- test python API using a file')
i = 1
# NOTE(review): this loop's visible body never increments `i` and never
# calls pyyolo.detect — it appears truncated in this chunk; as written it
# would not terminate. Confirm against the full file.
while i < 2:
    # ret_val, img = cam.read()
    img = cv2.imread(filename)
    img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)
    # HWC -> CHW, as pyyolo.detect expects channel-first raw data.
    img = img.transpose(2, 0, 1)
    c, h, w = img.shape[0], img.shape[1], img.shape[2]
    # print w, h, c
    data = img.ravel() / 255.0
    data = np.ascontiguousarray(data, dtype=np.float32)
def get_local_pyyolo_results(img_arr, url='', classes=constants.hls_yolo_categories, method='file'):
    """Run pyyolo on *img_arr* and return detections with a dominant color.

    method='file' writes a scratch JPEG and uses pyyolo.test; any other
    method feeds raw CHW float data to pyyolo.detect. Each detection is
    returned as {'object', 'bbox':[xmin,ymin,xmax,ymax], 'confidence'},
    plus 'details': {'color': ...} when a dominant color is found in the
    center crop of the image.

    :param img_arr: BGR image array (as produced by cv2).
    :param url: unused here; kept for interface compatibility with callers.
    :param classes: unused here; kept for interface compatibility.
    :param method: 'file' for the file-based API, anything else for in-memory.
    """
    relevant_bboxes = []
    # Originally `outputs` stayed unbound when pyyolo.detect raised, making
    # the loop below crash with NameError; default to an empty list.
    outputs = []
    if method == 'file':
        print('----- test original C using a file')
        digest = hashlib.sha1()  # renamed: `hash` shadowed the builtin
        digest.update(str(time.time()))
        img_filename = digest.hexdigest()[:10] + 'pyyolo.jpg'
        cv2.imwrite(img_filename, img_arr)
        try:
            outputs = pyyolo.test(img_filename, thresh, hier_thresh)
        finally:
            # The original leaked one scratch file per call.
            try:
                os.remove(img_filename)
            except OSError:
                pass
    else:
        # In-memory path: pyyolo.detect takes raw CHW float data in [0, 1].
        # (The original wrapped this in a `while i < 2` that ran once.)
        img = img_arr.transpose(2, 0, 1)
        c, h, w = img.shape[0], img.shape[1], img.shape[2]
        data = np.ascontiguousarray(img.ravel() / 255.0, dtype=np.float32)
        print('calling pyyolo.detect')
        try:
            outputs = pyyolo.detect(w, h, c, data, thresh, hier_thresh)
        except Exception:
            # Narrowed from a bare except; keep the original best-effort
            # behavior of logging and continuing with no detections.
            print('some trouble calling pyyolo detect,' + str(sys.exc_info()[0]))
        print('returned from pyyolo.detect')
    for output in outputs:
        print(output)
        label = output['class']
        if 'person' in label:
            label = 'person'  # collapse 'person_wearing_red/blue_shirt' into just person
        item = {
            'object': label,
            'bbox': [output['left'], output['top'],
                     output['right'], output['bottom']],
            'confidence': round(output['prob'], 4)
        }
        h, w = img_arr.shape[0:2]
        frac = 5
        # `//` keeps the original Python 2 integer-division behavior and is
        # also correct on Python 3 (the original used `/` on ints).
        cropped_arr = img_arr[h // frac:h - (h // frac), w // frac:w - (w // frac)]
        dominant_color = imutils.dominant_colors(cropped_arr)
        print('dominant color:' + str(dominant_color))
        if dominant_color is not None:
            item['details'] = {'color': dominant_color}
        relevant_bboxes.append(item)
    # pyyolo.cleanup()
    return relevant_bboxes
# Demo script: grab a frame from the default camera, save it, and run
# pyyolo on it via both APIs. darknet_path/datacfg/cfgfile are assumed to
# be defined earlier in the file (not visible in this chunk).
weightfile = '../tiny-yolo.weights'
filename = darknet_path + '/data/person.jpg'
thresh = 0.24        # detection confidence threshold
hier_thresh = 0.5    # hierarchical-softmax threshold
# -1 selects the first available camera backend/device.
cam = cv2.VideoCapture(-1)
ret_val, img = cam.read()
print(ret_val)
if ret_val:
    # Overwrite the sample file with the captured frame.
    ret_val = cv2.imwrite(filename,img)
    print(ret_val)
pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)
# from file
print('----- test original C using a file')
outputs = pyyolo.test(filename, thresh, hier_thresh)
for output in outputs:
    print(output)
# camera
print('----- test python API using a file')
i = 1
# NOTE(review): the visible loop body never increments `i` nor prints the
# detect results — it appears truncated in this chunk; as written it would
# not terminate. Confirm against the full file.
while i < 2:
    # ret_val, img = cam.read()
    img = cv2.imread(filename)
    # HWC -> CHW, as pyyolo.detect expects channel-first raw data.
    img = img.transpose(2,0,1)
    c, h, w = img.shape[0], img.shape[1], img.shape[2]
    # print w, h, c
    data = img.ravel()/255.0
    data = np.ascontiguousarray(data, dtype=np.float32)
    outputs = pyyolo.detect(w, h, c, data, thresh, hier_thresh)