def obj_detect():
    if 'file' not in request.files:
        return 'no image file uploaded'
    File = request.files['file']
    # can check extension here before detecting
    if File.filename == '':
        return 'no file name provided'
    if File and allowed_file(File.filename):
        fn = secure_filename(File.filename)
        File.save(os.path.join(app.config['UPLOAD_FOLDER'], fn))
        imagePath = os.path.join(app.config['UPLOAD_FOLDER'], fn)
        detect(imagePath)
        resp = send_from_directory(UPLOAD_FOLDER, fn)
        resp.headers['filename'] = fn
        os.remove(imagePath)
        return resp
    else:
        return 'file type is not supported'
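A minimal client sketch for exercising an endpoint like obj_detect above. The URL, port, and route name are assumptions, not taken from the snippet; it posts an image as the multipart field 'file' (which is what the handler reads) and saves the annotated response body.

# Hypothetical client for the obj_detect endpoint; URL and route are assumed.
import requests

with open('test.jpg', 'rb') as f:
    resp = requests.post('http://localhost:5000/detect', files={'file': f})

if resp.ok:
    # The endpoint echoes the stored file name back in a 'filename' header
    with open(resp.headers.get('filename', 'result.jpg'), 'wb') as out:
        out.write(resp.content)
else:
    print(resp.text)  # e.g. 'file type is not supported'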
def label():
    req_data = request.get_json(force=True)
    print(req_data)
    root_save_dir = "{}/projects/{}/{}".format(drive, req_data["projectName"], req_data["dataType"])
    save_dir = "{}/images".format(root_save_dir)
    Path(save_dir).mkdir(parents=True, exist_ok=True)
    imgdata = base64.b64decode(req_data["imageBase64"])
    filename = next_path("{}/{}_%s.png".format(save_dir, req_data["label"]))
    with open(filename, "wb") as f:
        f.write(imgdata)
    print("saved: {}".format(filename))
    save_label(req_data["label"], root_save_dir)
    result = []
    if req_data["autoMakeSense"]:
        result = detect(filename, req_data["objectConfidenceThreshold"])
        if len(result) > 0:
            lines = ""
            for item in result:
                for v in item:
                    lines += str(v) + " "
                lines = lines.strip() + "\n"
            with open(filename + ".pre", "w") as f:
                f.write(lines.strip())
    return json.dumps([labels, result])
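A hedged example of the JSON body the label endpoint expects, inferred from the keys it reads (projectName, dataType, label, imageBase64, autoMakeSense, objectConfidenceThreshold). The URL and all values are illustrative only.

# Illustrative request to the label endpoint; URL and values are assumptions.
import base64
import requests

with open('sample.png', 'rb') as f:
    payload = {
        'projectName': 'demo',       # hypothetical project name
        'dataType': 'train',         # hypothetical split name
        'label': 'cat',
        'imageBase64': base64.b64encode(f.read()).decode('ascii'),
        'autoMakeSense': True,       # run detect() on the saved image
        'objectConfidenceThreshold': 0.5,
    }
resp = requests.post('http://localhost:5000/label', json=payload)
print(resp.json())  # [labels, result] as serialized by the endpoint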
def yolo_detect(net, img):
    detections = yolo.detect(net, img)
    filtered = yolo.filter(img, detections)
    detected, img = yolo.apply_nms(img, filtered, 0.4, 0.3)
    if detected:
        print("Meteor detected!\nImage saved in detections/")
        # Build a timestamp-based name; cv2.imwrite needs a recognized image
        # extension to pick an encoder, so append one explicitly
        timestmp = str(time.time())
        name = "detections/" + timestmp.split(".")[0] + timestmp.split(".")[1] + ".png"
        cv2.imwrite(name, img)
    return detected
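A usage sketch for yolo_detect: the snippet never shows how `net` is built, so the OpenCV Darknet loader below (with hypothetical config/weight paths) is only an assumption about what yolo.detect(net, img) accepts.

# Sketch only: `net` construction is not shown above; assuming a Darknet-style
# model loaded via OpenCV (paths are hypothetical).
import cv2

net = cv2.dnn.readNetFromDarknet('yolov3.cfg', 'yolov3.weights')
frame = cv2.imread('sky_frame.png')  # hypothetical input frame
if yolo_detect(net, frame):
    print('meteor frame archived under detections/')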
def exportFrame():
    event = controller.step(dict(action='Initialize', continuous=True))
    cv2.imwrite('frame.jpg', event.cv2img)
    yolo.detect()
import yolo

yolo.detect()
import os
import cv2
import yolo

image = yolo.detect('/yolo/test.png')
cv2.imwrite('result.png', image)
# (continues a truncated pts = np.array([...]) statement covering the car bonnet)
                 [1024, 490], [1024, 544]], np.int32)
pts = pts.reshape((-1, 1, 2))
imgL_detect = imgL.copy()
cv2.fillConvexPoly(imgL_detect, pts, (0, 0, 0))

# Cover up car bonnet in display version of disparity map
pts2 = np.array([[-55, 544], [-55, 515], [385, 388], [630, 388],
                 [1024, 475], [1024, 544]], np.int32)
pts2 = pts2.reshape((-1, 1, 2))
cv2.fillConvexPoly(disparity_display, pts2, (0, 0, 0))

windowName = 'left image'
cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
cv2.resizeWindow(windowName, imgL.shape[1], imgL.shape[0])

# Perform YOLO detection
classIDs, confidences, boxes = yolo.detect(imgL_detect, allowed_classes)

# Sort objects according to disparity (mean disparity over each box's ROI)
for i in range(0, len(boxes)):
    box = boxes[i]
    left = box[0]
    top = box[1]
    width = box[2]
    height = box[3]
    roi = disparity[max(top, 0):top + height, max(left, 0):left + width]
    disp = np.mean(roi[roi >= 0])
    box.append(disp)
    box.append(classIDs[i])
boxes = sorted(boxes, key=lambda x: x[4], reverse=True)
min_depth = 0
def main():
    while True:
        # set the wait time between one people count and the next
        print("Detection start time: " + str(datetime.now()))
        t_end = datetime.now().replace(microsecond=0) + timedelta(seconds=DETECTIONS_INTERVAL)
        print("Detection end time: " + str(t_end))
        # read the JSON file containing the webcam data
        with open(PATH_WEBCAM_JSON) as f:
            json_data = json.load(f)
        # loop over all the webcams listed in the JSON file
        for webcam in json_data["webcams"]:
            print("Collecting data for: " + webcam["location"] + ", " + webcam["city"])
            # get the temperature and weather at acquisition time
            try:
                temperature, weather_description = get_current_weather(
                    webcam["latitude"], webcam["longitude"])
            except Exception:
                print('Exception in weather lookup')
                logging.info('Exception at: ' + str(datetime.now()))
                logging.error(sys.exc_info())
                continue
            # set the acquisition time and date
            current_time = current_date = datetime.now()
            try:
                video_link = fetch_read_m3u8(webcam["link"], webcam["url_prefix"])
            except Exception:
                print("Failed to fetch/read m3u8")
                # continue
            # download the video from the link just obtained
            try:
                # urllib.request.urlretrieve(video_link, PATH_VIDEOS + "Video" + ".ts")
                frame_is_read, frame = extract_frame_from_video_url(video_link)
                print(frame_is_read)
            except Exception:
                print('Exception: video not available: ' + webcam["location"])
                logging.info('Exception at: ' + str(datetime.now()))
                logging.error(sys.exc_info())
                continue
            try:
                frame_part = cut_frame_in_six(frame)
            except Exception:
                print('Exception')
                logging.info('Exception at: ' + str(datetime.now()))
                logging.error(sys.exc_info())
                continue
            persone_contate = 0
            # count the people in each sub-frame
            for frame in frame_part:
                persone_contate = persone_contate + detect(frame)
            print("People: " + str(persone_contate))
            data = {
                'id_webcam': webcam["id_webcam"],
                'city': webcam["city"],
                'location': webcam["location"],
                'latitude': webcam["latitude"],
                'longitude': webcam["longitude"],
                'numPeople': persone_contate,
                'date': current_date.strftime('%Y-%m-%d %H:%M:%S.%f'),
                'time': current_time.strftime('%Y-%m-%d %H:%M:%S.%f'),
                'type': 0,
                'weather_description': weather_description,
                'temperature': temperature,
                'day_of_week': datetime.now().date().weekday()
            }
            producer.send('gdp', value=data)
            print("sent to Kafka")
        # insert the results into the db
        print("waiting for: " + str((t_end - datetime.now()).total_seconds()))
        # clamp at zero: time.sleep() raises ValueError on a negative argument
        time.sleep(max(0.0, (t_end - datetime.now()).total_seconds()))
def laptop_recommandations(product):
    c = [categories[product].split(',')[0:4]]
    for i in range(len(product_names)):
        if categories[product].split(',')[0:3] == categories[i].split(',')[0:3] \
                and categories[i].split(',')[0:4] not in c:
            c.append(categories[i].split(',')[0:4])
    # prune the collected category variants (indices as in the original logic)
    del c[4]
    del c[4]
    del c[4]
    del c[-1]
    rec = []
    distances = []
    for i in range(len(product_names)):
        if product_names[i] != product_names[product] \
                and str(product_descriptions[i]) != 'nan' \
                and str(dsc_image_urls[i]) != 'nan' \
                and categories[i].split(',')[0:4] == c[0]:
            d = 1 - similarity_test.evaluateSimilarity(product, i, data)
            distances.append((d, i))
    distances = sorted(distances)
    rec = [id[distances[0][1]]]
    titles = [product_names[distances[0][1]]]
    n = 5  # number of similar products
    i = 1
    while len(titles) < n:
        if product_names[distances[i][1]] not in titles:
            titles.append(product_names[distances[i][1]])
            rec[0] += ' ' + id[distances[i][1]]
        i += 1
    rec.append('')
    for k in range(1, len(c)):
        distances = []
        for i in range(len(product_names)):
            if product_names[i] != product_names[product] \
                    and str(product_descriptions[i]) != 'nan' \
                    and str(dsc_image_urls[i]) != 'nan' \
                    and categories[i].split(',')[0:4] == c[k]:
                d = 1 - similarity_test.evaluateSimilarity(product, i, data)
                distances.append((d, i))
        distances = sorted(distances)
        admissibles = []
        j = 0
        for l in range(5):
            img = requests.get(dsc_image_urls[distances[j][1]]).content
            name = 'product.jpg'
            with open(yolo.image_path + name, 'wb') as handler:
                handler.write(img)
            y = yolo.detect(name)
            # skip duplicates and images that show only a laptop or a monitor
            while product_names[distances[j][1]] in titles or y == ['laptop'] or 'tvmonitor' in y:
                j += 1
                img = requests.get(dsc_image_urls[distances[j][1]]).content
                name = 'product.jpg'
                with open(yolo.image_path + name, 'wb') as handler:
                    handler.write(img)
                y = yolo.detect(name)
            admissibles.append(distances[j][1])
            j += 1
        i1, i2 = random.sample(admissibles, 2)
        rec[-1] += ' ' + id[i1] + ' ' + id[i2]
        titles.append(product_names[i1])
        titles.append(product_names[i2])
    rec[-1] = rec[-1][1:]
    return rec
quotechar='"', escapechar=' ', quoting=csv.QUOTE_NONE) for i in range(len(product_names)): if categories[ i] == 'Root Category, Sporting Goods, Outdoor Recreation, Cycling, Bikes' and str( dsc_image_urls[i]) != 'nan' and str( product_descriptions[i]) != 'nan': img = requests.get(dsc_image_urls[i]).content img_name = product_names[i].replace('"', '').replace( "/", "").replace("\\", "").replace(":", "").replace( "*", "").replace("?", "").replace("<", "").replace( ">", "").replace("|", "").replace(".", "") + '.jpg' with open(yolo.image_path + img_name, 'wb') as handler: handler.write(img) if 'bicycle' in yolo.detect(img_name): rec = bike_recommandations(i, img_name) if len(rec[1].split(' ')) == 6: print(i) r.writerow([id[i]] + rec) with open(r'ps4.csv', mode='w', newline="") as csvfile: r = csv.writer(csvfile, delimiter=',', quotechar='"', escapechar=' ', quoting=csv.QUOTE_NONE) for i in range(len(product_names)): if categories[ i] == 'Root Category, Gaming, Playstation, PlayStation 4, Consoles' and str( dsc_image_urls[i]) != 'nan' and str(