def detect_objects(request_filename):
    """
    Calls the method to detect objects in an image.

    :param request_filename: name of the image file
    :return: image with its objects detected
    """
    # Delegate to the detection module's implementation (assumed here to be
    # `object_detection`); calling this wrapper itself would recurse forever.
    return object_detection.detect_objects(request_filename)
def testDetectObjects(self):
    generator = object_detection.read_video_file(
        "testdata/test_video_1.mp4")
    output = object_detection.detect_objects(generator, threshold_score=0.5)
    self.assertLen(output, 5)

    # Each element of `output` holds the (class_name, score) pairs detected
    # in one frame of the test video.
    class_names = [class_name for class_name, _ in output[0]]
    scores = [score for _, score in output[0]]
    self.assertIn("apple", class_names)
    self.assertGreaterEqual(min(scores), 0.5)

    class_names = [class_name for class_name, _ in output[1]]
    scores = [score for _, score in output[1]]
    self.assertIn("apple", class_names)
    self.assertGreaterEqual(min(scores), 0.5)

    class_names = [class_name for class_name, _ in output[2]]
    scores = [score for _, score in output[2]]
    self.assertEqual(class_names, ["cell phone"])
    self.assertGreaterEqual(min(scores), 0.5)

    class_names = [class_name for class_name, _ in output[3]]
    scores = [score for _, score in output[3]]
    self.assertEqual(class_names, ["cell phone"])
    self.assertGreaterEqual(min(scores), 0.5)

    class_names = [class_name for class_name, _ in output[4]]
    scores = [score for _, score in output[4]]
    self.assertEqual(class_names, ["dog"])
    self.assertGreaterEqual(min(scores), 0.5)
def main():
    args = parser.parse_args()
    print("50")
    if args.input_image_glob:
        if args.input_video_path:
            raise ValueError(
                "--input_image_glob and --input_video_path are mutually exclusive")
        if args.frame_rate is None:
            raise ValueError(
                "When --input_image_glob is provided, --frame_rate must be provided")
        frame_generator = object_detection.read_images(
            args.input_image_glob, frame_rate=args.frame_rate)
        timestep_s = 1.0 / args.frame_rate
    else:
        if not args.input_video_path:
            raise ValueError(
                "One of --input_image_glob and --input_video_path must be provided")
        frame_generator = object_detection.read_video_file(args.input_video_path)
        timestep_s = 1.0 / object_detection.get_video_fps(args.input_video_path)
        # TODO(cais): Support variable frame rate in video file.
    events = object_detection.detect_objects(frame_generator)
    tsv_rows = events_lib.convert_events_to_tsv_rows(
        events, tsv_data.VISUAL_OBJECTS_EVENTS_TIER, timestep_s=timestep_s)
    with open(args.output_tsv_path, mode="w") as f:
        tsv_writer = csv.writer(f, delimiter="\t")
        tsv_writer.writerow(tsv_data.COLUMN_HEADS)
        for row in tsv_rows:
            tsv_writer.writerow(row)
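# A minimal sketch of the argparse parser that main() above relies on. The real
# definitions live elsewhere in the module; the flag names are taken from main(),
# while the types, defaults, and help strings here are assumptions.
import argparse

parser = argparse.ArgumentParser(
    description="Detect objects in a video or image sequence and write event rows to a TSV file.")
parser.add_argument("--input_video_path", type=str, default=None,
                    help="Path to an input video file.")
parser.add_argument("--input_image_glob", type=str, default=None,
                    help="Glob pattern for input image frames "
                         "(mutually exclusive with --input_video_path).")
parser.add_argument("--frame_rate", type=float, default=None,
                    help="Frame rate of the image sequence; required with --input_image_glob.")
parser.add_argument("--output_tsv_path", type=str, required=True,
                    help="Path to the output TSV file.")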
def build_serial_response(stream, received):
    image = open_image_linux(stream)
    cv2.imwrite(image_name, image)
    if received == 'request':
        mode = 1
    elif received == 'request_2':
        mode = 2
    else:
        raise ValueError('Unknown request type: {}'.format(received))
    centers, dists, rots = detect_objects(image, color_params, min_contour_radius,
                                          homography_params, mode, gui=False, logg=True)
    # Consider only the first mask because we have only one target colour for now.
    dists, rots = dists[0], rots[0]
    output = ''
    # Report only the left-most contour.
    output += str(round(rots[0], 2)) + ' ' if len(rots) > 0 else ''
    output += str(round(dists[0], 2)) if len(dists) > 0 else ''
    if logg:
        print('Detected position: {}'.format(output))
    return 'rpi_response ' + output + '\n'
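# The line returned above has the form 'rpi_response <rotation> <distance>\n',
# with the fields absent when nothing is detected. A minimal sketch of decoding
# it on the other end of the serial link; the function and field names are
# assumptions for illustration, not part of the original code.
def parse_rpi_response(line):
    parts = line.strip().split()
    if not parts or parts[0] != 'rpi_response':
        return None
    values = [float(p) for p in parts[1:]]
    rotation = values[0] if len(values) > 0 else None
    distance = values[1] if len(values) > 1 else None
    return rotation, distance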
def display_image(self, path):
    image = cv2.imread(path)
    scale_X = 2.0
    scale_Y = 2.0
    if not self.toggle_rec_check.isChecked():
        boxes = object_detection.detect_objects(
            image, float(self.conf_line.text()), float(self.threshold_line.text()))
        image = cv2.resize(image, (0, 0), fx=scale_X, fy=scale_Y)
        for x0, y0, x1, y1, confidence, class_num in boxes:
            # Scale the box coordinates to match the resized image.
            x0, y0, x1, y1 = int(scale_X * x0), int(scale_Y * y0), \
                int(scale_X * x1), int(scale_Y * y1)
            color = None
            text = ''
            if class_num == 0:
                color = (0, 255, 0)
                text = 'left'
            elif class_num == 1:
                color = (255, 0, 0)
                text = 'right'
            elif class_num == 2:
                color = (0, 0, 255)
                text = 'bad'
            if color:
                cv2.rectangle(image, (x0, y0), (x1, y1), color, 1, cv2.LINE_AA)
                cv2.putText(image, text + ' (' + str(confidence) + ')',
                            (x0, y0 - 2), 1, 1, color, 1, cv2.LINE_AA)
    else:
        image = cv2.resize(image, (0, 0), fx=scale_X, fy=scale_Y)
    # OpenCV loads images in BGR order; convert to RGB so that
    # QImage.Format_RGB888 renders the colours correctly.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    height, width, channel = image.shape
    bytes_per_line = 3 * width
    qimg = QImage(image.data, width, height, bytes_per_line, QImage.Format_RGB888)
    self.image_label.setPixmap(QPixmap.fromImage(qimg))
def upload_file():
    print('here upload')
    if request.method == 'POST':
        file = request.files['file']
        if file:
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            execution_path = os.getcwd()
            uploads_static_path = os.path.join(execution_path, 'static')
            upload_path = os.path.join(uploads_static_path, 'uploads')
            result_static_path = os.path.join(execution_path, 'static')
            result_path = os.path.join(result_static_path, 'result')
            out_name = os.path.join(result_path, filename)
            in_name = os.path.join(upload_path, filename)
            detections = detect_objects(in_name, out_name, detector)
            return {'response': 'OK', 'filename': filename}
    return {'response': 'Error'}
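# A minimal sketch, assuming Flask, of the application wiring that upload_file()
# above relies on: the `app` instance with UPLOAD_FOLDER configured, the route
# registration, and the globally loaded `detector`. The folder layout, route path,
# and loader call are assumptions, not the original module's code.
import os
from flask import Flask, request
from werkzeug.utils import secure_filename

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = os.path.join('static', 'uploads')

# detector = load_detector(...)  # hypothetical: however the model is loaded upstream

app.add_url_rule('/upload', view_func=upload_file, methods=['GET', 'POST'])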