def __init__(self):
    """Set up the main window: camera feed, defect-state flags,
    background subtractor and signal wiring."""
    super().__init__()
    self.setupUi(self)

    # Live camera feed into the main view (device index 0).
    self.camera = Camera(0)
    self.main_view.initialize(camera=self.camera)

    # Per-frame defect flags and counters.
    self.has_scratches = False
    self.has_holes = False
    self.has_bad_legs = False
    self.has_object = False
    self.cnt_img = 0
    self.cnt_defect = 0
    self.detections = []

    # Background subtractor seeded from 'bg.jpg' if present,
    # otherwise from a trivial 1x1 white image.
    self.bg_sub = cv2.createBackgroundSubtractorMOG2(4, detectShadows=False)
    bg_path = os.path.join(os.getcwd(), 'bg.jpg')
    if os.path.exists(bg_path):
        background = cv2.imread(bg_path)
    else:
        background = np.ones((1, 1, 3))
    self.bg_sub.apply(background)

    # UI signal wiring.
    self.main_view.new_frame.connect(self.new_frame)
    self.next_defect_button.clicked.connect(self.incr_cnt)
    self.prev_defect_button.clicked.connect(self.decr_cnt)

    # Detector weights live next to the executable.
    self.network_handler = NetworkHandler(
        os.path.join(os.getcwd(), 'weights'))
def main():
    """Poll the camera ten times looking for the calibration plate.

    On each detection, command a move away from the plate's offset from
    the image centre, then pause two seconds. Fixes: Python 2 `print`
    statements converted to `print()` calls, unused `status` local
    removed, manual `while` counter replaced with `range`, and dead
    commented-out `drone.move` call dropped (module-level `move` is
    the call actually used).
    """
    camera = Camera()
    print('Camera is ready')
    plate = Plate()
    for attempt in range(10):
        print(attempt)
        camera.capture()
        print('image captured')
        plate_detected = plate.detect_plate()
        time.sleep(1)  # give detection/hardware time between frames
        if plate_detected:
            print('Found!!!!!')
            distance_from_center = plate.distance_from_center(
                camera.IMAGE_CENTER)
            move(distance_from_center)
            print("move! back!! you have two seconds!!!!")
            time.sleep(2)
def main():
    """Fly the drone for 15 detection cycles, steering toward the plate.

    Fixes: Python 2 `print` statements converted to `print()` calls,
    unused `keep_flying` local removed, manual `while` counter replaced
    with `range`, and the landing/teardown sequence moved into a
    `finally` block so the drone always lands even if detection raises.
    """
    drone = Drone("radio://0/80/2M")
    camera = Camera()
    drone.take_off()
    plate = Plate()
    try:
        for attempt in range(15):
            print(attempt)
            camera.capture()
            detected = plate.detect_plate()
            print(camera.IMAGE_CENTER)
            if detected:
                distance_from_center = plate.distance_from_center(
                    camera.IMAGE_CENTER)
                drone.move(distance_from_center)
            else:
                print("nothing found")
            time.sleep(1)
        time.sleep(5)  # settle before landing
    finally:
        # Always land and release hardware, even on an exception above.
        drone.land()
        drone.disconnect()
        camera.shutdown_camera()
        time.sleep(1)
def __init__(self):
    """Initialise the training window: camera view, database editor,
    network handler, UI refresh timer and shared training-progress
    values."""
    super().__init__()
    self.setupUi(self)

    # Camera feed shown in the microscope view.
    self.camera = Camera()
    self.microscopeView.initialize(camera=self.camera)
    self.microscopeView.setEnabled(True)

    # Project paths: data directory sits under the working directory.
    self.main_path = os.getcwd()
    self.path = os.path.join(self.main_path, 'data')

    self.database_editor = DatabaseEditor(self.camera, self.path)
    self.network_handler = NetworkHandler(self.main_path)
    self.training_process = None

    # UI refresh timer at 5 Hz.
    refresh_interval_ms = 1000 // 5
    self.timer = QTimer()
    self.timer.setInterval(refresh_interval_ms)
    self.timer.start()

    # Shared values for reporting progress from the training process.
    self.current_progress = Value('i', 0)
    self.overall_progress = Value('i', 0)
    self.train_time_start = Value('f', 0)
    self.train_time_end = Value('f', 0)

    self.connect()
    self.display_classes()
def __init__(self):
    """Set up the detection window: selectable camera, defect flags,
    background subtractor, helpers and all signal connections."""
    super().__init__()
    self.setupUi(self)

    # Populate the camera switcher, then open the currently selected
    # video device.  (`camera_swithcer` is the widget's given name.)
    self.detect_video_devices()
    selected_device = self.camera_swithcer.itemData(
        self.camera_swithcer.currentIndex())
    self.camera = Camera(selected_device)
    self.main_view.initialize(camera=self.camera)

    # Per-frame defect flags and counters.
    self.has_scratches = False
    self.has_holes = False
    self.has_bad_legs = False
    self.has_object = False
    self.cnt_img = 0
    self.cnt_defect = 0
    self.detections = []
    self.current_detections = []
    self.sz_x = 0
    self.sz_y = 0

    # Which defect classes are enabled (mirrors the checkboxes).
    self.detect_legs = True
    self.detect_holes = True
    self.detect_scratches = True

    cwd = os.getcwd()
    self.network_handler = NetworkHandler(os.path.join(cwd, 'weights'))
    self.database_editor = DatabaseEditor(
        self.camera, os.path.join(cwd, 'data'))

    # Background subtractor seeded from 'bg.jpg' when available,
    # otherwise from a trivial 1x1 white image.
    self.bg_sub = cv2.createBackgroundSubtractorMOG2(4, detectShadows=False)
    bg_path = os.path.join(cwd, 'bg.jpg')
    if os.path.exists(bg_path):
        background = cv2.imread(bg_path)
    else:
        background = np.ones((1, 1, 3))
    self.bg_sub.apply(background)

    # Signal wiring.
    self.main_view.new_frame.connect(self.new_frame)
    self.next_button.clicked.connect(self.incr_cnt)
    self.prev_button.clicked.connect(self.decr_cnt)
    self.change_database.clicked.connect(self.show_database_editor)
    self.camera_swithcer.currentIndexChanged.connect(self.change_camera)
    for checkbox in (self.legs_checkbox, self.holes_checkbox,
                     self.scratches_checkbox):
        checkbox.stateChanged.connect(self.checkbox_changed)
    self.frame.connect(self.main_view.acquire_frame)
def get(self):
    """Stream camera frames for the current app as a multipart
    MJPEG HTTP response."""
    app_details = getiMASAppDetailsById(common.appID)
    frame_stream = gen(Camera(), app_details)
    return Response(frame_stream,
                    mimetype='multipart/x-mixed-replace; boundary=frame')
def change_camera(self, idx):
    """Open the video device selected at combo-box index `idx` and
    re-point the main view at it."""
    device = self.camera_swithcer.itemData(idx)
    self.camera = Camera(device)
    self.main_view.initialize(camera=self.camera)
# Render a dataset of images along a camera trajectory around the scene.
# NOTE(review): this chunk is truncated — the 'random' branch below ends
# at a dangling `else:` whose body lies outside the visible region.
r0 = world.objects[1].center  # look-at target: centre of scene object 1
if config['trajectory'] == 'circular':
    x_traj, y_traj, z_traj = u.get_cam_trajectory(config)
    for n, (i, j, k) in enumerate(zip(x_traj, y_traj, z_traj)):
        lookfrom = torch.tensor((i, j, k))
        if prev_poses is not None:
            # Skip poses already rendered in a previous run (resume support);
            # `eps` is the pose-match tolerance.
            if (torch.abs(lookfrom - prev_poses) < eps).all(1).any():
                print(str(n) + ' is already done')
                continue
        print('Generating image ' + str(n))
        # NOTE(review): `Tensor.to` is not in-place — this return value is
        # discarded, so `lookfrom` stays on its original device. Presumably
        # intended `lookfrom = lookfrom.to(dev)`; confirm against Camera.
        lookfrom.to(dev)
        cam = Camera(config, r0, lookfrom=lookfrom, device=dev)
        with torch.no_grad():
            image = cam.render(world)
        image.save(f'{config["run_folder"]}/imgs/img_{n}.png', flip=True)
        # Record the frame path and camera pose in NeRF-style JSON.
        results_dict = {
            'file_path': f'/imgs/img_{n}.png',
            'transform_matrix': u.listify_matrix(cam.matrix_world)
        }
        u.update_json(prev_json_path, results_dict)
elif config['trajectory'] == 'random':
    if config['overwrite']:
        it = range(0, config['total_points'])
    else:
        # (truncated — remainder of the 'random' branch is outside this chunk)
# NOTE(review): chunk starts mid-expression — "'cpu')" is the tail of a
# device-selection statement (presumably
# `dev = torch.device('cuda' if ... else 'cpu')`) defined above this view.
'cpu')
config = read_config('config.yml')

# Scene materials.
# NOTE(review): ground/center albedos are in [0, 1] while left/right use
# 0-255-style values — verify which range Material expects.
material_ground = Material('lambertian',
                           torch.tensor((0.5, 0.5, 0.5), device=dev))
material_center = Material('lambertian',
                           torch.tensor((0.1, 0.2, 0.5), device=dev))
material_left = Material('lambertian',
                         torch.tensor((224, 90, 90), device=dev))
material_right = Material('metal',
                          torch.tensor((250, 248, 202), device=dev))

# World: a large ground sphere plus one unit sphere at the origin height.
world = World()
world.add(Sphere(torch.tensor((0.0, -1000., 0)), 1000., material_ground))
world.add(Sphere(torch.tensor((0.0, 1.0, 0.)), 1, material_center))
# world.add(Sphere(torch.tensor((-0.25, 0.5, 1.5)), 0.5, material_left))
# world.add(Sphere(torch.tensor((-0.5, 0.5, -1.5)), 0.5, material_right))

# Camera pose: position, target and up vector.
lookfrom = torch.tensor((-2, 1, 1.))
lookat = torch.tensor((0., 0., 0.))
vup = torch.tensor((0., 1., 0.))
cam = Camera(lookfrom, lookat, vup, config['fov'], config['image_width'],
             config['aspect_ratio'], config['render_depth'])

# Render without autograd bookkeeping, then preview and save.
with torch.no_grad():
    image = cam.render(world, antialiasing=config['antialiasing'])
image.show(flip=True)
image.save('output/test_1.png', flip=True)
# NOTE(review): chunk starts mid-expression — "'cpu')" is the tail of a
# device-selection statement defined above this view; `config_path` is
# likewise defined outside the chunk.
'cpu')
config = read_config(config_path)

# HACK(review): `eval` on config strings executes arbitrary code from the
# config file — fine only for trusted local configs; prefer
# `ast.literal_eval` for plain tuples/lists.
light1_pos = eval(config['light1_pos'])
light2_pos = eval(config['light2_pos'])

# Scene materials.
material_red = Material('lambertian',
                        torch.tensor((0.8, 0.023, 0.011), device=dev))
material_blue = Material('lambertian',
                         torch.tensor((0.004, 0.023, 0.8), device=dev))

# World: two spheres on the z axis plus two point lights.
world = World(dev)
world.add(Sphere(torch.tensor((0., 0., 3)), 2, material_blue,
                 device=dev))  # Front sphere
world.add(Sphere(torch.tensor((0., 0., -3)), 2, material_red,
                 device=dev))  # Back sphere
world.add_light(torch.tensor(light1_pos, device=dev))
world.add_light(torch.tensor(light2_pos, device=dev))

# Camera placed at the back sphere's centre.
r0 = torch.tensor((0., 0., -3.), device=dev)
cam = Camera(config, r0, dev)

# Render without autograd bookkeeping and save the result.
with torch.no_grad():
    image = cam.render(world)
# image.show(flip=True)
image.save('render.png', flip=True)
class MainWindow(QMainWindow, main.Ui_MainWindow):
    """Main window of the training/detection GUI.

    Shows a live camera view, lets the user edit the image database,
    and runs neural-network training in a separate process while
    reporting progress through shared-memory values.
    """

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        # Live camera feed into the microscope view.
        self.camera = Camera()
        self.microscopeView.initialize(camera=self.camera)
        self.microscopeView.setEnabled(True)
        # Data directory lives under the working directory.
        self.main_path = os.getcwd()
        self.path = os.path.join(self.main_path, 'data')
        self.database_editor = DatabaseEditor(self.camera, self.path)
        self.network_handler = NetworkHandler(self.main_path)
        # Handle to the background training process (None until started).
        self.training_process = None
        # UI refresh timer at 5 Hz drives update_event().
        self.timer = QTimer()
        self.timer.setInterval(1000 // 5)
        self.timer.start()
        # Shared values written by the training process, read by the UI.
        self.current_progress = Value('i', 0)
        self.overall_progress = Value('i', 0)
        self.train_time_start = Value('f', 0)
        self.train_time_end = Value('f', 0)
        self.connect()
        self.display_classes()

    def connect(self):
        """Wire all UI and camera signals to their handlers."""
        self.databaseEditButton.clicked.connect(self.show_database_editor)
        self.database_editor.close_event.connect(self.enable_videostream)
        self.listView.itemClicked.connect(self.display_item)
        self.startTrainingButton.clicked.connect(self.train_network)
        self.camera.camera_err.connect(self.camera_error)
        self.database_editor.database_handler.update_classes.connect(
            self.display_classes)
        # self.microscopeView.new_frame.connect(self.new_frame)
        self.timer.timeout.connect(self.update_event)

    def update_event(self):
        """Timer tick: run detection on the live frame (when the editor
        stream is off and no training is running) and refresh the
        training progress bar / status text."""
        # NOTE(review): else-pairing below reconstructed from a collapsed
        # source line; it is read as pairing with the outer condition —
        # confirm against the original file.
        if not self.database_editor.stream_enabled \
                and (not self.training_process or self.training_process
                     and not self.training_process.is_alive()):
            frame = self.camera.get_frame()
            if frame is not None:
                data, image = self.network_handler.detect(frame)
                print(data)
                # Show the annotated detection frame instead of the live view.
                self.microscopeView.setEnabled(False)
                self.microscopeView.frame = image
                self.microscopeView.update()
        else:
            self.microscopeView.setEnabled(True)
        self.trainProgressBar.setMaximum(self.overall_progress.value - 1)
        self.trainProgressBar.setValue(self.current_progress.value)
        if self.training_process and self.training_process.is_alive():
            self.statusbarDisplay.setText('Training is in progress...')
        else:
            self.statusbarDisplay.clear()

    def camera_error(self):
        """Report a camera failure and close the window."""
        msg = 'Cannot get frames from camera!\nProceed to exit the application.'
        QMessageBox.critical(self, 'Error!', msg, QMessageBox.Close)
        self.close()

    def train_network(self):
        """Start network training in a separate process, or warn if a
        training run is already alive."""
        if self.training_process is None or not self.training_process.is_alive(
        ):
            self.training_process = Process(
                target=self.network_handler.train_network,
                args=(self.current_progress, self.overall_progress,
                      self.train_time_start, self.train_time_end))
            self.training_process.start()
            message = 'Training process started. The application may slow down.\n' \
                      'Please, do not close the application until training\'s done!\n' \
                      'You can check the progress down below at sidebar!'
            self.statusbar.setVisible(True)
            # Guard against a zero maximum before the trainer reports totals.
            self.trainProgressBar.setMaximum(
                self.overall_progress.value -
                1 if self.overall_progress.value > 0 else self.
                overall_progress.value)
            self.trainProgressBar.setValue(self.current_progress.value)
            QMessageBox.information(self, 'Success!', message, QMessageBox.Ok)
        else:
            QMessageBox.warning(self, 'Attention',
                                'Training is still in progress!',
                                QMessageBox.Ok)

    def stop_training_dialog(self):
        """Ask the user whether to exit while training is in progress.

        Returns the QMessageBox button the user clicked (Yes or Cancel).
        """
        yes, cancel = QMessageBox.Yes, QMessageBox.Cancel
        message = 'Neural network training is still in progress.\nContinue exit?'
        return QMessageBox.warning(self, 'Warning!', message, yes | cancel)

    def show_database_editor(self):
        """Open the database editor and hand the video stream to it."""
        self.microscopeView.setEnabled(False)
        self.database_editor.stream_enabled = True
        self.database_editor.show()

    def enable_videostream(self):
        """Reclaim the video stream after the database editor closes."""
        self.database_editor.stream_enabled = False
        self.microscopeView.setEnabled(True)

    def display_classes(self):
        """Repopulate the class list from the database's ideal images."""
        self.listView.clear()
        self.listView.addItems(
            self.database_editor.database_handler.ideal_images.keys())

    def display_item(self, item=None):
        """Show the ideal image for `item` (or the current selection /
        first entry) scaled to the preview widget's width."""
        logs = self.database_editor.database_handler.ideal_images
        if not item and self.listView.selectedItems():
            item = self.listView.selectedItems()[0]
        if len(logs):
            path = logs[item.text()] if item else list(logs.values())[0]
            image = cv2.imread(path)
            # Fit image to the preview width (minus a small border).
            scale = (self.databaseComponentView.size().width() -
                     2) / image.shape[1]
            image = cv2.resize(image, None, fx=scale, fy=scale)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            image = qimage2ndarray.array2qimage(image)
            self.databaseComponentView.setPixmap(QPixmap.fromImage(image))

    def resizeEvent(self, ev):
        """Rescale the preview image whenever the window is resized."""
        self.display_item()
        super(MainWindow, self).resizeEvent(ev)

    def closeEvent(self, ev):
        """On close: confirm if training is running, then release the
        camera and exit; otherwise exit directly."""
        if self.training_process and self.training_process.is_alive():
            action = self.stop_training_dialog()
            if action == QMessageBox.Yes:
                self.training_process.terminate()
                self.camera.cap.release()
                cv2.destroyAllWindows()
                sys.exit()
            else:
                ev.ignore()
        else:
            sys.exit()
def get(self):
    """Return a multipart MJPEG streaming response fed by the default
    camera."""
    frame_stream = gen(Camera())
    return Response(frame_stream,
                    mimetype='multipart/x-mixed-replace; boundary=frame')
# NOTE(review): Python 2 script chunk, truncated at both ends — the
# opening `try:` for the except clause below and the body of the final
# `if` lie outside the visible region.
    duration = int(sys.argv[1])
except IndexError:
    duration = 20
# Define the duration (in seconds) of the video capture here
captureDuration = duration
# Create Filename based on timestamp
# Capture time of Video
dt = datetime.datetime.today()
# NOTE(review): ':' in the timestamp makes the filename invalid on
# Windows filesystems — confirm target platform.
timestamp = dt.strftime("%Y-%m-%d_%H:%M:%S")
filePath = os.path.join(os.getcwd(), 'output', timestamp + '.avi')
# Capture and Save video on local disc
print 'Recording for {duration} seconds . . .'.format(duration=captureDuration)
cam = Camera()
# Upload to S3 and e-mail the presigned URL only if recording succeeded.
if (cam.record(filePath, captureDuration, is_display_frame=False)):
    print "Video Captred. File saved as '{filePath}'".format(filePath=filePath)
    # Upload File to S3
    print 'Uploading file to S3 . . .'
    file = FileHandler(filePath)
    file.upload()
    print 'File uploaded successfully, Generating Url to access video'
    s3Url = file.getS3PresignedUrl()
    # Email S3Url to user for viewing
    print 'Sending Email . . .'
    email = Email()
    if (email.success(RECIPIENT_EMAIL, file.fileBasename, s3Url)):
        # (truncated — success handling lies outside this chunk)
# NOTE(review): Python 2 script chunk, truncated at the start — the
# opening `try:` for the except clause below lies outside the visible
# region.
    duration = int(sys.argv[1])
except IndexError:
    duration = 20
# Define the duration (in seconds) of the video capture here
captureDuration = duration
# Create Filename based on timestamp
# Capture time of Video
dt = datetime.datetime.today()
# NOTE(review): ':' in the timestamp makes the filename invalid on
# Windows filesystems — confirm target platform.
timestamp = dt.strftime("%Y-%m-%d_%H:%M:%S")
filePath = os.path.join(os.getcwd(), 'output', timestamp + '.avi')
# Capture and Save video on local disc
print 'Recording for {duration} seconds . . .'.format(duration=captureDuration)
cam = Camera()
cam.record(filePath, captureDuration, False)
print 'Recording completed'
# Upload File to S3
print 'Uploading file to S3 . . .'
file = FileHandler(filePath)
file.upload()
print 'File uploaded successfully, Generating Url to access video'
s3Url = file.getS3PresignedUrl()
# Email S3Url to user for viewing (recipient redacted in source)
print 'Sending Email . . .'
email = Email()
if (email.send('*****@*****.**', file.fileBasename, s3Url)):
    print "File successfully uploaded, Email sent !"
from libs.camera import Camera
from libs.plate import Plate

# Known physical width of the reference plate, in metres.
plate_width = 0.073

plate = Plate()
camera = Camera()

# Estimate the camera's focal length from up to 20 plate detections.
# Fixes: Python 2 `print` statements converted to `print()` calls,
# manual `while` counter replaced with `range`, misspelled local
# `pixal_width` renamed, and the duplicated magic constant `0.073`
# replaced with `plate_width`.
for attempt in range(20):
    print(attempt)
    camera.capture()
    detected = plate.detect_plate()
    if detected:
        # (`distance_from_camara` / `bottemLeft` are the library's
        # given names.)
        print(plate.distance_from_camara())
        # Apparent plate width in pixels from the detected corners.
        pixel_width = plate.topRight['x'] - plate.bottemLeft['x']
        # Pinhole model: focal = pixel_width * distance / real_width.
        # NOTE(review): 0.5 is presumably the assumed distance to the
        # plate in metres — confirm against the capture setup.
        focal = (pixel_width * 0.5) / plate_width
        print(focal)