def __init__(self):
    """Wire each SystemWorkStates value to its handler and create the
    capture device those handlers use."""
    handler_pairs = (
        (SystemWorkStates.image, self.work_on_image),
        (SystemWorkStates.video, self.work_on_video),
        (SystemWorkStates.webcam, self.work_on_webcam),
    )
    # Dispatch table: state value -> bound processing method.
    self.methods_map = {state.value: handler for state, handler in handler_pairs}
    self.imagecapture = ImageCapture()
def __init__(self) -> None:
    """Create the Qt main window, build the UI, then attach the capture helper."""
    super().__init__()
    # Top-level window that init_ui() populates.
    self.window = QtWidgets.QMainWindow()
    self.init_ui()
    # Delegate object that performs the actual image capturing.
    self.imageCaptureDelegate = ImageCapture()
def main():
    """Capture one image, run the local prediction model on it, and persist
    the result together with its capture timestamps."""
    # --- Capture ---
    image_capture = ImageCapture(dir_path=IMAGE_DIR_PATH, n_camera=2)
    image_capture.capture()
    image_path = image_capture.last_captured_path

    # --- Predict ---
    model_path = MODEL_PATH
    client = LocalPredictApiClient(model_path)
    pred_result = client.predict(image_path)

    # --- Save ---
    prediction_data = PredictionData(
        image_path=image_path,
        prediction=pred_result['prediction'],
        json_result=str(pred_result),
        capture_datetime_local=image_capture.capture_datetime_local,
        capture_datetime_utc=image_capture.capture_datetime_utc,
    )
    PredictionDataStore(DB_DIR_PATH).save(prediction_data)
# NOTE(review): this chunk is truncated at both ends — the first statements
# reference names (cwd, datafolder, filename, bounding_box, final) defined
# before the visible region, and the body of the trailing for-loop lies
# beyond it.
newpath = cwd + "/" + datafolder
if not os.path.exists(newpath):
    os.makedirs(newpath)
imagefile = datafolder + filename + '.png'
# Opened in append mode; presumably closed inside save_to_folder — TODO confirm.
textfile = open(datafolder + filename + '.txt', 'a')
file_operations.save_to_folder(textfile, imagefile, bounding_box, final)

# todo get classes through GUI
classes = []
#interface = CLI()
file_operations = FileOperations()
motor = MotorControl()
camera = ImageCapture(RPI_CAMERA)
image_processor = ImageProcessing(camera.capture())
delay = 1 / 1000.0
#images = input("How many images do you want per category (5 categories)?")
images = 10000
STEPS_FOR_FULL_CIRCLE = 12360
# Motor steps to advance between shots so that `images` pictures cover one
# full rotation.
steps = int(STEPS_FOR_FULL_CIRCLE / images)
classes = ["Banana"] #, "Rolle"]
only_snippets = False
only_train_images = True
## Section for the configuration
# Make images for every class
for label in classes:
from image_capture import ImageCapture
from image_processing import ImageProcessing
import cv2
import numpy as np

camera = ImageCapture()
#camera = cv2.VideoCapture(0)
processor = ImageProcessing()

# Continuously grab frames, crop to the centre 3/5 of the width, find the
# largest contour on the Canny edges, and display every intermediate stage.
while True:
    img_raw = camera.capture()
    frame_width = np.shape(img_raw)[1]
    img_cut = img_raw[:, int(frame_width * 1 / 5):int(frame_width * 4 / 5), :]
    img_gray = processor.gray(img_cut)
    edge = processor.canny(img_gray)
    contour = processor.max_contour(edge)
    cv2.drawContours(img_cut, contour, -1, (0, 255, 0), 3)
    bounding_box = processor.bounding_box(contour)
    print(bounding_box)
    for title, image in (('raw_image', img_raw),
                         ('cut_image', img_cut),
                         ('gray_image', img_gray),
                         ('canny_image', edge)):
        cv2.imshow(title, image)
    cv2.waitKey(20)
def run(self):
    """
    Main Challenge method. Has to exist and is the start point for the
    threaded challenge.

    Initialises hardware (GPIO/chassis, Pi camera), loads the previously
    learned arena-colour order and exposure calibration from text files,
    starts the capture thread, then waits until the stream processor
    reaches State.FINISHED before shutting everything down.
    """
    # Startup sequence
    if self.core_module is None:
        # Initialise GPIO
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)
        # Instantiate CORE / Chassis module and store in the launcher.
        self.core_module = core.Core(GPIO)
    # Limit motor speeds in AutoMode
    self.core_module.set_speed_factor(0.6)  # 0.6 on old motors
    self.core_module.enable_motors(True)
    # wait for the user to enable motors
    while not self.core_module.motors_enabled():
        time.sleep(0.25)
    self.camera = picamera.PiCamera()
    self.processor = StreamProcessor(self.core_module, self.camera)
    print('Wait ...')
    time.sleep(2)
    # Load our previously learned arena colour order, if it was saved.
    filename = "arenacolours.txt"
    if os.path.isfile(filename):
        # `with` closes the file; the explicit close() the original added
        # afterwards was redundant and has been dropped.
        with open(filename) as f:
            content = f.readlines()
        if len(content) > 0:
            self.processor.arenacolours = [x.strip() for x in content]
            self.processor.state = State.ORIENTING
    # Setup the camera
    frameRate = 30  # Camera image capture frame rate
    self.camera.resolution = (self.processor.imageWidth,
                              self.processor.imageHeight)
    self.camera.framerate = frameRate
    self.camera.awb_mode = 'off'
    # Load the exposure calibration
    redgain = 1.5  # Default Gain values
    bluegain = 1.5
    filename = "rbgains.txt"
    if os.path.isfile(filename):
        with open(filename) as f:
            content = [x.strip() for x in f.readlines()]
        # [2:] skips the two-character prefix on each line — presumably
        # "r=" / "b=" — TODO confirm against the file writer.
        redgain = float(content[0][2:])
        bluegain = float(content[1][2:])
    self.camera.awb_gains = (redgain, bluegain)
    self.captureThread = ImageCapture(self.camera, self.processor)
    try:
        # Loop indefinitely until we are no longer running.
        while self.processor.state != State.FINISHED:
            # Wait for the interval period. The sleep was commented out in
            # the original, leaving an empty (busy) loop body; restored so
            # the thread does not spin at 100% CPU.
            time.sleep(0.1)
    except KeyboardInterrupt:
        print("User shutdown\n")
    except Exception as e:
        print(e)
    # Shutdown: always disable motors and stop the worker threads.
    self.core_module.enable_motors(False)
    self.processor.state = State.FINISHED
    self.captureThread.join()
    self.processor.terminated = True
    self.processor.join()
    self.camera.close()  # Ensure camera is released
    # del self.camera
    self.camera = None
    print("Challenge terminated")
def __init__(self) -> None:
    """Initialise the widget, lay out the UI, and create the helper delegates."""
    super().__init__()
    self.init_ui()
    # Delegates doing the real work: one captures images, one translates them.
    self.imageCaptureDelegate = ImageCapture()
    self.translateDelegate = ImageTranslator()
import os
import subprocess
from telebot import TeleBot
from threading import Event
from config import config
from image_capture import ImageCapture
from motion_monitor import MotionMonitor

# Resolve the config path relative to this file so the bot works from any CWD.
root = os.path.dirname(os.path.abspath(__file__))
bot_conf = config(os.path.join(root, 'config.json')).get_config()
# threaded=False: handlers run sequentially on the polling thread.
telebot = TeleBot(token=bot_conf['TELEGRAM'].get("BOT_API_TOKEN", ""), threaded=False)
# User-facing reply texts, kept in the config rather than hard-coded.
messages = bot_conf['TELEGRAM_MESSAGES']
image_capture = ImageCapture()

@telebot.message_handler(commands=['start'])
def start(message):
    # Reply with the configured greeting (empty string if not configured).
    telebot.reply_to(message, messages.get("START", ""))

@telebot.message_handler(commands=['help'])
def help(message):
    # NOTE(review): the local name shadows the builtin `help`; harmless here
    # since the function is only invoked through the bot dispatcher.
    help = messages.get("HELP", "")
    for h in messages.get("HELP_COMMANDS", []):
        help += "\n "
        help += h
    telebot.reply_to(message, help)

# NOTE(review): handler truncated — its function body lies beyond this chunk.
@telebot.message_handler(commands=['capture'])