Example #2
class EtrCalibration(ConfiguredClient):
    """A simple class to convey data from multiplexer (UGM_UPDATE_MESSAGE)
    to ugm_engine using udp. That level of comminication is needed, as
    pyqt won`t work with multithreading..."""

    @log_crash
    def __init__(self, addresses):
        super(EtrCalibration, self).__init__(addresses=addresses, type=peers.ETR_CALIBRATION)

        HOST = self.get_param("rcv_ip")
        PORT = int(self.get_param("rcv_port"))
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.bind((HOST, PORT))
        self.socket.listen(1)
        self.conn, addr = self.socket.accept()
        self.ready()
        self.logger.info("Start initializin etr amplifier...")

        self.init()
        self.initCamera()

    def __del__(self):
        self.socket.close()

    def init(self):
        self._treshold = 200
        self.invMatrix = np.eye(3)

    def initCamera(self):
        self.cam = Camera()
        self.cam.setTreshold(self._treshold)

    def run(self):
        try:
            while True:

                l_data = self.conn.recv(1024)
                msg = variables_pb2.Variable()
                msg.ParseFromString(l_data)
                # msg.key, msg.value  # one of them is the timestamp and start_calibration
                l_msg = None  # parsing of the calibration start/stop messages will go here
                if l_msg is not None:
                    pass

                # reading frames from the camera will probably happen here; after the
                # calibration-stop info arrives, _send_results will probably be called
        finally:
            self.socket.close()

    def _send_results(self):
        r = variables_pb2.Sample()
        r.timestamp = time.time()
        for i in range(8):
            r.channels.append(random.random())
        self.conn.send_message(message=r.SerializeToString(), type=types.ETR_CALIBRATION_RESULTS, flush=True)
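The parsing placeholder in run() could be filled in along these lines; a minimal sketch, assuming the incoming Variable's key distinguishes calibration start from stop (both key names below are assumptions, not the project's confirmed protocol):

    # Hypothetical helper for the parsing placeholder in run(); key names are assumed.
    def _parse_calibration_msg(self, msg):
        if msg.key in ("start_calibration", "stop_calibration"):
            return (msg.key, msg.value)  # e.g. ("start_calibration", <timestamp>)
        return None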
Example #4
def cam_update_img():
    global cam
    if not cam:
        cam = Camera(800, 600)
    try:
        cam.start_once(get_yolo_result)
        # _thread.start_new_thread(hello, (1234, ))
        return jsonify({
            'success': True,
        })
    except Exception as e:
        print("Error: start thread error:", e)
        return jsonify({
            'success': False,
        })
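Since cam_update_img returns jsonify responses, it presumably backs a Flask view; a minimal registration sketch, with a made-up URL path:

# Hypothetical route registration; the path and method are illustrative only.
@app.route('/cam/update_img', methods=['POST'])
def cam_update_img_view():
    return cam_update_img()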
Example #6
class Main:
    def __init__(self):
        self.camera = Camera()
        with timer.time(f'load'):
            self.mtcnn = MTCNN()
            self.mtcnn.init(480,640)

    def main(self, manual_brake=None):
        while not manual_brake:
            frame = self.camera.read()
            try:
                with timer.time(f'inference'):
                    bboxes = self.mtcnn.findFace(frame)  # list of (x1, y1, x2, y2) boxes
                    for x1, y1, x2, y2 in bboxes:
                        cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)
            except Exception as e: # face not found
                print(e)
                continue
            manual_brake = self.camera.show_frame(frame)
        self.camera.clean()
        timer.print_stats()
Example #7
def main():
    '''
    Main loop that initializes the camera and the rest of the pipeline.
    '''
    
    args = parse_args()

    config = Config()

    # Start the async Camera class
    cam = Camera(args)
    cam.start()

    # Start the IO write queue
    write_queue = WriteQueue()
    write_queue.start()
    if config.Inference.mode == 'detect':
        print('Running detection')
        detector = OpenVinoDetectorAsync(config.Inference)
    else:
        detector = OpenVinoClassifierAsync(config.Inference)

    while True: 
        _,frame = cam.read()
        start_time = time.time()
        infer_frame = deepcopy(frame)
        detections = detector.run(infer_frame)
        timestamp = datetime.now(tz=timezone.utc).strftime('%Y-%m-%d-%H-%M-%S-%f')
        path = 'tmp/' + timestamp + '.jpg'
        if detections:
            '''
            Need to fix the bounding box locations
            for detection in detections:
                xmin = detection.position.xmin
                ymin = detection.position.ymin  
                xmax = detection.position.xmax 
                ymax = detection.position.ymax 
                cv2.rectangle(frame, (int(xmin), int(ymin)), (int(xmax), int(ymax)), detection.color, 2)
            '''
            cv2.putText(frame,'HUMAN',(10,400), cv2.FONT_HERSHEY_SIMPLEX, 4,(25,25,255),2,cv2.LINE_AA)
            cv2.imshow('frame', frame)
            cv2.imwrite(path, frame)
            # This has to RTT
            upload_frame(path, config)
        else: 
            # add to the upload queue
            write_queue.enqueue(path, frame)
        end_time = time.time()
        print("[LOGS] ---PIPELINE TIME--- **{}**".format(end_time-start_time))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break 
    cam.stop()
    write_queue.stop()
    cv2.destroyAllWindows() 
Example #8
def start_cam_service():
    global cam
    if not cam:
        cam = Camera(800, 600)
    if cam and (not cam.operating):
        try:
            _thread.start_new_thread(cam.start, (get_yolo_result, ))
            # _thread.start_new_thread(hello, (1234, ))
            return jsonify({'success': True, 'operating': True})
        except Exception as e:
            print("Error: start thread error:", e)
            return jsonify({'success': False, 'error': 'START_THREAD_ERROR'})
    else:
        return jsonify({'success': True, 'operating': True})
Example #9
from temperature import Temperature
from light import Light
from cam import Camera
import time
import RPi.GPIO as GPIO

if __name__ == "__main__":
    GPIO.setmode(GPIO.BCM)
    camera = Camera("pub_monitor_thread")
    camera.start()
    while True:
        # get and pub temperature
        temperature = Temperature()
        temperature.pub_temperature()
        # get and pub light
        light = Light(17, 0)
        light.pub_light()
        time.sleep(1)
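Constructing Temperature and Light on every iteration is probably unnecessary; a minimal variant that builds the sensor wrappers once (assuming their constructors only perform one-time setup) could look like:

    # Sketch: create the sensors once, then poll them in the loop.
    temperature = Temperature()
    light = Light(17, 0)
    while True:
        temperature.pub_temperature()
        light.pub_light()
        time.sleep(1)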
Example #10
File: peep.py Project: Ngoguey42/NetPagle
import mss
import numpy as np
import matplotlib.pyplot as plt

from m2 import M2
from wow import WoW
from cam import Camera

from objects import GameObject

# TODO: RENDER in argv
# RENDER = False
RENDER = True

GODI_PATH = 'Y:\\dbc\\GameObjectDisplayInfo.dbc'
CMD_PATH = 'Y:\\dbc\\CreatureModelData.dbc'
MODELS_PREFIX = 'Y:\\model.mpq'

w = WoW(godi_path=GODI_PATH, cmd_path=CMD_PATH)
cam = Camera(w)

it = []
it += list(w.gen_game_objects())
it += list(w.gen_players())

print('  Snapshotting screen...')
with mss.mss() as sct:
    monitor = sct.monitors[1]
    img = sct.grab(monitor)
    img = np.asarray(img)[:, :, [2, 1, 0]]
    print('  Snapshotted!', img.shape)

plt.close('all')
fig = plt.figure()
ax = fig.add_subplot(111)
Example #11
class Main:
    def __init__(self, mrz_port="/dev/ttyACM0"):
        with timer.time(f'load-total'):
            self.card_reader = CardReader()
            self.card_reader.set_mrz_port(mrz_port)
            with timer.time(f'load-mtcnn'):
                self.mtcnn = MTCNN()
                self.mtcnn.init(480, 640)
            with timer.time(f'load-facenet'):
                self.face_embedder = FaceEmbedder()
            self.camera = Camera()
            #
            self.camera_face = None
            self.camera_emb = None
            self.reader_face = None
            self.reader_emb = None

    def prep_im(self, im):
        im = cv2.resize(im, (160, 160))
        return torch.tensor(im).permute(2, 1, 0).to('cuda')

    async def camera_loop(self):
        while True:
            with timer.time(f'camera-total'):
                frame = self.camera.read()
                print('[camera] frame:', frame.shape)
                with timer.time(f'camera-mtcnn'):
                    bboxes = self.mtcnn.findFace(frame)
                print('[camera] bboxes:', bboxes)
                for x1, y1, x2, y2 in bboxes:
                    self.camera_face = frame[y1:y2, x1:x2]
                    print('[camera] face:', self.camera_face.shape)
                    with timer.time(f'camera-facenet'):
                        im = self.prep_im(self.camera_face)
                        self.camera_emb = self.face_embedder.face2embedding(im)
                await asyncio.sleep(0)

    async def reader_loop(self):
        while True:
            print('[reader] starting read...')
            chip_img_future = self.card_reader.read_image_bytes_async()
            t0 = time()
            while not chip_img_future.isDone():
                await asyncio.sleep(0)
            byte_array = chip_img_future.get()
            if len(byte_array) > 0:
                print('[reader] byte arr len:', len(byte_array))
                chip_img = self.card_reader.bytes2np(byte_array)  # [h,w]
                timer.D['reader-read'].append(time() - t0)
                print('[reader] chip img:', chip_img.shape)
                chip_img = np.concatenate(
                    [chip_img[:, :, None] for _ in range(3)],
                    axis=2)  # [h,w,3]
                with timer.time(f'reader-mtcnn'):
                    bboxes = self.mtcnn.findFace(chip_img)
                print('[reader] bboxes:', bboxes)
                for y1, x1, y2, x2 in bboxes:
                    self.reader_face = chip_img[y1:y2, x1:x2]
                    print('[reader] chip face:', self.reader_face.shape)
                    with timer.time(f'reader-facenet'):
                        im = self.prep_im(self.reader_face)
                        self.reader_emb = self.face_embedder.face2embedding(im)
                    timer.D['reader-total'].append(time() - t0)
                    self.compare_embeddings_and_exit()

    def compare_embeddings_and_exit(self):
        if self.reader_face is None:
            print("[ERROR] Couldn't read face from reader...")
        if self.camera_face is None:
            print("[ERROR] Couldn't read face from camera...")
        cv2.imwrite('reader_face.png', self.reader_face)
        cv2.imwrite('camera_face.png', self.camera_face)
        score = float((self.reader_emb - self.camera_emb).norm())
        print('score:', score)
        timer.print_stats()
        sys.exit()

    def main(self):
        asyncio.gather(self.camera_loop(), self.reader_loop())
        asyncio.get_event_loop().run_forever()
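Calling asyncio.gather() outside a running event loop and then run_forever() relies on legacy asyncio behavior; on current Python the gather would be awaited inside asyncio.run(). A minimal sketch of an equivalent entry point:

    # Sketch of a modern equivalent for main(); the loops themselves are unchanged.
    def main(self):
        async def _run():
            await asyncio.gather(self.camera_loop(), self.reader_loop())
        asyncio.run(_run())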
Example #13
		cam.rot += 1
	mv = Vector()
	if kbd.a:
		mv.y -= 1
	if kbd.d:
		mv.y += 1
	if kbd.w:
		mv.x += 1
	if kbd.s:
		mv.x -= 1
	if kbd.up:
		cam.rays += 1
	if kbd.down:
		cam.rays -= 1
	cam.move(mv)
	cam.colides(lines)

lines.append(Line(Vector(300, 100), Vector(300, 200), True, wallImg))
cam = Camera()
kbd = Keyboard()
physloop = simplegui.create_timer(1000/60, pysloop)
frame = simplegui.create_frame("Points", Camera.WIDTH, Camera.HEIGHT, 0)
frame.set_draw_handler(draw)
frame.set_canvas_background("White")
frame.set_keydown_handler(kbd.keyDown)
frame.set_keyup_handler(kbd.keyUp)
# pos the frame animation
frame.set_mouseclick_handler(mouse)
physloop.start()
frame.start()
Example #14
import pandas as pd
import shapely.geometry as sg
import pymem

from show import patchify_points, patchify_polys
from m2 import M2
from wow import WoW
from cam import Camera
from objects import GameObject
import constants

GODI_PATH = 'Y:\\dbc\\GameObjectDisplayInfo.dbc'
CMD_PATH = 'Y:\\dbc\\CreatureModelData.dbc'
MODELS_PREFIX = 'Y:\\model.mpq'
w = WoW(godi_path=GODI_PATH, cmd_path=CMD_PATH)
cam = Camera(w)

rows = []
df = globals().get('df', pd.DataFrame())

# Mem Explorer
DEPTH_MAX = 5
BCOUNT_START = 2500
BCOUNT_IN_DEPTH = 2000
seen = set()


def find_at(addr, bcount, depth=()):
    if addr in seen:
        return
    seen.add(addr)
Example #15
class imageGradientDescent():
    # Constants that will be necessary for evaluation of cost function
    X_PIXELS = 2160 // 10
    Y_PIXELS = 2160 // 10
    X_MIDPOINT = X_PIXELS / 2
    Y_MIDPOINT = Y_PIXELS / 2
    IDEAL_Z_DISP = 0
    EPSILON = 95
    MAX_STEPS = 100000

    def __init__(self):
        # Initialize the camera and motor controller for use in the gradient descent
        self.cam = Camera()
        self.controller = MotorController()

    # Plot radon transform of Intensity vs projection angle and return sinogram
    def plot_radon_angle_x(self, image):
        # Open image
        img = image

        # resize image
        scale_percent = 10  # percent of original size
        width = int(img.shape[1] * scale_percent / 100)
        height = int(img.shape[0] * scale_percent / 100)
        dim = (width, height)
        resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)

        # Radon transform resized image
        nSteps = width
        thetaStart = -10
        thetaEnd = 10
        thetas = np.linspace(thetaStart, thetaEnd, nSteps)
        sinogram = radon(resized, theta=thetas, circle=True)

        # Take absolute max of sinogram across axis and find location where it occurs
        absmax_sinogram = np.amax(sinogram, axis=0)
        absmax_loc = thetas[np.where(absmax_sinogram == absmax_sinogram.max())]

        # Plot original image and absMax of Intensity of radon transform vs projection angle
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5))
        ax1.set_title("Original")
        ax1.imshow(img, cmap=plt.cm.Greys_r)
        ax2.set_title("Radon transform\n(Sinogram)")
        ax2.set_xlabel("Projection angle (deg)")
        ax2.set_ylabel("Maximum Intensity")
        ax2.plot(thetas, absmax_sinogram, '-')
        fig.tight_layout()
        plt.show()

    # Plot radon transform of Intensity vs x-pixels and return sinogram
    def plot_radon_pixels_x(self, image):
        # Open image
        img = image

        # resize image
        scale_percent = 10  # percent of original size
        width = int(img.shape[1] * scale_percent / 100)
        height = int(img.shape[0] * scale_percent / 100)
        dim = (width, height)
        resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)

        # Radon transform resized image
        nSteps = height
        thetaStart = 80
        thetaEnd = 100
        thetas = np.linspace(thetaStart, thetaEnd, nSteps)
        sinogram = radon(resized, theta=thetas, circle=True)

        # Take absmax of intensity in dimensions of pixels
        absmax_sinogram = np.amax(sinogram, axis=1)
        peaks, properties = sp.signal.find_peaks(absmax_sinogram,
                                                 prominence=100)

        # Plot original image and absMax of Intensity of radon transform vs projection angle
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5))
        ax1.set_title("Original")
        ax1.imshow(img, cmap=plt.cm.Greys_r)
        ax2.set_title("Radon transform\n(Sinogram)")
        ax2.set_xlabel("Projection angle (deg)")
        ax2.set_ylabel("Maximum Intensity")
        ax2.plot(peaks, absmax_sinogram[peaks], 'x')
        fig.tight_layout()
        plt.show()

    # Plot radon transform of Intensity vs projection angle and return sinogram
    def plot_radon_angle_y(self, image):
        # Open image
        img = image

        # resize image
        scale_percent = 10  # percent of original size
        width = int(img.shape[1] * scale_percent / 100)
        height = int(img.shape[0] * scale_percent / 100)
        dim = (width, height)
        resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)

        # Radon transform resized image
        nSteps = height
        thetaStart = 80
        thetaEnd = 100
        thetas = np.linspace(thetaStart, thetaEnd, nSteps)
        sinogram = radon(resized, theta=thetas, circle=True)

        # Take absolute max of sinogram across axis and find location where it occurs
        absmax_sinogram = np.amax(sinogram, axis=0)
        absmax_loc = thetas[np.where(absmax_sinogram == absmax_sinogram.max())]

        # Plot original image and absMax of Intensity of radon transform vs projection angle
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5))
        ax1.set_title("Original")
        ax1.imshow(img, cmap=plt.cm.Greys_r)
        ax2.set_title("Radon transform\n(Sinogram)")
        ax2.set_xlabel("Projection angle (deg)")
        ax2.set_ylabel("Maximum Intensity")
        ax2.plot(thetas, absmax_sinogram, '-')
        fig.tight_layout()
        plt.show()

    # Plot radon transform of Intensity vs y-pixels and return sinogram
    def plot_radon_y_pixels(self, image):
        # Open image
        img = image

        # resize image
        scale_percent = 10  # percent of original size
        width = int(img.shape[1] * scale_percent / 100)
        height = int(img.shape[0] * scale_percent / 100)
        dim = (width, height)
        resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)

        # Radon transform resized image
        nSteps = height
        thetaStart = 80
        thetaEnd = 100
        thetas = np.linspace(thetaStart, thetaEnd, nSteps)
        sinogram = radon(resized, theta=thetas, circle=True)
        absmax_sinogram = np.amax(sinogram, axis=1)
        peaks, properties = sp.signal.find_peaks(absmax_sinogram,
                                                 prominence=100)

        # Plot original image and absMax of Intensity of radon transform vs projection angle
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5))
        ax1.set_title("Original")
        ax1.imshow(img, cmap=plt.cm.Greys_r)
        ax2.set_title("Radon transform\n(Sinogram)")
        ax2.set_xlabel("Radii (pixels)")
        ax2.set_ylabel("Maximum Intensity")
        ax2.plot(peaks, absmax_sinogram[peaks], 'x')
        fig.tight_layout()
        plt.show()

    # Returns location of absolute max of Intensity vs Projection angle radon transform
    def getRT_angle_x(self, image, scale):
        # Create image object
        img = image

        # resize image
        scale_percent = scale  # percent of original size
        width, height = img.shape
        width = (width * scale_percent // 100)
        height = (height * scale_percent // 100)
        dim = [width, height]
        img = np.resize(img, dim)

        # Radon transform resized image
        nSteps = width
        thetaStart = -10
        thetaEnd = 10
        thetas = np.linspace(thetaStart, thetaEnd, nSteps)
        sinogram = radon(img, theta=thetas, circle=True)

        # Take absolute max of sinogram across axis and find location where it occurs
        absmax_sinogram = np.amax(sinogram, axis=0)
        absmax_loc = thetas[np.where(absmax_sinogram == absmax_sinogram.max())]
        return absmax_loc

    # Returns tuple of abs max and rel max locations and magnitudes
    # Returns [rmax1_mag_x, rmax1_loc_x, amax_mag_x, amax_loc_x, rmax2_mag_x, rmax2_loc_x]
    def getRT_pixel_x(self, image, scale):
        # Open image
        img = image

        # resize image
        scale_percent = scale  # percent of original size
        width, height = img.shape
        width = (width * scale_percent // 100)
        height = (height * scale_percent // 100)
        dim = [width, height]
        img = np.resize(img, dim)

        # Radon transform resized image
        nSteps = width
        thetaStart = -10
        thetaEnd = 10
        thetas = np.linspace(thetaStart, thetaEnd, nSteps)
        sinogram = radon(img, theta=thetas, circle=True)

        # Take absmax of intensity in dimensions of pixels
        absmax_sinogram = np.amax(sinogram, axis=1)
        peaks, properties = sp.signal.find_peaks(absmax_sinogram,
                                                 height=180,
                                                 distance=width / 4)
        vals = [peaks, absmax_sinogram[peaks]]
        return vals

    # Returns location of absolute max of Intensity vs Projection angle radon transform
    def getRT_angle_y(self, image, scale):
        # Open image
        img = image

        # resize image
        scale_percent = scale  # percent of original size
        width, height = img.shape
        width = (width * scale_percent // 100)
        height = (height * scale_percent // 100)
        dim = [width, height]
        img = np.resize(img, dim)

        # Radon transform resized image
        nSteps = height
        thetaStart = 80
        thetaEnd = 100
        thetas = np.linspace(thetaStart, thetaEnd, nSteps)
        sinogram = radon(img, theta=thetas, circle=True)

        # Take absolute max of sinogram across axis and find location where it occurs
        absmax_sinogram = np.amax(sinogram, axis=0)
        absmax_loc = thetas[np.where(absmax_sinogram == absmax_sinogram.max())]
        return absmax_loc

    # Returns tuple of abs max and rel max locations and magnitudes
    # Returns [rmax1_mag_y, rmax1_loc_y, amax_mag_y, amax_loc_y, rmax2_mag_y, rmax2_loc_y]
    def getRT_pixel_y(self, image, scale):
        # Open image
        img = image

        # resize image
        scale_percent = scale  # percent of original size
        width, height = img.shape
        width = (width * scale_percent // 100)
        height = (height * scale_percent // 100)
        dim = [width, height]
        img = np.resize(img, dim)

        # Radon transform resized image
        nSteps = height
        thetaStart = 80
        thetaEnd = 100
        thetas = np.linspace(thetaStart, thetaEnd, nSteps)
        sinogram = radon(img, theta=thetas, circle=True)

        # Take absmax of intensity in dimensions of pixels
        absmax_sinogram = np.amax(sinogram, axis=1)
        peaks, properties = sp.signal.find_peaks(absmax_sinogram,
                                                 height=180,
                                                 distance=width / 4)
        vals = [peaks, absmax_sinogram[peaks]]
        return vals

    # Evaluate cost function based on the three sinograms to be generated from image
    def calc_cf(self, image, scale):
        # Values needed for evaluation of cost function
        xparams = self.getRT_pixel_x(image, scale)
        yparams = self.getRT_pixel_y(image, scale)
        print('x params ' + str(xparams) + ' y params ' + str(yparams))
        zdisp_x = 0 - self.getRT_angle_x(image, scale)
        zdisp_y = 0 - self.getRT_angle_y(image, scale)
        rmax1_loc_x = xparams[0][0]
        rmax1_mag_x = xparams[1][0]
        rmax2_loc_x = xparams[0][2]
        rmax2_mag_x = xparams[1][2]
        rmax1_loc_y = yparams[0][0]
        rmax1_mag_y = yparams[1][0]
        rmax2_loc_y = yparams[0][2]
        rmax2_mag_y = yparams[1][2]
        amax_loc_x = xparams[0][1]
        amax_mag_x = xparams[1][1]
        amax_loc_y = yparams[0][1]
        amax_mag_y = yparams[1][1]

        # Cost function components
        s1 = np.square(self.IDEAL_Z_DISP - zdisp_x)
        s2 = np.square(self.X_MIDPOINT - amax_loc_x)
        s3 = np.square(rmax1_mag_x - rmax2_mag_x)
        s4 = np.square((amax_loc_x - rmax1_loc_x) - (rmax2_loc_x - amax_loc_x))
        s5 = np.square(self.IDEAL_Z_DISP - zdisp_y)
        s6 = np.square(self.Y_MIDPOINT - amax_loc_y)
        s7 = np.square(rmax1_mag_y - rmax2_mag_y)
        s8 = np.square((amax_loc_y - rmax1_loc_y) - (rmax2_loc_y - amax_loc_y))

        #print(str(s1) + ',' + str(s2) + ',' + str(s3) + ',' + str(s4) + ',' + str(s5) + ',' + str(s6) + ',' + str(s7) + ',' + str(s8))
        costFunction = np.sqrt(s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8)
        print(str(costFunction))
        # If more than 3 peaks were found, double the cost function
        if len(xparams[0]) > 3:
            return int(costFunction) * 2
        return int(costFunction)

    # Determines the stepSize based on value of cost function
    def calc_stepSize(self, cf):

        if cf > 0 and cf < 100:
            stepSize = 1
            return stepSize
        elif cf >= 100 and cf <= 200:
            stepSize = 5
            return stepSize
        elif cf > 200 and cf < 300:
            stepSize = 10
            return stepSize
        else:
            stepSize = 25
            return stepSize

    # Determines image resolution based on value of cf
    def calc_scaleFactor(self, cf):

        if cf > 0 and cf < 100:
            scaleFactor = 10
            return scaleFactor
        elif cf >= 100 and cf <= 200:
            scaleFactor = 10
            return scaleFactor
        else:
            scaleFactor = 10
            return scaleFactor

    # Coordinate descent over the five motors until the cost function drops below EPSILON
    def gradientDescent(self):
        # Create dequeue of motors for descent
        motorDeque = collections.deque([1, 2, 3, 4, 5])
        # Initialize variables to keep track of the number of steps and the step size
        numSteps = 0
        stepSize = 10
        scale = 10

        # Take a picture
        image = self.cam.takePicture().astype('uint8')
        # evaluate cost function based on the picture
        cf = self.calc_cf(image, scale)
        scale = self.calc_scaleFactor(cf)
        stepSize = self.calc_stepSize(cf)

        if (cf < self.EPSILON):
            return

        while (cf > self.EPSILON and numSteps < self.MAX_STEPS):
            # Start with first motor in dequeue
            curMotor = motorDeque.popleft()
            # Make sure to place element at end of deque
            motorDeque.append(curMotor)
            numSteps += 1
            # Choose a direction to try adjusting the motor
            direction = 0

            # Move appropriate motor

            # Rotational Z motor
            if curMotor == 1:
                self.controller.turnZ(direction, stepSize)
            # Rotational X motor
            elif curMotor == 2:
                self.controller.turnX(direction, stepSize)
            # Rotational Y motor
            elif curMotor == 3:
                self.controller.turnY(direction, stepSize)
            # Translational X motor
            elif curMotor == 4:
                self.controller.moveX(direction, stepSize)
            # Translational Z motor
            else:
                self.controller.moveZ(direction, stepSize)

            # Take a picture & evaluate new_cf
            image = self.cam.takePicture().astype('uint8')
            new_cf = self.calc_cf(image, scale)
            scale = self.calc_scaleFactor(new_cf)
            stepSize = self.calc_stepSize(new_cf)
            # From cf determine proper step size and image scale

            # If new_cf is higher, we must go in the other direction
            if new_cf > cf:
                direction = 1
                cf = new_cf

            # If cf is lower than epsilon, break the loop
            if cf < self.EPSILON:
                break

            while new_cf <= cf:
                # Tweak same motor in correct direction
                # Move appropriate motor

                # Rotational Z motor
                if curMotor == 1:
                    self.controller.turnZ(direction, stepSize)
                # Rotational X motor
                elif curMotor == 2:
                    self.controller.turnX(direction, stepSize)
                # Rotational Y motor
                elif curMotor == 3:
                    self.controller.turnY(direction, stepSize)
                # Translational X motor
                elif curMotor == 4:
                    self.controller.moveX(direction, stepSize)
                # Translational Z motor
                else:
                    self.controller.moveZ(direction, stepSize)
                numSteps += 1

                cf = new_cf
                image = self.cam.takePicture().astype('uint8')
                new_cf = self.calc_cf(image, scale)
                scale = self.calc_scaleFactor(new_cf)
                stepSize = self.calc_stepSize(new_cf)
                # If cf lower than epsilon, break the loop
                if new_cf < self.EPSILON:
                    break
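A minimal driver sketch for this class (assuming the Camera and MotorController hardware is already connected and configured):

# Sketch: run one full alignment pass.
if __name__ == '__main__':
    igd = imageGradientDescent()
    igd.gradientDescent()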
Example #17
    def setupApp(self):
        self.current_user['username'] = self.login.username_lineedit.text()
        print(self.current_user['uid'])
        self.startNetFinder()
        self.show()


    def startNetFinder(self):
        self.thread_finder = QThread()
        self.online_finder = NetFinder(self.current_user)
        self.online_finder.moveToThread(self.thread_finder)
        self.thread_finder.started.connect(self.online_finder.run)
        self.online_finder.new_client.connect(self.online_dialog.addOnlineUser)
        self.thread_finder.start()



if __name__ == '__main__':

    camera = Camera(0)

    app = QApplication([])
    # start_window = UI_Window(camera)
    start_window = NetCrawler()
    
    # apply_stylesheet(app, theme='dark_teal.xml')

    # start_window.show()
    app.exit(app.exec_())
Example #19
class Threading(QThread):

    sig_live_view = pyqtSignal(object)
    sig_photo = pyqtSignal(object)
    sig_error = pyqtSignal(object)
    sig_critical = pyqtSignal(int)

    def __init__(self, parent, expected_memory):
        self.log = logs.logger.add_module("Threading")
        QThread.__init__(self, parent)
        self.cam = Camera(expected_memory)
        self.cmd_fifo = queue.Queue()
        self.available_space = self.cam.get_available_space()
        self.filename = ""

    def sendThreadCommand(self, cmd):
        if not self.isRunning():
            self.start()
        self.cmd_fifo.put(cmd)

    def start_live(self):
        '''
        Starts the live preview. Frames are emitted via sig_live_view.
        '''
        self.log.debug("Start liveview")
        self.sendThreadCommand(Action.PREVIEW)

    def capture_image(self):
        '''
        Takes a photo. Data is emitted via sig_photo.
        '''
        self.log.debug("Capture image")
        self.sendThreadCommand(Action.CAPTURE)

    def get_available_space(self):
        return self.available_space

    def store_last(self, filename):
        self.log.debug("Store image")
        self.filename = filename
        self.sendThreadCommand(Action.STORE)

    def dismiss_last(self):
        self.log.debug("Delete image")
        self.sendThreadCommand(Action.DISMISS)

    def stop_thread(self):
        self.sendThreadCommand(Action.TERMINATE)
        while self.isRunning():
            self.yieldCurrentThread()
        self.log.info("Thread terminated")

    def run(self):
        self.log.info("Thread started")
        state = const.STATE_LIVE
        ts = 0
        while True:
            old_state = state
            if not self.cmd_fifo.empty():
                state = self.cmd_fifo.get()
                ts = time.time()

            if state == Action.PREVIEW:
                # continuously fetch the live image and send it to the GUI
                self.sig_live_view.emit(self.cam.fetch_preview())
            elif state == Action.CAPTURE:
                # take a single photo and send it to the GUI,
                # then wait until told to continue
                photo = self.cam.capture_image()
                if self.cam.last_image:
                    self.sig_photo.emit(photo)
                else:
                    self.sig_error.emit(photo)
                self.available_space = self.cam.get_available_space()
                state = Action.NONE
            elif state == Action.STORE:
                self.cam.store_last(self.filename)
                state = old_state
            elif state == Action.DISMISS:
                self.cam.dismiss_last()
                state = old_state
            elif state == Action.TERMINATE:
                return
            else:
                # do nothing, pause to spare the CPU
                # BUGFIX: if the camera is not used for 30 minutes, it switches itself off.
                # If the selection screen is shown for at least that long, the application
                # crashes on the next camera access. Workaround: keep fetching a preview
                # from the camera every x seconds, but discard the data
                if ((ts + 10) < time.time()):
                    ts = time.time()
                    self.cam.fetch_preview()
                else:
                    self.msleep(100)

            if self.cam.error_count > const.MAX_ERROR_COUNT:
                self.sig_critical.emit(self.cam.error_count)
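A minimal wiring sketch for this worker; the parent window and slot names are illustrative, assuming PyQt5:

# Hypothetical GUI wiring; on_live_view/on_photo/on_error are assumed slots.
worker = Threading(main_window, expected_memory)
worker.sig_live_view.connect(main_window.on_live_view)
worker.sig_photo.connect(main_window.on_photo)
worker.sig_error.connect(main_window.on_error)
worker.sig_critical.connect(main_window.on_camera_critical)
worker.start_live()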
Example #20
from evaluate_gpu import get_nearest_neighbors
from evaluate_rerank import get_nearest_neighbors_rerank
import scipy.io
import torch
from flask import Flask, jsonify, request
from flask_cors import CORS
from detect import get_model, get_bbox_of_image, arg_parse
from cam import Camera
import _thread
import os

import numpy as np

app = Flask(__name__)
CORS(app)
cam = Camera(800, 600)

model_pcb = None
model_dense = None
model_yolo = None

result_pcb = scipy.io.loadmat('reid/pytorch_result_PCB.mat')
gf_pcb = torch.FloatTensor(result_pcb['gallery_f'])
gf_pcb = gf_pcb.cuda()

gc_pcb = result_pcb['gallery_cam'][0]
gl_pcb = result_pcb['gallery_label'][0]
gp_pcb = result_pcb['gallery_path'][..., 0]
# g_g_dist = np.dot(gf_pcb, np.transpose(gf_pcb))
Example #22
from base64 import b64decode

from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from tortoise.contrib.fastapi import register_tortoise

from authentication import router, check_token, credentials_exception
from cam import Camera

camera = Camera()
app = FastAPI()
app.include_router(router)


def stream_video():
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')


@app.get('/mstreamj.mjpg')
async def main(t: bytes):
    token = b64decode(t).decode('utf-8')
    print(token)
    if await check_token(token):
        return StreamingResponse(
            stream_video(),
            media_type='multipart/x-mixed-replace; boundary=frame')
    raise credentials_exception
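A client passes the token base64-encoded in the t query parameter; a minimal sketch using requests (the URL and token value are placeholders):

# Hypothetical client for the stream endpoint above.
from base64 import b64encode
import requests

token = b64encode(b'valid-token').decode('ascii')  # placeholder token
resp = requests.get('http://localhost:8000/mstreamj.mjpg',
                    params={'t': token}, stream=True)
for chunk in resp.iter_content(chunk_size=4096):
    pass  # each multipart part carries one JPEG frame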