                          30.0, (int(w), int(h)))

# Preparations for the video loop
j = 0
finaldistx = []
finaldisty = []
finaldistz = []
totdistance = []
time = []
firstpass = True
offset = [0, 0, 0]
font = cv2.FONT_HERSHEY_SIMPLEX

i = 0
while i < 300:
    j = 1 + j
    img, timestamp = c.read(0)
    img3, timestamp = c.read(1)
    if True:
        # detect markers and estimate their pose in the first camera
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            img, aruco_dict, parameters=arucoParams)
        img2 = aruco.drawDetectedMarkers(img, corners, ids)
        h, w, cha = img2.shape
        rvecs, tvecs, objpoints = aruco.estimatePoseSingleMarkers(
            corners, sidel, mtx, dist)
        # detect markers and estimate their pose in the second camera
        corners2, ids2, rejectedImgPoints2 = aruco.detectMarkers(
            img3, aruco_dict, parameters=arucoParams)
        img4 = aruco.drawDetectedMarkers(img3, corners2, ids2)
        h2, w2, cha2 = img4.shape
        rvecs2, tvecs2, objpoints2 = aruco.estimatePoseSingleMarkers(
            corners2, sidel, mtx2, dist2)
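        # A minimal sketch (assumed, not part of the original listing) of how the
        # tvecs returned above could populate the finaldist*/totdistance lists.
        # It assumes a single marker is visible to camera 0 and that `offset`
        # stores the first reading so later positions are measured relative to it.
        if ids is not None and tvecs is not None:
            tx, ty, tz = tvecs[0][0]  # translation of the first marker, in units of sidel
            if firstpass:
                offset = [tx, ty, tz]
                firstpass = False
            dx, dy, dz = tx - offset[0], ty - offset[1], tz - offset[2]
            finaldistx.append(dx)
            finaldisty.append(dy)
            finaldistz.append(dz)
            totdistance.append((dx ** 2 + dy ** 2 + dz ** 2) ** 0.5)
            time.append(timestamp)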
class LabyrinthMaker:
    """LabyrinthMaker"""

    def __init__(self):
        self.start = datetime.now()
        self.cap = Camera([0], fps=30, resolution=Camera.RES_LARGE,
                          colour=True, auto_gain=True, auto_exposure=True,
                          auto_whitebalance=True)
        # self.cap = cv2.VideoCapture(0)
        self.laby = []
        self.frame = 0
        self.l_bg = None
        self.width = 1280
        self.height = 720
        self.l_average = 0
        self.l_frame = None
        self.grid = None
        self.mask = None
        self.f_num = 0
        self.fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        self.out = cv2.VideoWriter('video_out/cc.avi', self.fourcc,
                                   20.0, (self.width, self.height))

    def process_cam(self, sz, flip, bw=True):
        # get the frame
        frame, timestamp = self.cap.read()
        # ret, frame = self.cap.read()
        # crop to correct ratio
        # frame = frame[100:460, 0:640]
        frame = frame[0:360, 0:640]
        # -1 flip hori+vert / 1 flip vert / 0 flip hori
        frame = cv2.flip(frame, flip)
        # resize smaller for faster processing
        # small = cv2.resize(frame, (0, 0), fx=0.15, fy=0.15)
        small = cv2.resize(frame, (0, 0), fx=sz, fy=sz)
        small = cv2.cvtColor(small, cv2.COLOR_RGB2BGR)
        self.l_frame = small
        if not bw:
            return small
        gray = cv2.cvtColor(small, cv2.COLOR_BGR2GRAY)
        # threshold the image
        # otsu threshold is adaptive
        # so will adjust to the range present in the image
        ret, thr = cv2.threshold(gray, 1, 255,
                                 cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        # opening and dilating to remove noise
        # kernel size is size of operation
        kernel = np.ones((1, 1), np.uint8)
        opening = cv2.morphologyEx(thr, cv2.MORPH_OPEN, kernel, iterations=1)
        bg = cv2.dilate(opening, kernel, iterations=3)
        return bg

    def draw_cam(self):
        # draws the camera to a second window
        bg = self.process_cam(0.5, 1, bw=False)
        cv2.namedWindow("camera")
        cv2.moveWindow("camera", 0, 0)
        cv2.imshow("camera", bg)

    def draw_mask(self):
        if self.mask is not None:
            # saturate
            l_frame_hsv = cv2.cvtColor(self.l_frame,
                                       cv2.COLOR_BGR2HSV).astype("float32")
            h, s, v = cv2.split(l_frame_hsv)
            s = s * 5
            s = np.clip(s, 0, 255)
            l_frame_hsv = cv2.merge([h, s, v])
            self.l_frame = cv2.cvtColor(l_frame_hsv.astype("uint8"),
                                        cv2.COLOR_HSV2BGR)
            # Blur the camera image
            self.l_frame = cv2.blur(self.l_frame, (13, 13))
            # glPointSize(13.333*2)
            glPointSize(13.333)
            glBegin(GL_POINTS)
            for r in range(self.mask.rows):
                for c in range(self.mask.columns):
                    if not self.mask.cell_at(r, c):
                        bb, gg, rr = self.l_frame[r, c]
                        glColor3f(rr / 255, gg / 255, bb / 255)
                        glVertex2f((c + 0.5) * 13.333, (r + 0.5) * 13.333)
                        # glVertex2f((c+0.5)*(13.333*2), (r+0.5)*(13.333*2))
                        # bb, gg, rr = self.l_frame[r+1, c+1]
                        # glColor3f(rr/255, gg/255, bb/255)
                        # glVertex2f(c*13.333+12, r*13.333+12)
            glEnd()

    def draw_laby(self):
        # Draws the labyrinth to gl
        # set the line width for drawing
        glLineWidth(1)
        # set the color of the line
        glColor3f(0.1, 0.1, 0.2)
        # begin shape with pairs of lines
        glBegin(GL_LINES)
        # the list of points is backwards so reverse it
        # self.mz.reverse()
        # loop over coordinates adding all the vertices
        for loc in self.laby:
            x1, y1, x2, y2 = loc
            # @ 0.1 = *5
            # @ 0.25 = *2
            # @ 0.15 = *3.333
            glVertex2f(x1 * 3.333, y1 * 3.333)
            glVertex2f(x2 * 3.333, y2 * 3.333)
            # glVertex2f(x1*6.666, y1*6.666)
            # glVertex2f(x2*6.666, y2*6.666)
            # glVertex2f(x1*4, y1*4)
            # glVertex2f(x2*4, y2*4)
        # complete the shape and draw everything
        glEnd()

    def refresh_scene(self):
        # refresh the gl scene
        # NOTE: DOUBLE HEIGHT AND WIDTH FOR HIGHDPI, REDUCE FOR STANDARD
        # glViewport(0, 0, self.width*2, self.height*2)
        glViewport(0, 0, self.width, self.height)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        glOrtho(0.0, self.width, 0.0, self.height, 0.0, 1.0)
        # glOrtho(0.0, self.width*2, 0.0, self.height*2, 0.0, 1.0)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()

    def update(self):
        # update the labyrinth from camera image
        bg = self.process_cam(0.15, 1)
        # if not first frame
        if self.l_bg is not None:
            # calculate the average of the current bg
            average = np.average(bg)
            # compare to the last stored average
            diff = average - self.l_average
            # if there is a big enough difference +/-
            if diff > 1.65 or diff < -1.65:
                # translate numpy array to PIL image
                pil_im = Image.fromarray(bg)
                # PERHAPS COULD PUT SOME KIND OF BACKGROUND
                # SUBTRACTION HERE
                # make a mask from the image
                self.mask = Mask.from_img_data(pil_im)
                # build a grid from the unmasked areas
                self.grid = MaskedGrid(self.mask)
                # build walls in the grid
                RecursiveBacktracker.on(self.grid)
                # get walls as list of coordinate pairs for drawing
                self.laby = self.grid.to_point_pairs(cell_size=4)
                # save the new average
                self.l_average = average
        # save the background
        self.l_bg = bg

    def save_frame(self):
        glReadBuffer(GL_BACK)
        fbuffer = glReadPixels(0, 0, self.width, self.height,
                               GL_RGB, GL_UNSIGNED_BYTE)
        # np.frombuffer replaces the deprecated np.fromstring
        imagebuffer = np.frombuffer(fbuffer, np.uint8)
        fimage = imagebuffer.reshape(self.height, self.width, 3)
        image = Image.fromarray(fimage)
        image.save("video_out/frames/live/%s.png" % self.f_num, 'png')
        outim = cv2.cvtColor(fimage, cv2.COLOR_RGB2BGR)
        self.out.write(outim)

    def draw(self):
        glClearColor(1, 1, 1, 1)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
        self.refresh_scene()
        self.update()
        self.draw_mask()
        self.draw_laby()
        # self.save_frame()
        self.draw_cam()
        self.f_num = self.f_num + 1

    def main(self):
        if not glfw.init():
            return
        # http://www.glfw.org/docs/latest/monitor_guide.html#monitor_monitors
        # monitor = glfw.get_primary_monitor()
        # mode = monitor.video_mode
        window = glfw.create_window(self.width, self.height,
                                    "LabyrinthMaker_GLFW", None, None)
        if not window:
            glfw.terminate()
            return
        glfw.make_context_current(window)
        while not glfw.window_should_close(window):
            # render
            self.draw()
            glfw.swap_buffers(window)
            glfw.poll_events()
        glfw.terminate()
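# A minimal usage sketch (assumed, not part of the original listing): the GLFW
# version above is driven entirely from main(), which creates the window and
# runs the render loop until it is closed.
if __name__ == "__main__":
    maker = LabyrinthMaker()
    maker.main()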
class StillLabyrinthMaker:
    """StillLabyrinthMaker"""

    def __init__(self):
        self.start = datetime.now()
        self.cap = Camera([0], fps=30, resolution=Camera.RES_LARGE,
                          colour=True, auto_gain=True, auto_exposure=True,
                          auto_whitebalance=True)
        self.laby = []
        self.l_bg = None
        self.width = 1280
        self.height = 720
        self.l_average = 0
        self.mask = None
        self.grid = None
        self.col_frame = None
        self.c_size = 4

    def process_cam(self, sz, flip):
        frame, timestamp = self.cap.read()
        self.col_frame = frame.copy()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frame = frame[0:360, 0:640]
        # frame = cv2.flip(frame, flip)
        small = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        ret, thr = cv2.threshold(small, 1, 255,
                                 cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        kernel = np.ones((1, 1), np.uint8)
        opening = cv2.morphologyEx(thr, cv2.MORPH_OPEN, kernel, iterations=1)
        bg = cv2.dilate(opening, kernel, iterations=3)
        return bg

    def add_cam_col(self):
        # copy the camera colour into every unmasked cell of the labyrinth
        # image; each grid cell covers a 4x4 block of pixels, copied pixel
        # by pixel from the colour frame into the PIL image
        pix = self.laby.load()
        for r in range(self.mask.rows):
            for c in range(self.mask.columns):
                if not self.mask.cell_at(r, c):
                    for dr in range(4):
                        for dc in range(4):
                            red, gre, blu = self.col_frame[r * 4 + dr,
                                                           c * 4 + dc]
                            col = (red, gre, blu)
                            pix[c * 4 + dr, r * 4 + dc] = col

    def draw(self):
        self.add_cam_col()
        self.laby.save("{}/{}.png".format("laby", "testtesttest"),
                       "PNG", optimize=True)

    def main(self):
        bg = self.process_cam(0.125, -1)
        pil_im = Image.fromarray(bg)
        self.mask = Mask.from_img_data(pil_im)
        self.grid = MaskedGrid(self.mask)
        RecursiveBacktracker.on(self.grid)
        self.laby = self.grid.to_png(cell_size=self.c_size, save=False)
        self.draw()
        print("Completed In: %s" % (datetime.now() - self.start))
from pseyepy import Camera, Stream, Display
import cv2
import numpy as np

c = Camera([0, 1], resolution=[Camera.RES_LARGE, Camera.RES_LARGE])
# c = Camera(resolution=[Camera.RES_LARGE])
# c.exposure = 23

frame, timestamp = c.read(0)
frame2, t2 = c.read(1)

# matType = cv2.CV_8UC3
# test = cv2.CreateMat(480, 640, matType, frame)
test = np.array(frame, copy=True)
gray = cv2.cvtColor(test, cv2.COLOR_BGR2GRAY)
test2 = np.array(frame2, copy=True)
gray2 = cv2.cvtColor(test2, cv2.COLOR_BGR2GRAY)

cv2.imshow("test", gray)
cv2.imshow("test2", gray2)
cv2.waitKey(1)  # needed so the imshow windows actually render
print(test)

d = Display(c)
# s = Stream(c, file_name='example_movie.avi', display=True, codec='png')  # begin saving data to files
# s.end()

# d = Display(c)
# print(frame)
# s = Stream(c, file_name='example_movie.avi', display=True, codec='png')  # begin saving data to files
def main(cam, show, pseye):
    global processing
    if pseye == 1:
        cap = Camera([0], fps=60, resolution=Camera.RES_LARGE, colour=False)
        frame, timestamp = cap.read()
        # width, height = frame.shape
    else:
        cap = cv2.VideoCapture(cam)
    sz = 80
    run = True
    vals = {}
    tiles = []
    mz = cv2.imread("live_full_mz/live_full_mz.png")
    c = 1
    last_diff = 0
    # cv2.namedWindow("mz", cv2.WND_PROP_FULLSCREEN)
    # cv2.setWindowProperty("mz", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    if show == 0:
        window = False
    else:
        window = True
    while run:
        try:
            if pseye == 1:
                frame, timestamp = cap.read()
            else:
                ret, frame = cap.read()
            small = cv2.resize(frame, (0, 0), fx=0.18, fy=0.18)
            width, height = small.shape
            ret, thr = cv2.threshold(small, 1, 255,
                                     cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            kernel = np.ones((1, 1), np.uint8)
            opening = cv2.morphologyEx(thr, cv2.MORPH_OPEN, kernel,
                                       iterations=2)
            bg = cv2.dilate(opening, kernel, iterations=3)
            if not processing:
                processing = True
                tiles = processTiles(width, height, sz, bg)
                # convert tiles to numpy arrays so they can be shown with imshow
                tiles = [np.array(tile) for tile in tiles]
            if window:
                # cv2.imshow('cam', bg)
                for i, tile in enumerate(tiles):
                    cv2.imshow(('%s' % i), tile)
            k = cv2.waitKey(1)
            if k == ord('q'):
                run = False
            c += 1
        except Exception as e:
            print(e)
            pass
    # program closing, stop camera stream and close windows
    if pseye == 1:
        cap.end()
    else:
        cap.release()
    cv2.destroyAllWindows()
import cv2
import numpy as np
from pseyepy import Camera, Display

c = Camera([0], fps=60, resolution=Camera.RES_LARGE, colour=True)
# print("frame: {}, time: {}".format(frame, timestamp))
# d = Display(c)

run = True
frame, timestamp = c.read()

cv2.namedWindow("mz", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("mz", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cv2.setWindowProperty("mz", cv2.WND_PROP_ASPECT_RATIO, cv2.WINDOW_FREERATIO)

while run:
    # im = np.array(frame)
    frame, timestamp = c.read()
    # width, height = frame.shape
    frame = frame[100:320, 100:540]
    # b, g, r = cv2.split(im)
    cv2.imshow("mz", frame)
    # cv2.imshow("R", r)
    # cv2.imshow("G", g)
    # cv2.imshow("B", b)
    # waitKey is required for imshow to refresh; 'q' exits the loop
    k = cv2.waitKey(1)
    if k == ord('q'):
        run = False

c.end()
cv2.destroyAllWindows()
class LabyrinthMaker:
    """LabyrinthMaker"""

    def __init__(self):
        self.start = datetime.now()
        self.cap = Camera([0], fps=30, resolution=Camera.RES_LARGE,
                          colour=True, auto_gain=True, auto_exposure=True,
                          auto_whitebalance=True)
        self.laby = []
        self.l_bg = None
        self.width = 1280
        self.height = 720
        self.l_average = 0
        self.l_frame = None
        self.grid = None
        self.mask = None

    def process_cam(self, sz, flip, bw=True):
        # get the frame
        frame, timestamp = self.cap.read()
        # crop to correct ratio
        # frame = frame[100:460, 0:640]
        frame = frame[0:360, 0:640]
        # -1 flip hori+vert / 1 flip vert / 0 flip hori
        frame = cv2.flip(frame, flip)
        # resize smaller for faster processing
        # small = cv2.resize(frame, (0, 0), fx=0.15, fy=0.15)
        small = cv2.resize(frame, (0, 0), fx=sz, fy=sz)
        small = cv2.cvtColor(small, cv2.COLOR_RGB2BGR)
        self.l_frame = small
        if not bw:
            return small
        gray = cv2.cvtColor(small, cv2.COLOR_BGR2GRAY)
        # threshold the image
        # otsu threshold is adaptive
        # so will adjust to the range present in the image
        ret, thr = cv2.threshold(gray, 1, 255,
                                 cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        # opening and dilating to remove noise
        # kernel size is size of operation
        kernel = np.ones((1, 1), np.uint8)
        opening = cv2.morphologyEx(thr, cv2.MORPH_OPEN, kernel, iterations=1)
        bg = cv2.dilate(opening, kernel, iterations=3)
        return bg

    def draw_cam(self):
        # draws the camera to a second window
        bg = self.process_cam(0.5, 1, bw=False)
        cv2.namedWindow("camera")
        cv2.moveWindow("camera", 0, 0)
        cv2.imshow("camera", bg)

    def draw_mask(self):
        if self.mask is not None:
            # Blur the camera image
            self.l_frame = cv2.blur(self.l_frame, (10, 10))
            glPointSize(13.333)
            glBegin(GL_POINTS)
            for r in range(self.mask.rows):
                for c in range(self.mask.columns):
                    if not self.mask.cell_at(r, c):
                        bb, gg, rr = self.l_frame[r, c]
                        glColor3f(rr / 255, gg / 255, bb / 255)
                        glVertex2f((c + 0.5) * 13.333, (r + 0.5) * 13.333)
                        # bb, gg, rr = self.l_frame[r+1, c+1]
                        # glColor3f(rr/255, gg/255, bb/255)
                        # glVertex2f(c*13.333+12, r*13.333+12)
            glEnd()

    def draw_laby(self):
        # Draws the labyrinth to gl
        # set the line width for drawing
        glLineWidth(1)
        # set the color of the line
        glColor3f(0.1, 0.1, 0.1)
        # begin shape with pairs of lines
        glBegin(GL_LINES)
        # the list of points is backwards so reverse it
        # self.mz.reverse()
        # loop over coordinates adding all the vertices
        for loc in self.laby:
            x1, y1, x2, y2 = loc
            # @ 0.1 = *5
            # @ 0.25 = *2
            # @ 0.15 = *3.333
            glVertex2f(x1 * 3.333, y1 * 3.333)
            glVertex2f(x2 * 3.333, y2 * 3.333)
            # glVertex2f(x1*4, y1*4)
            # glVertex2f(x2*4, y2*4)
        # complete the shape and draw everything
        glEnd()

    def refresh_scene(self):
        # refresh the gl scene
        glViewport(0, 0, self.width, self.height)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        glOrtho(0.0, self.width, 0.0, self.height, 0.0, 1.0)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()

    def update(self):
        # update the labyrinth from camera image
        bg = self.process_cam(0.15, -1)
        # if not first frame
        if self.l_bg is not None:
            # calculate the average of the current bg
            average = np.average(bg)
            # compare to the last stored average
            diff = average - self.l_average
            # if there is a big enough difference +/-
            if diff > 1.5 or diff < -1.5:
                # translate numpy array to PIL image
                pil_im = Image.fromarray(bg)
                # make a mask from the image
                self.mask = Mask.from_img_data(pil_im)
                # build a grid from the unmasked areas
                self.grid = MaskedGrid(self.mask)
                # build walls in the grid
                RecursiveBacktracker.on(self.grid)
                # get walls as list of coordinate pairs for drawing
                self.laby = self.grid.to_point_pairs(cell_size=4)
                # save the new average
                self.l_average = average
        # save the background
        self.l_bg = bg

    def draw(self):
        glClearColor(1, 1, 1, 1)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
        self.refresh_scene()
        self.update()
        self.draw_mask()
        self.draw_laby()
        self.draw_cam()
        glutSwapBuffers()

    def main(self):
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)
        glutInitWindowSize(self.width, self.height)
        glutInitWindowPosition(1280, 0)
        window = glutCreateWindow("LabyrinthMaker")
        # hide the cursor (needs a current window, so set after creation)
        glutSetCursor(GLUT_CURSOR_NONE)
        glutDisplayFunc(self.draw)
        glutIdleFunc(self.draw)
        glutMainLoop()
c1 = Camera([0, 1], resolution=[Camera.RES_LARGE, Camera.RES_LARGE])

num = 0
while num < 20:
    # check for char
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(sys.stdin.fileno())
        ch = sys.stdin.read(1)
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    # read from the camera/s
    frame, timestamp = c1.read()
    # print(frame)
    nparr = np.asarray(frame)
    print(nparr.shape)
    im1 = Image.fromarray(nparr[0])
    im2 = Image.fromarray(nparr[1])
    im1.save("left/left_{}.jpg".format(num))
    im2.save("right/right_{}.jpg".format(num))
    num = num + 1

# im1 = cv2.imread("im0.png", 0)
# im2 = cv2.imread("im1.png", 0)
# cv2.imshow('image', im2)
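# A minimal sketch (assumed, not part of the original listing) of how the saved
# left_*.jpg images could be used for single-camera chessboard calibration; the
# resulting mtx/dist would presumably correspond to the mtx/dist passed to the
# ArUco pose code earlier. The 9x6 inner-corner pattern and the file glob are
# assumptions.
import glob
import cv2
import numpy as np

pattern = (9, 6)
objp = np.zeros((pattern[0] * pattern[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:pattern[0], 0:pattern[1]].T.reshape(-1, 2)

objpoints, imgpoints = [], []
for path in glob.glob("left/left_*.jpg"):
    gray = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    found, corners = cv2.findChessboardCorners(gray, pattern)
    if found:
        objpoints.append(objp)
        imgpoints.append(corners)

ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
    objpoints, imgpoints, gray.shape[::-1], None, None)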
def main(cam, show, pseye):
    if pseye == 1:
        cap = Camera([0], fps=60, resolution=Camera.RES_LARGE, colour=False)
        # frame, timestamp = cap.read()
        # width, height = frame.shape
    else:
        cap = cv2.VideoCapture(cam)
        # width = floor(cap.get(3)) // 2
        # height = floor(cap.get(4)) // 2
    # sz = 100
    run = True
    # vals = {}
    mz = cv2.imread("live_full_mz/live_full_mz.png")
    # mz_sm = cv2.resize(mz, (0,0), fx=0.3, fy=0.3)
    global processing
    c = 1
    # last_diff = 0
    cv2.namedWindow("mz", cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty("mz", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    cv2.setWindowProperty("mz", cv2.WND_PROP_ASPECT_RATIO,
                          cv2.WINDOW_FREERATIO)
    if show == 0:
        window = False
    else:
        window = True
    while run:
        try:
            if pseye == 1:
                frame, timestamp = cap.read()
                # width, height = frame.shape
            else:
                ret, frame = cap.read()
            frame = frame[100:320, 100:540]
            small = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
            # gray = cv2.cvtColor(small, cv2.COLOR_BGR2GRAY)
            ret, thr = cv2.threshold(small, 1, 255,
                                     cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            kernel = np.ones((1, 1), np.uint8)
            opening = cv2.morphologyEx(thr, cv2.MORPH_OPEN, kernel,
                                       iterations=1)
            bg = cv2.dilate(opening, kernel, iterations=2)
            # if c < 50:
            if not processing:
                # diff = detect_diffs_two(vals, c, width, height, sz, bg)
                # print(diff - last_diff)
                # d_sum = diff - last_diff
                # if d_sum > 0.005 or d_sum < -0.005:
                processing = True
                mz = processFrame(bg)
                try:
                    mz = np.array(mz)
                    # mz = cv2.resize(mz, (0, 0), fx=4, fy=4)
                except Exception as e:
                    print(e)
                    pass
                # mz = cv2.imread("live_full_mz/live_full_mz.png")
                # last_diff = diff
                # mz_sm = cv2.resize(mz, (0,0), fx=0.3, fy=0.3)
                # mz - cv2.imread("live_full/", 1)
                # detect_diffs(vals, c, width, height, sz, bg)
                # print("----")
            if window:
                try:
                    # cv2.imshow('cam', bg)
                    cv2.imshow('mz', mz)
                except Exception as e:
                    print(e)
                    pass
            k = cv2.waitKey(1)
            # if k == ord('+'):
            #     mz = cv2.resize(mz, (0,0), fx=1.1, fy=1.1)
            # if k == ord('-'):
            #     mz = cv2.resize(mz, (0,0), fx=0.9, fy=0.9)
            if k == ord('q'):
                run = False
            c += 1
        except Exception as e:
            print(e)
            pass
    if pseye == 1:
        cap.end()
    else:
        cap.release()
    cv2.destroyAllWindows()