import matplotlib as mpl
mpl.use('Agg')

import picamera, cv2

from lib.utils import dbprint, html_data_path
from lib.camera import update_img, ColorFix

clrLower = [0, 10, 10]
clrUpper = [64, 255, 255]

if __name__ == '__main__':
    with picamera.PiCamera() as camera:
        ss = ColorFix(camera)
        try:
            # data = ss.mask_range(clrLower, clrUpper)
            data = ss.blob_detection()
            if data:
                # cv2.imwrite(html_data_path('frame_%03d.jpg' % i), data['frame'])
                # cv2.imwrite(html_data_path('iframe_%03d.jpg' % i), data['iframe'])
                # cv2.imwrite(html_data_path('oframe_%03d.jpg' % i), data['oframe'])
                cv2.imwrite(html_data_path('frame.jpg'), data['frame'])
                cv2.imwrite(html_data_path('iframe.jpg'), data['iframe'])
                cv2.imwrite(html_data_path('oframe.jpg'), data['oframe'])
                # json.dump(data['hlist'], file('colorfix.json', 'w'))
            else:
                dbprint("NOT FOUND")
            # json.dump({'imgcount': i}, file(html_data_path('frames.json'), 'w'), indent=2)
        finally:
            update_img(camera)
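
# ColorFix lives in lib.camera and is not shown here. A minimal sketch of the
# HSV range masking that mask_range(clrLower, clrUpper) presumably performs;
# the ColorFix internals are an assumption, cv2.inRange itself is standard:
#
# import numpy as np
# import cv2
#
# def mask_range_sketch(bgr_frame, lower, upper):
#     # convert to HSV so the hue bounds above (0..64) select the target color
#     hsv = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2HSV)
#     mask = cv2.inRange(hsv, np.array(lower, dtype=np.uint8),
#                        np.array(upper, dtype=np.uint8))
#     # keep only the in-range pixels of the original frame
#     return cv2.bitwise_and(bgr_frame, bgr_frame, mask=mask)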
import time

import picamera

from lib.utils import dbprint, html_data_path
from lib.camera import update_img, FeatureProcess
from lib.frobo_ng import frobo_ng

if __name__ == '__main__':
    c = frobo_ng()
    # c.debug = True
    with picamera.PiCamera() as camera:
        fp = FeatureProcess(camera)
        try:
            update_img(camera, html_data_path('pic0.jpg'))
            c.turn(350)
            time.sleep(1)
            update_img(camera, html_data_path('pic1.jpg'))
            fp.percent()
            c.move_straight(fwd=True, max_steps=c.m2steps(0.5), max_secs=1)
            update_img(camera, html_data_path('pic2.jpg'))
            time.sleep(1)
            c.turn(90)
            time.sleep(1)
            c.move_straight(fwd=True, max_steps=c.m2steps(0.5), max_secs=1)
            update_img(camera, html_data_path('pic3.jpg'))
            time.sleep(1)
            c.turn(210)
            time.sleep(1)
            c.move_straight(fwd=True, max_steps=c.m2steps(0.5), max_secs=1)
        finally:
            update_img(camera)
#!/usr/bin/env python

import sys, os, time, json

import picamera, cv2

from lib.utils import dbprint, html_data_path
from lib.camera import update_img, ShapeSearch, capture_cvimage

if __name__ == '__main__':
    with picamera.PiCamera() as camera:
        ss = ShapeSearch(camera)
        try:
            data = ss.find_shapes(threshold=127)
            if data:
                if os.environ.get('RASPICAM_ROTATE', ''):
                    # rotate the frame by the configured angle around its center
                    angle = int(os.environ['RASPICAM_ROTATE'])
                    rows, cols, depth = data['frame'].shape
                    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
                    data['frame'] = cv2.warpAffine(data['frame'], M, (cols, rows))
                cv2.imwrite(html_data_path('shapes.jpg'), data['frame'])
                cv2.imwrite(html_data_path('thresh.jpg'), data['thresh'])
        finally:
            update_img(camera)
    fy = sp.interp1d(tlist, ylist, kind='linear')
    tlist = np.linspace(min(tlist), max(tlist), 50)
    dots[k] = []
    for i in range(len(tlist)):
        dots[k].append({'t': tlist[i], 'x': fx(tlist[i]), 'y': fy(tlist[i])})

im = Image.new('RGBA', (800, 800), (240, 240, 240, 0))
draw = ImageDraw.Draw(im)
colors = ('red', 'blue')
ci = 0
zoom = 4
for k in dots.keys():
    if dots[k]:
        clr = colors[ci]
        ci += 1
        x = dots[k][0]['x'] * zoom
        y = dots[k][0]['y'] * zoom
        draw.ellipse((x - 5, y - 5, x + 5, y + 5), fill=clr, outline=clr)
        for d in dots[k][1:]:
            draw.line((x, y, d['x'] * zoom, d['y'] * zoom), fill=clr)
            x = d['x'] * zoom
            y = d['y'] * zoom
            # a_x = d['acc']['x']
            # a_y = d['acc']['y']
            # draw.line((x, y, x + a_x, y + a_y), fill='green')
            # if d['dist'] > 0 and d['dist'] < 20:
            #     draw.ellipse((x-2, y-2, x+2, y+2), fill='orange', outline='orange')
            # if d.get('hit_warn', None):
            #     draw.ellipse((x-4, y-4, x+4, y+4), fill='yellow', outline='red')

im.save(html_data_path('drawing.jpg'))
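
# The fx interpolator is built in the portion truncated above; presumably it
# mirrors fy. A minimal sketch of the pattern, with the sample data here being
# hypothetical (sp is scipy.interpolate, as in the fragment):
#
# import numpy as np
# import scipy.interpolate as sp
#
# tlist = [0.0, 1.0, 2.0, 3.0]     # sample timestamps
# xlist = [0.0, 5.0, 12.0, 20.0]   # x positions at those timestamps
# fx = sp.interp1d(tlist, xlist, kind='linear')
# print(fx(1.5))                   # position interpolated between samples: 8.5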
            cnt = 20
            for tt in [cv2.THRESH_BINARY, cv2.THRESH_BINARY_INV, cv2.THRESH_TRUNC,
                       cv2.THRESH_TOZERO, cv2.THRESH_TOZERO_INV]:
                for t in range(t_start, 255, (t_end - t_start) / cnt):
                    data = ss.find_shapes(threshold=t, threshold_type=tt)
                    if data:
                        if os.environ.get('RASPICAM_ROTATE', ''):
                            angle = int(os.environ['RASPICAM_ROTATE'])
                            rows, cols, depth = data['frame'].shape
                            M = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
                            data['frame'] = cv2.warpAffine(data['frame'], M, (cols, rows))
                        cv2.imwrite(html_data_path('shapes_%03d.jpg' % i), data['frame'])
                        # cv2.putText(data['thresh'], 'Thresh: %d' % t, (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 2)
                        cv2.putText(data['thresh'], 'Thresh: %d, Type: %d' % (t, tt), (10, 20),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
                        cv2.imwrite(html_data_path('thresh_%03d.jpg' % i), data['thresh'])
                        i += 1
                        dbprint(('*' * 10) + ' Image %d written' % i)
            json.dump({'imgcount': i}, open(html_data_path('shapes.json'), 'w'), indent=2)
        finally:
            update_img(camera)
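
# For reference, what the five threshold types swept above do to a pixel row;
# plain cv2.threshold, independent of the ShapeSearch helper:
#
# import numpy as np
# import cv2
#
# gray = np.array([[0, 100, 200, 255]], dtype=np.uint8)
# for tt in (cv2.THRESH_BINARY, cv2.THRESH_BINARY_INV, cv2.THRESH_TRUNC,
#            cv2.THRESH_TOZERO, cv2.THRESH_TOZERO_INV):
#     # values above the threshold (127) are mapped according to the type
#     _, out = cv2.threshold(gray, 127, 255, tt)
#     print('%d: %s' % (tt, out.ravel()))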
import picamera, cv2

from lib.utils import dbprint, html_data_path
from lib.camera import update_img, FeatureProcess, capture_cvimage
from lib.frobo_ng import frobo_ng

if __name__ == '__main__':
    c = frobo_ng()
    # c.debug = True
    with picamera.PiCamera() as camera:
        fp = FeatureProcess(camera)
        try:
            fp.percent()
            c.tick_left(min_angle=10)
            data = fp.percent()
            if data:
                cv2.imwrite(html_data_path('pic1.jpg'), data['frame'])
                cv2.imwrite(html_data_path('frame.jpg'), data['frame'])
            dbprint('Left=%s' % (data['percent'] if data else data))
            c.tick_right(min_angle=20)
            data = fp.percent()
            if data:
                cv2.imwrite(html_data_path('pic2.jpg'), data['frame'])
            dbprint('Right=%s' % (data['percent'] if data else data))
        finally:
            update_img(camera)
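
# FeatureProcess is defined in lib.camera (not shown). Judging by the rperc
# computation in experiment2 further down, percent() most likely reports
# matched keypoints as a share of the baseline frame's keypoints; a sketch
# under that assumption, with hypothetical names:
#
# def match_percent_sketch(base_kp_count, good_matches):
#     # good_matches: pairs surviving the ratio test against the base frame
#     if base_kp_count == 0:
#         return None
#     return (len(good_matches) * 100) / base_kp_count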
#!/usr/bin/env python

import sys, os, time, json

import picamera, cv2

from lib.utils import dbprint, html_data_path
from lib.camera import update_img, StereoDisparity
from lib.frobo_ng import frobo_ng

if __name__ == '__main__':
    c = frobo_ng()
    # c.debug = True
    with picamera.PiCamera() as camera:
        fp = StereoDisparity(camera)
        try:
            c.tick_left(min_angle=3)
            fp.left_frame()
            c.tick_right(min_angle=3)
            fp.right_frame()
            fp.write_ply(html_data_path('disparity.ply'))
            cv2.imwrite(html_data_path('pic0.jpg'), fp.lframe)
            cv2.imwrite(html_data_path('pic1.jpg'), fp.rframe)
            cv2.imwrite(html_data_path('pic2.jpg'), fp.disparity)
        finally:
            update_img(camera)
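
# StereoDisparity is implemented in lib.camera (not shown). The core of any
# such class is a block-matching disparity computation; a minimal sketch using
# the OpenCV 2.4-era API that the rest of these scripts target:
#
# import cv2
#
# def disparity_sketch(lframe, rframe):
#     # the block matcher wants 8-bit grayscale input
#     lgray = cv2.cvtColor(lframe, cv2.COLOR_BGR2GRAY)
#     rgray = cv2.cvtColor(rframe, cv2.COLOR_BGR2GRAY)
#     # args: preset, ndisparities, SADWindowSize
#     stereo = cv2.StereoBM(cv2.STEREO_BM_BASIC_PRESET, 16, 15)
#     disp = stereo.compute(lgray, rgray)
#     # larger disparity = closer object; rescale to 0..255 for saving as JPEG
#     return cv2.normalize(disp, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)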
# Pinhole distance estimate: D = focal_px * marker_width / pixel_width
# D = 808 * 0.03 / 50.68 ~ 0.478 (measured 0.49)
# D = 808 * 0.03 / 32.31 ~ 0.75  (measured 0.745)

if __name__ == '__main__':
    if 1:
        r = redis.Redis()
        use_camera(r, width=1280, height=960)
        # use_camera(r, brightness=80, contrast=85)
        # use_camera(r, width=640, height=480, brightness=80, contrast=80)
        # use_camera(r, brightness=90, contrast=90)
        time.sleep(4)
        try:
            pass
            # markers = collect_markers(r, fpath=html_data_path('markers.jpg'))
            # print json.dumps(dict([(m['id'], m['distance']) for m in markers]), indent=2)
        finally:
            time.sleep(1)
            make_shot(r, fpath='redis:image')
            rimg = r.get('image')
            udp_send('opcplus', data_name='hubee.jpg', data=rimg)
            imgdata = np.asarray(bytearray(rimg), dtype=np.uint8)
            img = cv2.imdecode(imgdata, cv2.CV_LOAD_IMAGE_COLOR)
            cv2.imwrite(html_data_path('picam_0.jpg'), img)
            release_camera(r)
    else:
        cam = picamera.PiCamera()
        time.sleep(2)
        # update_img(cam, brightness=90, contrast=90, exposure_mode='off')
        update_img(cam)
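
# The calibration notes above pin the numbers down: 808 px focal length and a
# 3 cm wide marker. As a worked helper (the function name is hypothetical):
#
# def marker_distance(pixel_width, focal_px=808.0, marker_m=0.03):
#     # e.g. marker_distance(50.68) ~ 0.478 m, against 0.49 m measured
#     return focal_px * marker_m / pixel_width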
def clear_img():
    unlink(html_data_path('start.jpg'))
    for f in glob.glob(html_data_path('found*.jpg')):
        unlink(f)
    for f in glob.glob(html_data_path('circle*.jpg')):
        unlink(f)
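
# Note that the bare unlink() raises OSError when start.jpg is absent (e.g. on
# a first run). A variant that tolerates missing files, assuming the same
# html_data_path helper from lib.utils:
#
# import os, glob
#
# def clear_img_safe():
#     # glob only returns paths that exist, so missing files are skipped
#     for pattern in ('start.jpg', 'found*.jpg', 'circle*.jpg'):
#         for f in glob.glob(html_data_path(pattern)):
#             os.unlink(f)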
def update_img(camera, fpath=None):
    # fixed exposure so consecutive shots are comparable; optional fpath
    # matches the two-argument calls used by the scripts above
    camera.exposure_mode = 'off'
    camera.brightness = 50
    camera.contrast = 50
    camera.capture(fpath or html_data_path('picam_0.jpg'), use_video_port=True)
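
# Usage, matching the calls seen in the scripts above:
#
#   update_img(camera)                              # -> default picam_0.jpg
#   update_img(camera, html_data_path('pic0.jpg'))  # -> explicit target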
def experiment2(c, camera):

    def filterMatches(base_kp, kp, matches, ratio=0.75):
        # Lowe's ratio test: keep a match only when the best candidate is
        # clearly better than the second best
        mkp1, mkp2 = [], []
        for m in matches:
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                m = m[0]
                mkp1.append(base_kp[m.queryIdx])
                mkp2.append(kp[m.trainIdx])
        return zip(mkp1, mkp2)

    try:
        clear_img()
        camera.resolution = (160, 120)
        camera.framerate = 5
        cv_det = cv2.FeatureDetector_create(DETECTOR)
        cv_desc = cv2.DescriptorExtractor_create(EXTRACTOR)
        matcher = cv2.DescriptorMatcher_create(MATCHER)
        was_below = False
        compass = hmc5883l(gauss=4.7, declination=(12, 34))
        old_heading = init_heading = compass.heading()
        hbase_diff = 0
        h_dir = 1
        with picamera.array.PiRGBArray(camera) as stream:
            camera.capture(stream, format='bgr', use_video_port=True)
            base_frame = stream.array
        cv2.imwrite(html_data_path('start.jpg'), base_frame)
        circle_count = 0
        found_count = 0
        base_kp = cv_det.detect(base_frame)
        base_kp, base_desc = cv_desc.compute(base_frame, base_kp)
        base_kpl = len(base_kp)
        for i in range(200):
            # spin in place: wheels in opposite directions, short pulses
            c.right_move(True, 40)
            c.left_move(False, 40)
            time.sleep(0.1)
            c.stop()
            time.sleep(0.2)
            with picamera.array.PiRGBArray(camera) as stream:
                camera.capture(stream, format='bgr', use_video_port=True)
                frame = stream.array
            kp = cv_det.detect(frame)
            kp, desc = cv_desc.compute(frame, kp)
            matches = matcher.knnMatch(base_desc, trainDescriptors=desc, k=2)
            pairs = filterMatches(base_kp, kp, matches)
            lp = len(pairs)
            rperc = (lp * 100) / base_kpl
            heading = compass.heading()
            hdiff = abs(heading - init_heading)
            if h_dir > 0:
                if hbase_diff + 5 <= hdiff:
                    hbase_diff = hdiff
                elif hbase_diff >= hdiff + 5:
                    h_dir = -1
            else:
                if hbase_diff >= hdiff + 5:
                    hbase_diff = hdiff
                elif hbase_diff + 5 <= hdiff:
                    # found: heading swung back past the start azimuth, i.e.
                    # one full circle completed
                    dbprint('Circle!!!')
                    circle_count += 1
                    cv2.imwrite(html_data_path('circle%03d.jpg' % circle_count), frame)
                    h_dir = 1
            dbprint('%.2f%% - %.2f (%.2f, dh: %.2f)' %
                    (rperc, heading, hdiff, abs(old_heading - heading)))
            if was_below:
                if rperc > 60:
                    dbprint('Found close')
                    was_below = False
                    found_count += 1
                    cv2.imwrite(html_data_path('found%03d.jpg' % found_count), frame)
            else:
                if rperc < 10:
                    was_below = True
            old_heading = heading
    finally:
        c.stop()
        dbprint('stopped')
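
# DETECTOR, EXTRACTOR and MATCHER are module-level constants defined outside
# this fragment. One consistent assignment for the OpenCV 2.4 factory calls
# used above (these exact choices are an assumption):
#
# DETECTOR = 'ORB'                # cv2.FeatureDetector_create('ORB')
# EXTRACTOR = 'ORB'               # cv2.DescriptorExtractor_create('ORB')
# MATCHER = 'BruteForce-Hamming'  # Hamming distance suits ORB's binary descriptors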
import picamera

from lib.utils import dbprint, html_data_path
from lib.camera import update_img, FeatureProcess
from lib.frobo_ng import frobo_ng

AZIM_SHOT = 350
AZIM_MOVE = 90

if __name__ == '__main__':
    c = frobo_ng()
    c.debug = True
    with picamera.PiCamera() as camera:
        fp = FeatureProcess(camera)
        try:
            update_img(camera, html_data_path('pic0.jpg'))
            c.turn(AZIM_SHOT)
            update_img(camera, html_data_path('pic1.jpg'))
            fp.percent()
            c.turn(AZIM_MOVE)
            c.move_straight(fwd=True, max_secs=1, max_steps=100)
            c.turn(AZIM_SHOT)
            dbprint('Matches %s%%' % fp.percent())
            update_img(camera, html_data_path('pic2.jpg'))
            c.turn(AZIM_MOVE)
            c.move_straight(fwd=True, max_secs=1, max_steps=100)
            c.turn(AZIM_SHOT)
            dbprint('Matches %s%%' % fp.percent())
            update_img(camera, html_data_path('pic3.jpg'))
            c.turn(AZIM_MOVE)
            c.move_straight(fwd=True, max_secs=1, max_steps=100)
        finally:
            update_img(camera)