def do_POST(self):
    self.send_response(201)
    self.send_header("Content-type", "image/jpeg")
    self.end_headers()
    # Capture an image and stream the saved file back in the response body.
    with open(camera.capture(), 'rb') as file:
        self.wfile.write(file.read())
    return
def control():
    global num_seed
    num_seed = 0
    model = load_model(MODEL_NAME)
    # -----------
    imagede = '/home/pi/Desktop/photos/default.jpg'
    img_default = load_image(imagede)
    classify(model, img_default)
    ## Initialisation
    pygame.init()
    ## Variables
    size = width, height = 300, 200
    bgColor = (0, 0, 0)
    ## Set the window width and height
    screen = pygame.display.set_mode(size)
    ## Set the window title
    pygame.display.set_caption("Team 1 Monitor")
    ## To use text in Pygame, a Font object must be created.
    ## The first argument selects the font, the second the font size.
    font = pygame.font.Font(None, 20)
    ## get_linesize() returns the height of each line of text
    line_height = font.get_linesize()
    position = 0
    screen.fill(bgColor)
    ## Create a text file for the log
    # f = open("record.txt", 'w')
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # Close the file
                # f.close()
                sys.exit()
                # print('GG\n')
            if event.type == pygame.KEYDOWN:
                # f.write(str(event) + '\n')
                if event.key == K_w:
                    # print('w\n')
                    cm.send('#W')
                elif event.key == K_s:
                    cm.send('#S')
                if event.key == K_j:
                    # print('w\n')
                    cm.send('#w')
                elif event.key == K_k:
                    cm.send('#s')
                elif event.key == K_d:
                    cm.send('#D')
                elif event.key == K_a:
                    cm.send('#A')
                elif event.key == K_x:
                    cm.send('#x')
                elif event.key == K_b:
                    cm.send('#b')
                # --------------------------------------------
                elif event.key == K_p:
                    camera.stop()
                    imagepath = '/home/pi/Desktop/photos/' + str(num_seed) + '.jpg'
                    img = load_image(imagepath)
                    label, prob, _ = classify(model, img)
                    print('we think image name:{} with certainty {} that it is {}'
                          .format(imagepath, prob, label))
                # ------------------------------
                # Object following; returns the detections.
                # Put the .h5 file in the working directory; input and output
                # live in the photos folder.
                elif event.key == K_g:
                    camera.stop()
                    imagepath = '/home/pi/Desktop/photos/' + str(num_seed) + '.jpg'
                    outputpath = '/home/pi/Desktop/photos/' + str(num_seed) + 'new.jpg'
                    execution_path = os.getcwd()
                    detector = ObjectDetection()
                    detector.setModelTypeAsRetinaNet()
                    detector.setModelPath(
                        os.path.join(execution_path, 'resnet50_coco_best_v2.0.1.h5'))
                    detector.loadModel()
                    a = time.time()
                    custom_objects = detector.CustomObjects(bottle=True)
                    detections = detector.detectCustomObjectsFromImage(
                        custom_objects=custom_objects,
                        input_image=imagepath,
                        output_image_path=outputpath,
                        minimum_percentage_probability=50,
                        box_show=True)
                    b = time.time()
                    print('the time is {}'.format(b - a))
                    print('the direction is {}'.format(detections[0]['direction']))
                    for eachObject in detections:
                        print(eachObject['name'] + ':' + eachObject['percentage_probability'])
                elif event.key == K_t:
                    num_seed = camera.capture(num_seed)
                elif event.key == K_q:
                    camera.stop()
                    print("==End of Photograph==")
                elif event.key == K_o:
                    camera.start()
                    print("==Begin of Photograph==")
                elif event.key == K_r:
                    camera.record()
                # render() renders the text to a Surface object.
                # The first argument is the text to render,
                # the second selects anti-aliasing,
                # the third sets the text colour.
                screen.blit(font.render(str(event), True, (0, 255, 0)), (0, position))
                position += line_height
                if position >= height:
                    position = 0
                    screen.fill(bgColor)
                pygame.display.flip()
def send_frame():
    while True:
        buf = camera.capture()
        yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + buf)
        del buf
        gc.collect()
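A minimal sketch of how a multipart generator like send_frame() is commonly served as an MJPEG stream; Flask and the /stream route name are assumptions, not part of the snippet above.

# Hypothetical consumer for send_frame(); Flask is an assumption here.
from flask import Flask, Response

app = Flask(__name__)

@app.route('/stream')
def stream():
    # Each chunk yielded by send_frame() already starts with the
    # '--frame' boundary and its Content-Type header.
    return Response(send_frame(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')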
print("GPIO.RPI_INFO['P1_REVISION'] = " + str(GPIO.RPI_INFO['P1_REVISION'])) io20 = 20 io21 = 21 GPIO.setmode(GPIO.BCM) GPIO.setup(io20, GPIO.IN, pull_up_down=GPIO.PUD_UP) GPIO.setup(io21, GPIO.OUT) print("PRess button to turn ON LED") try: while (True): if (GPIO.input(io20)): GPIO.output(io21, GPIO.LOW) else: GPIO.output(io21, GPIO.HIGH) camera.capture() except KeyboardInterrupt: print("\n") print("Exit by KeyboardInterrupt\n") except: print("\n") print("Exit by Other case!\n") finally: GPIO.cleanup(io20) GPIO.cleanup(io21) print("Clean up GPIO\n")
# Get current time [s] (UTC)
iso8601_time_string = clock.iso8601_time_string_using_computer_clock()

# Form the current basename for saving the image (this is the basename, no
# extension; the extension is left to the specific capture method)
basename = iso8601_time_string.replace(':', '-').replace('.', '-')
basename += '_'
basename += station_parameters['stationName']
local_basename = os.path.join(images_directory, basename)

# Capture image and save it to the local disk
capture_filepath = \
    camera.capture(station_parameters, camera_parameters,
                   local_basename, verbose=True)

# If the capture was not successful, exit the script
if capture_filepath is None:
    msg = 'Capture unsuccessful ...'
    msg += '\n'
    sys.stdout.write(msg)
    sys.stdout.flush()

    msg = '\n'
    msg += 'Turning off the camera ...'
    msg += '\n'
    sys.stdout.write(msg)
    sys.stdout.flush()

    camera.close(station_parameters, camera_parameters, verbose=False)
import logging

from arguments import parse_arguments
from camera import capture

logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)

if __name__ == "__main__":
    arguments = parse_arguments()
    if arguments.lower_limit:
        MIN_FRAME_PROB = arguments.lower_limit
    if arguments.trace_logs:
        logging.getLogger().setLevel(logging.DEBUG)
    # print(arguments.preview)
    capture(arguments.workdir, arguments.device, arguments.preview)
#!/usr/bin/env python
import picamera
import time

camera = picamera.PiCamera()
camera.start_preview()
time.sleep(10)
camera.capture('/home/pi/desktop/img.jpg')
camera.stop_preview()
import os
import machine
from machine import Pin, SPI
import sdcard
import camera

spisd = SPI(-1, sck=Pin(14), mosi=Pin(15), miso=Pin(2))
# time.sleep(0.1)
while True:
    try:
        sd = sdcard.SDCard(spisd, machine.Pin(13))
        break
    except OSError:
        print('Error connecting to SD card.')

flash = Pin(4, Pin.OUT)

try:
    os.mount(sd, '/sd')
except:
    os.umount('/sd')
    os.mount(sd, '/sd')
os.listdir('/sd')

# ---------------------------
camera.init()
# time.sleep(0.5)
flash.on()
img = camera.capture()
flash.off()

# camera.capture() returns JPEG bytes, so write the file in binary mode.
with open('/sd/.jpg', 'wb') as f:
    f.write(img)

camera.deinit()
os.listdir("/sd")
os.umount("/sd")
def main():
    camera.capture()

    blnKNNTrainingSuccessful = DetectChars.loadKNNDataAndTrainKNN()  # attempt KNN training

    if blnKNNTrainingSuccessful == False:                    # if KNN training was not successful
        print("\nerror: KNN training was not successful\n")  # show error message
        return                                               # and exit program
    # end if

    imgOriginalScene = cv2.imread("image.jpg")               # open image

    if imgOriginalScene is None:                             # if image was not read successfully
        print("\nerror: image not read from file \n\n")      # print error message to std out
        os.system("pause")                                   # pause so user can see error message
        return                                               # and exit program
    # end if

    listOfPossiblePlates = DetectPlates.detectPlatesInScene(imgOriginalScene)      # detect plates
    listOfPossiblePlates = DetectChars.detectCharsInPlates(listOfPossiblePlates)   # detect chars in plates

    cv2.imshow("imgOriginalScene", imgOriginalScene)         # show scene image

    if len(listOfPossiblePlates) == 0:                       # if no plates were found
        print("\nno license plates were detected\n")         # inform user no plates were found
    else:
        # if we get in here the list of possible plates has at least one plate

        # sort the list of possible plates in DESCENDING order
        # (most number of chars to least number of chars)
        listOfPossiblePlates.sort(key=lambda possiblePlate: len(possiblePlate.strChars),
                                  reverse=True)

        # suppose the plate with the most recognized chars (the first plate in the list
        # sorted by string length in descending order) is the actual plate
        licPlate = listOfPossiblePlates[0]

        cv2.imshow("imgPlate", licPlate.imgPlate)            # show crop of plate and threshold of plate
        cv2.imshow("imgThresh", licPlate.imgThresh)

        if len(licPlate.strChars) == 0:                      # if no chars were found in the plate
            print("\nno characters were detected\n\n")       # show message
            return                                           # and exit program
        # end if

        drawRedRectangleAroundPlate(imgOriginalScene, licPlate)  # draw red rectangle around plate

        print("\nlicense plate read from image = " + licPlate.strChars + "\n")  # write license plate text to std out
        print("----------------------------------------")

        writeLicensePlateCharsOnImage(imgOriginalScene, licPlate)  # write license plate text on the image

        cv2.imshow("imgOriginalScene", imgOriginalScene)     # re-show scene image
        cv2.imwrite("imgOriginalScene.png", imgOriginalScene)  # write image out to file

        ######### comparing with database
        global a
        a = licPlate.strChars
    # end if else

    cv2.waitKey(0)  # hold windows open until user presses a key
    return a
def frame_gen():
    while True:
        yield camera.capture()
# Example script to automatically set the shutter such that the brightest
# pixel is just below saturation
import camera as cam

cam.init()
cam.autoAdjustShutter()

# Finally
cam.capture()
cam.close()
def main():
    drive = Image_to_Text()
    capture(duration=10)
    print("Image captured")
    res = drive.image2text('../assets/capture.png')[20:200]
    speak(res)
def photo(name):
    flash_light.on()
    img = camera.capture()
    flash_light.off()
    # camera.capture() returns raw JPEG bytes, so write in binary mode.
    with open(name, 'wb') as f:
        f.write(img)
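A small usage sketch for photo() above; the timestamped path and the pre-initialised camera/flash_light objects are assumptions.

# Hypothetical caller for photo(); assumes camera and flash_light are
# already initialised elsewhere in the module.
import time

filename = '/sd/img_{}.jpg'.format(int(time.time()))
photo(filename)
print('saved', filename)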
def frame_gen():
    while True:
        buf = camera.capture()
        yield buf
        del buf
        gc.collect()
def optimise(stepCount=5, waveguideSizeX=5000, waveguideSizeZ=2000,
             fineStep=50, iterationLimit=10):
    '''
    stepCount: The initial coarse optimisation step count in both directions.
    waveguideSizeX: size of waveguide in nanometers in the x direction (along the 1D array)
    waveguideSizeZ: the thickness of waveguide
    fineStep: The fine tuning step in nanometers.
    iterationLimit: The number of fine tuning routines
    '''
    # start with a rough scan through the entire waveguide + the space around it
    stepSizeX = int(np.floor(1.1 * waveguideSizeX / 2 / stepCount))
    stepSizeZ = int(np.floor(1.1 * waveguideSizeZ / 2 / stepCount))
    X = bpc.getPosition(2)
    Z = bpc.getPosition(1)
    maxVal = 0
    maxX = X
    maxZ = Z
    print("------------------------")
    print("Starting coarse optimisation. This should take only a short while.")
    for i in range(-stepCount, stepCount + 1, 1):
        # Scan in the X direction first
        pos = X + i * stepSizeX
        bpc.position(2, pos)
        img = cam.capture(False, True)
        val = np.max(img)
        _printDebugInfo(val, pos, Z, maxVal, maxX, maxZ, img)
        if val > maxVal:
            maxVal = val
            maxX = pos
    # return to max X position
    bpc.position(2, maxX)
    for i in range(-stepCount, stepCount + 1, 1):
        # Scan in the Z direction
        pos = Z + i * stepSizeZ
        bpc.position(1, pos)
        img = cam.capture(False, True)
        val = np.max(img)
        _printDebugInfo(val, maxX, pos, maxVal, maxX, maxZ, img)
        if val > maxVal:
            maxVal = val
            maxZ = pos
    # move to maxX and maxZ, then start fine tuning
    print("------------------------")
    print("Coarse tuning complete. Current max CCD value is {} and position is at ({}, {})"
          .format(maxVal, maxX, maxZ))
    bpc.position(1, maxZ)
    bpc.position(2, maxX)
    i = 0
    # Fine tuning moves the fiber around 20% of the waveguide (2 x 1 / 10)
    dX = int(waveguideSizeX / 10)
    dZ = int(waveguideSizeZ / 10)
    while i < iterationLimit:
        # Termination condition
        if i != 0 and maxX == bX and maxZ == bZ:
            print("*****************************************************************************")
            print("No improvement for the last completed fine tuning routine. Iteration complete.")
            break
        print("------------------------")
        print("Iteration {} - Current maximum {} at position ({}, {})".format(i, maxVal, maxX, maxZ))
        i += 1
        # max values before each iteration
        bX = maxX
        bZ = maxZ
        # Optimise X
        for XX in range(maxX - dX, maxX + dX, fineStep):
            bpc.position(2, XX)
            img = cam.capture(False, True)
            val = np.max(img)
            _printDebugInfo(val, XX, maxZ, maxVal, maxX, maxZ, img)
            if val > maxVal:
                maxVal = val
                maxX = XX
        # Now return X to the max position and optimise Z
        bpc.position(2, maxX)
        for ZZ in range(maxZ - dZ, maxZ + dZ, fineStep):
            bpc.position(1, ZZ)
            img = cam.capture(False, True)
            val = np.max(img)
            _printDebugInfo(val, maxX, ZZ, maxVal, maxX, maxZ, img)
            if val > maxVal:
                maxVal = val
                maxZ = ZZ
    return maxX, maxZ
# An example using OpenCV to process the captured image
import camera
import cv2

camera.init(0)
img = camera.capture(False)
camera.close()

cv2.imshow("image", img)
cv2.waitKey()
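Continuing the snippet above with a small, hedged processing step; it assumes camera.capture(False) returns a BGR NumPy array that OpenCV can consume directly.

# Hypothetical follow-up processing, assuming img is a BGR NumPy array.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 100, 200)
cv2.imshow("edges", edges)
cv2.waitKey()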
def control():
    global num_seed
    num_seed = 0
    model = load_model(MODEL_NAME)
    ## Initialisation
    pygame.init()
    ## Variables
    size = width, height = 300, 200
    bgColor = (0, 0, 0)
    ## Set the window width and height
    screen = pygame.display.set_mode(size)
    ## Set the window title
    pygame.display.set_caption("Team 1 Monitor")
    ## To use text in Pygame, a Font object must be created.
    ## The first argument selects the font, the second the font size.
    font = pygame.font.Font(None, 20)
    ## get_linesize() returns the height of each line of text
    line_height = font.get_linesize()
    position = 0
    screen.fill(bgColor)
    ## Create a text file for the log
    # f = open("record.txt", 'w')
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # Close the file
                # f.close()
                sys.exit()
                # print('GG\n')
            if event.type == pygame.KEYDOWN:
                # f.write(str(event) + '\n')
                if event.key == K_w:
                    # print('w\n')
                    cm.send('#W')
                elif event.key == K_s:
                    cm.send('#S')
                if event.key == K_j:
                    # print('w\n')
                    cm.send('#w')
                elif event.key == K_k:
                    cm.send('#s')
                elif event.key == K_d:
                    cm.send('#D')
                elif event.key == K_a:
                    cm.send('#A')
                elif event.key == K_x:
                    cm.send('#x')
                elif event.key == K_b:
                    cm.send('#b')
                # --------------------------------------------
                # The code below was added by Yun: pressing P starts prediction;
                # correspondingly, a seed is passed to the capture call for numbering.
                elif event.key == K_p:
                    imagepath = '/home/pi/Desktop/photos/' + str(num_seed) + '.jpg'
                    img = load_image(imagepath)
                    label, prob, _ = classify(model, img)
                    print('we think image name:{} with certainty {} that it is {}'.format(imagepath, prob, label))
                # ---------------------------------------------
                # ---------------------------------------------
                elif event.key == K_t:
                    num_seed = camera.capture(num_seed)
                elif event.key == K_q:
                    camera.stop()
                    print("==End of Photograph==")
                elif event.key == K_o:
                    camera.start()
                    print("==Begin of Photograph==")
                elif event.key == K_r:
                    camera.record()
                # render() renders the text to a Surface object.
                # The first argument is the text to render,
                # the second selects anti-aliasing,
                # the third sets the text colour.
                screen.blit(font.render(str(event), True, (0, 255, 0)), (0, position))
                position += line_height
                if position >= height:
                    position = 0
                    screen.fill(bgColor)
                pygame.display.flip()
import uos
import time
import camera
from machine import SDCard

uos.mount(SDCard(), '/sd')
uos.chdir('sd')

camera.init(0, format=camera.JPEG)

i = 0
while i < 10000:
    buf = camera.capture()
    imgname = str(i) + ".jpg"
    # JPEG frames are bytes, so open the file in binary mode.
    img = open(imgname, 'wb')
    img.write(buf)
    img.close()
    time.sleep(1)
    i += 1
def iCaptureImage():
    if exists(P["photo"]):
        os.remove(P["photo"])
    camera.capture(P)
    assert exists(P["photo"]), "No file."
import cv2
import numpy as np
from scipy import ndimage
import serial
import struct
import time

n = 10
X = fittest.X
Y = fittest.Y

scanning.init_show()
cv2.moveWindow("SLM", 1360, 0)
time.sleep(1)

display = camera.capture()
# cv2.imshow('display', display)
# cv2.waitKey(1)
(x_center, y_center) = ndimage.measurements.center_of_mass(display)
input()

measurement = np.zeros((95, 53, 10))
phase = np.zeros((95, 53))
amplitude = np.zeros((95, 53))
phase_file = open("phase.txt", "a")
amplitude_file = open("amplitude.txt", "a")

for i in range(-47, 48):
    for j in range(-26, 27):
        sinusoidal = np.zeros(n)
async def start(to):
    await asyncio.sleep(.12)
    print(color.blue() + 'STARTING PYCOM CAM' + color.normal())
    await asyncio.sleep(1)
    while True:
        cam_server = sys_info.get('cam_server')
        cam_host = cam_server['host']
        port = cam_server['port']
        cam_address = cam_server['address']
        wifi_st = sys_info.get('wifi')
        if wifi_st == False:
            await asyncio.sleep(3)
        if wifi_st != False:
            if cam_address == '':
                if cam_host != '':
                    try:
                        cam_address = socket.getaddrinfo(cam_host, port)[0][-1]
                        sys_info.setd('cam_server', 'address', cam_address)
                    except:
                        cam_address = ''
                        print('Error getting CAM addr info')
                else:
                    print('cam_host not ready')
                    await asyncio.sleep(3)
            else:
                conn_try = 0
                print(color.blue() + '{\n\tCONNECTING TO PYCOM CAM' + color.normal())
                try:
                    s = socket.socket()
                    s.setblocking(False)
                    connected = False
                    while connected == False:
                        try:
                            s.connect(cam_address)
                        except OSError as e:
                            if str(e) == "127":
                                connected = True
                            else:
                                conn_try = conn_try + 1
                                if conn_try > to:
                                    print(color.red() + '\tCAM CONN F' + color.normal())
                                    conn_try = to
                                    break
                                await asyncio.sleep(.1)
                                pass
                    # connected
                    if conn_try < to:
                        print(color.blue() + '\tconnected to cam_address' + color.normal())
                        print('\tsending img')
                        img_data = {'user': machine_data.get_key('user'),
                                    'id': machine_data.get_key('id')}
                        id_data = json.dumps(img_data)
                        s.send(id_data.encode())
                        conn_try = 0
                        # s | | | |  #0#1#2  #s | | | |
                        while True:
                            if conn_try > to:
                                print('\n\tcouldnt send picture')
                                break
                            frame = False
                            cam.light('1')
                            print('\tgetting img')
                            frame = camera.capture()
                            cam.light('0')
                            # s | | | |  #0#1#2  #s | | | |
                            print('\n\tsending img')
                            while True:
                                try:
                                    while frame:
                                        sent = s.send(frame)
                                        frame = frame[sent:]
                                        await asyncio.sleep(.01)
                                    print('\timg sent')
                                    break
                                except OSError as e:
                                    if conn_try > to:
                                        print(color.red() + 'CAM SEND F' + color.normal())
                                        conn_try = 0
                                        break
                                    conn_try = conn_try + 1
                                    await asyncio.sleep(.1)
                            # s | | | |  #0#1#2  #s | | | |
                            print('\tsending end line')
                            data = json.dumps({'command': 'imgsent'})
                            data = data.encode()
                            while True:
                                try:
                                    while data:
                                        sent = s.send(data)
                                        data = data[sent:]
                                    conn_try = 0
                                    print('\tdata sent')
                                    machine_data.set('command', {'command': 'wait'})
                                    break
                                except OSError as e:
                                    if conn_try > to:
                                        print(color.red() + 'DATA SEND F' + color.normal())
                                        break
                                    conn_try = conn_try + 1
                                    await asyncio.sleep(.1)
                            # s | | | |  #0#1#2  #s | | | |
                            print('\treceiving CAM server data')
                            while True:
                                try:
                                    res = s.recv(256)
                                    await asyncio.sleep(.01)
                                    if str(res).find('command') != -1:
                                        print('\tCAM server data received: ')
                                        print(res)
                                        conn_try = 0
                                        break
                                    if conn_try > to * 10:
                                        print(color.red() + '\tCAM RECV F' + color.normal())
                                        break
                                    conn_try = conn_try + 1
                                except OSError as e:
                                    if conn_try > to:
                                        print(color.red() + '\tERROR CAM RECV F' + color.normal())
                                        break
                                    conn_try = conn_try + 1
                                    await asyncio.sleep(.1)
                            await asyncio.sleep(.1)
                    # s | | | |  #0#1#2  #s | | | |
                    print(color.yellow() + '\tcam conn_try', conn_try)
                    print(color.red() + '\tcam out\n}\n' + color.normal())
                    sys_info.setd('cam_server', 'timeout', conn_try)
                    s.close()
                    del s
                except OSError as e:
                    print('cam socket failed', str(e))
        gc.collect()
        await asyncio.sleep(.1)
def rdImage():
    q = camera.capture()
    return b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + q
async def cam(to):
    print(color.blue() + 'SENDING CAM' + color.normal())
    await asyncio.sleep(1)
    conn_try = 0
    while True:
        connected = False
        # print('try to connect')
        try:
            s = socket.socket()
            s.setblocking(False)
            while connected == False:
                try:
                    s.connect(cam_address)
                except OSError as e:
                    # print(conn_try)
                    if str(e) == "127":
                        connected = True
                        conn_try = 0
                    else:
                        conn_try = conn_try + 1
                        if conn_try > to:
                            print(color.red() + 'CAM CONN F' + color.normal())
                            break
                        await asyncio.sleep(.05)
                        pass
            if conn_try != to:
                print(color.green() + '{\n\tconnected to cam_address' + color.normal())
                try:
                    n_try = 0
                    buf = False
                    while (n_try < 10 and buf == False):  # {
                        # wait for the sensor to start and focus before capturing an image
                        print('\tgetting img')
                        buf = camera.capture()
                        if (buf == False):
                            await asyncio.sleep(2)
                        n_try = n_try + 1
                    print('\tsending img')
                    while True:
                        try:
                            type = 'image/jpeg'
                            size = len(buf)
                            req = 'snap/dev_cam01'
                            header = h['POST'].replace('$req', req).replace('$type', type)
                            s.send(b'%s %d\r\n\r\n' % (header, size))
                            while buf:
                                sent = s.send(buf)
                                buf = buf[sent:]
                                await asyncio.sleep(.5)
                            break
                        except OSError as e:
                            print(e)
                            if conn_try > to:
                                print(color.red() + 'CAM SEND F' + color.normal())
                                break
                            conn_try = conn_try + 1
                    # POST(s, 'snap/dev_cam01', 'img', buf)
                    print('\timg sent')
                except OSError as e:
                    print('\tsending cam failed ' + str(e))
                s.close()
                print(color.red() + '\tcam out\n}\n' + color.normal())
                del s
        except OSError as e:
            print('cam socket failed', str(e))
        gc.collect()
        conn_try = 0
        await asyncio.sleep(.1)
def capture():
    camera.capture()
    flash('Successfully took a picture')
    return redirect(url_for('index'))
def index(req, resp):
    # parse query string
    req.parse_qs()
    flash = req.form.get('flash', 'false')
    if flash == 'true':
        led.on()
    stream = req.form.get('stream', 'false')
    stream = True if stream == 'true' else False

    # Camera resilience - if we fail to init, try to deinit and init again
    if (not camera.init(0, format=camera.JPEG)):
        camera.deinit()
        await asyncio.sleep(1)
        # If we fail to init, return a 503
        if (not camera.init(0, format=camera.JPEG)):
            yield from picoweb.start_response(resp, status=503)
            yield from resp.awrite('ERROR: Failed to initialise camera\r\n\r\n')
            return

    # wait for the sensor to start and focus before capturing an image
    await asyncio.sleep(2)

    n_frame = 0
    while True:
        n_try = 0
        buf = False
        while (n_try < 10 and buf == False):  # {
            # wait for the sensor to start and focus before capturing an image
            buf = camera.capture()
            if (buf == False):
                await asyncio.sleep(2)
            n_try = n_try + 1

        if (not stream):
            led.off()
            camera.deinit()

        if (type(buf) is bytes and len(buf) > 0):
            try:
                if (not stream):
                    yield from picoweb.start_response(resp, "image/jpeg")
                    yield from resp.awrite(buf)
                    print('JPEG: Output frame')
                    break
                if (n_frame == 0):
                    yield from picoweb.start_response(resp, "multipart/x-mixed-replace; boundary=myboundary")
                yield from resp.awrite('--myboundary\r\n')
                yield from resp.awrite('Content-Type: image/jpeg\r\n')
                yield from resp.awrite('Content-length: ' + str(len(buf)) + '\r\n\r\n')
                yield from resp.awrite(buf)
            except:
                # Connection gone?
                print('Connection closed by client')
                led.off()
                camera.deinit()
                return
        else:
            if (stream):
                led.off()
                camera.deinit()
            # picoweb.http_error(resp, 503)
            yield from picoweb.start_response(resp, status=503)
            if (stream and n_frame > 0):
                yield from resp.awrite('Content-Type: text/html; charset=utf-8\r\n\r\n')
                yield from resp.awrite('Issues:\r\n\r\n' + str(buf))
            return
        print('MJPEG: Output frame ' + str(n_frame))
        n_frame = n_frame + 1
def create(config):
    image = camera.capture()
    with open(config) as file:
        # use a distinct local name so the detector module is not shadowed
        det = detector.Detector(json.load(file)["detector"])
    results = det.detect(image)
    click.echo(results)
async def start(to):
    await asyncio.sleep(.24)
    print(color.yellow() + 'STARTING AP_CAM' + color.normal())
    await asyncio.sleep(1)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    a = ('0.0.0.0', 81)
    s.bind(a)
    s.listen(2)  # queue at most 2 clients
    s.setblocking(False)
    while True:
        try:
            client, addr = s.accept()
            ip, port = str(addr[0]), str(addr[1])
            print('{')
            print(color.yellow() + '\tConnection from ' + ip + ':' + port)
            # Use:
            poller = uselect.poll()
            poller.register(client, uselect.POLLIN)
            res = poller.poll(50)  # time in milliseconds
            if not res:
                print('\toperation timed out')
            else:
                client_data = client.recv(1024)
                client_data = client_data.decode('utf-8')
                req = client_data.split(' ')
                try:
                    print('\t', req[0], '##', req[1], "##", addr)
                    req = req[1].split('/')
                    print('req split', req)
                except OSError as e:
                    print('\t#failed to split req', e)
                while True:
                    try:
                        if req[1] == 'live':
                            print('\taccessing', req[1])
                            client.send(b'%s' % hdr.get('stream'))
                            client.send(b'%s' % hdr.get('frame'))
                            n_try = 0
                            buf = False
                            cam.light('1')
                            while (n_try < 10 and buf == False):  # {
                                # wait for the sensor to start and focus before capturing an image
                                # print('\tgetting img')
                                buf = camera.capture()
                                if (buf == False):
                                    await asyncio.sleep(1)
                                n_try = n_try + 1
                            cam.light('0')
                            # print('\tsending img:', len(buf))
                            try:
                                while buf:
                                    sent = client.send(buf)
                                    buf = buf[sent:]
                                # print('\timg sent')
                            except OSError as e:
                                print('send apcam error', e)
                            client.send(b'\r\n')  # send and flush the send buffer
                    except OSError as e:
                        print(e)
                        break
                    await asyncio.sleep(.1)
            client.close()
            print(color.red() + '\tConnection ' + ip + ':' + port + ' closed' + color.normal() + '\n}')
        except OSError as e:
            if str(e) != '[Errno 11] EAGAIN':
                print(e)
        await asyncio.sleep(.1)
if __name__ == '__main__':
    init()
    positions = create_arm_positions(num_positions)
    print('Number positions: {}'.format(len(positions)))
    for _positions in positions:
        print('Position: {}'.format(_positions[0]))
        if move_to(_positions, photobox_size[0] / 2 - 0.10) and not test:
            for i in range(num_spins):
                deg = 360 * i / num_spins
                print('Turntable angle: {} degrees'.format(deg))
                set_turntable_deg(deg)
                if not simulation:
                    print('Capturing photo')
                    file = camera.capture()
                    if file is not None:
                        print('preview: ' + file)
                        photogrammetry_host = rospy.get_param('~photogrammetry_host', None)
                        photogrammetry_http_port = rospy.get_param('~photogrammetry_http_port', None)
                        photogrammetry_password = rospy.get_param('~photogrammetry_password', '')
                        if photogrammetry_host is not None and photogrammetry_http_port is not None and photogrammetry_password is not None:
                            print('Transferring capture to photogrammetry server')
                            try:
def _img(httpClient, httpResponse):
    x = camera.capture()
    content = base64.b64encode(x)
    httpResponse.WriteResponseOk(headers=None,
                                 contentType="text/html",
                                 contentCharset="UTF-8",
                                 content=content)
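A hedged sketch of a client decoding the base64 payload returned by _img(); the device URL, the /img route, and the use of requests are assumptions, not part of the handler above.

# Hypothetical client for the _img handler; the URL is an assumption.
import base64
import requests

resp = requests.get('http://192.168.4.1/img')
jpeg_bytes = base64.b64decode(resp.content)
with open('capture.jpg', 'wb') as f:
    f.write(jpeg_bytes)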
import uos
import machine
from machine import Pin
import camera
import time

flash = Pin(4, Pin.OUT)
flash.value(1)
time.sleep(1)
flash.value(0)

uos.mount(machine.SDCard(), "/sd")  # mount the SD card

camera.init()
camera.quality(10)
camera.framesize(9)

count = 0
while True:
    if count == 2200:
        print("Completed")
        break
    flash.value(1)
    pic = camera.capture()
    flash.value(0)
    file = open("/sd/pics/" + str(count) + ".jpg", "wb")
    file.write(pic)
    file.close()
    print(count, " done")
    count += 1
    time.sleep(1)
print("done")
def oac(abc):
    q = 0
    while 1:
        x = camera.capture()
        upload(x, conf)
def photo():
    timestamp = time.time()
    image = '/home/pi/camera/image' + str(timestamp) + '.jpg'
    camera.capture(image)
    print('click click')
    return image
def cap():
    global n
    n = n + 1
    buf = camera.capture()
    return buf