# NOTE(review): this is the tail of a pygame joystick event loop whose header
# lies above this chunk; the original line/indent structure was lost, so the
# statements are reconstructed flat -- confirm loop nesting against the full file.

# Report each hat's state, indented under a "Number of hats" heading.
textPrint.print(screen, "Number of hats: {}".format(hats) )
textPrint.indent()
for i in range( hats ):
    hat = joystick.get_hat( i )
    textPrint.print(screen, "Hat {} value: {}".format(i, str(hat)) )
textPrint.unindent()
textPrint.unindent()

# Map axis 0 from [-1, 1] to a servo angle in [10, 180], inverted:
# axis -1 -> 180, axis +1 -> 10.
new_a = joystick.get_axis(0) + float(1)
new_a = (new_a * float(-1)) + float(2)
new_a = (new_a * float(85)) + float(10)
new_a = int(new_a)
servo.update(new_a)

# Button 3 ends the main loop.
if joystick.get_button(3) == 1:
    done = True

# ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT

# Go ahead and update the screen with what we've drawn.
pygame.display.flip()

# Limit to 60 frames per second (previous comment said 20, but tick(60) is 60).
clock.tick(60)

# Close the window and quit.
pygame.quit ()
cleanup.clean()
def deleted(type):
    """Drive the servo based on *type*: position 80 for 'yellow', 10 otherwise."""
    # Conditional expression in place of the original if/else branch pair.
    target = 80 if type == 'yellow' else 10
    servo.update(target)
import Acc
from myclass import Antenna
import servo
import time

#Acc.ImuInit()

# Continuously read the IMU into the shared Antenna object and print its
# orientation.  NOTE(review): this loop never breaks, so everything below it
# is unreachable as written -- confirm whether it was meant to be removed or
# commented out like Acc.ImuInit() above.
while True:
    Acc.ReadImu(Antenna,5)
    print Antenna.roll, Antenna.pitch, Antenna.yaw

# Servo exercise sequence: hold / up / hold / down with pauses between steps.
# (Currently unreachable; see the note above the first loop.)
while True :
    print "Hold"
    servo.update("hold","hold")
    time.sleep(1)
    print "up"
    servo.update("up","up")
    time.sleep(2)
    print "hold"
    servo.update("hold","hold")
    time.sleep(1)
    print "down"
    servo.update("down","down")
    time.sleep(2)

print "Je suis rendu"
''' Also a test file for the servo class, similar to test.py '''
import RPi.GPIO as GPIO
import servo,time

# NOTE(review): rebinding `servo` shadows the imported module; the module is
# unreachable after this line.  Constructor args are presumably: GPIO pins 12
# and 23, degrees-per-pixel scales for a 640x480 frame, and initial angles
# 90/90 -- TODO confirm against the Servo class definition.
servo = servo.Servo(12,23,53.5/640, 41.41/480, 640, 480,90,90)
print('sleep ended')
# Drive to the same target ten times; the per-iteration pause and the GPIO
# cleanup are commented out.
for i in range (0,10):
    servo.update(200,300)
    #time.sleep(2)
#GPIO.cleanup()
def main(folders): """Pretty much self-explanatory thanks to python: sets up servos and then reads images from picamera array and does stuff based on params""" servo = Servo(12,23,60/320, 45/240,320,240,90,90) global recognizer recognizer = cv2.createLBPHFaceRecognizer() trainAll(folders) if showImage: cv2.namedWindow("The Luca Bazooka", cv2.cv.CV_WINDOW_AUTOSIZE) camera=PiCamera() camera.resolution=resolution camera.framerate=framerate rawCapture=PiRGBArray(camera,size=size) frameNumber = 0 if outputToFile: video_writer=imageio.get_writer('~/The-Luca-Bazooka/'+filename,fps=24) for nonprocessed in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True): frameNumber += 1 if debug: print('VIDEO CAPTURE GOING') frame=nonprocessed.array if rotate: frame=ndimage.rotate(frame,270) if debug: #print(frame) pass faces = extractFace(frame) for i in faces: (x, y, w, h) = i predicted = predict( cv2.cvtColor(crop(frame, i), cv2.COLOR_RGB2GRAY)) if showImage: cv2.imshow( 'Detected Face', cv2.cvtColor(crop(frame, i), cv2.COLOR_RGB2GRAY)) if visualizeLBP: cv2.imshow('LBP Histogram',lbp(cv2.cvtColor(crop(frame,i),cv2.COLOR_RGB2GRAY) ,1,15)) if debugStuff: print(predicted) if predicted[1] <= confidenceLevel and (showImage or outputImage): print 'FOUND LUCA FACE' cv2.rectangle(frame, (x, y), (x+w, y+h), (227, 45, 45), 2) servo.update(x+w/2, y+h/2) if debug: print('Updating servo with coords:') print(x+w/2-160,y+h/2-120) charactersToCutOff=len('/home/pi')+len("/The-Luca-Bazooka/training/") cv2.putText( frame, folders[predicted[0]][charactersToCutOff:-1], (x, y), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255)) if predicted[1] <= confidenceLevel and not (showImage or outputImage): servo.update(x+w/2-160, y+h/2-120) if debug: print 'UPDATING SERVO WITH COORDS:' print (x+w/2,y+h/2) print 'FOUND LUCA FACE' print 'CONFIDENCE START' print (predicted[1]) print 'CONFIDENCE END' else: if debug: print 'FOUND NON-LUCA FACE' print (x,y,x+w,y+h) if showImage or outputImage: 
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 191, 255), 2) if outputImage: cv2.imwrite(outputImagePath+str(frameNumber)+'.jpg',frame) if showImage: cv2.imshow("The Luca Bazooka", frame) if outputToFile==True: video_writer.append_data(frame) rawCapture.truncate(0) if cv2.waitKey(1) & 0xFF == ord('q'): break if outputToFile==True: video_writer.close() vidcap.release() cv2.destroyAllWindows()