def main():
    # Read in Configurables
    # - Initialize Camera
    cam_config = CamConfig()
    cam = camera.initialize(cam_config.img_width, cam_config.img_height)
    pic_number = 0
    # - Initialize GPIO
    gpio_config = configure_gpio()
    # - Initialize Twitter Parameters
    tw_config = TwitterConfig()
    tweeter = Tweeter(
        tw_config.consumer_key,
        tw_config.consumer_secret,
        tw_config.access_token,
        tw_config.access_token_secret
    )
    # - Initialize message configuration
    msg_config = MessageConfig()
    try:
        while True:
            # Wait for the button to be pressed
            print("Waiting for user to press button...")
            GPIO.wait_for_edge(gpio_config.button_pin, GPIO.RISING)
            GPIO.output(gpio_config.ready_pin, False)
            GPIO.output(gpio_config.busy_pin, True)
            # Capture image
            pic_file = get_next_pic_name(cam_config, pic_number)
            camera.take_picture(cam, pic_file)
            # Upload the picture to Twitter, then post the tweet
            with open(pic_file, "rb") as pic:
                media_status = tweeter.upload(pic)
            tweeter.tweet(media_status, msg_config.msg)
            # Advance the picture counter so the next capture gets a new filename
            pic_number += 1
            # When finished, reset LEDs
            GPIO.output(gpio_config.ready_pin, True)
            GPIO.output(gpio_config.busy_pin, False)
    except KeyboardInterrupt:
        print("User exited via Keyboard Interrupt.")
    finally:
        # Clean up/disconnect the camera and GPIO on exit
        camera.cleanup_camera(cam)
        GPIO.cleanup()
def imgarray(): """ Takes picture and displays it. Returns numpy RGB value image array Returns: The image array """ img_array = take_picture() return img_array
def rank_img():
    img = take_picture()
    combined = get_cards(img)
    print(combined, flush=True)
    msg = rank_hand(combined)
    print(msg, flush=True)
    return statement(msg)
def take_pic(msg):
    user = session['user']
    filename = user + ".jpg"
    photo = camera.take_picture(filename, user=user)
    # start countdown display
    session['photo'] = photo
    emit('image', {'data': photo.web_path})
def no_debug():
    img = take_picture()
    combined = get_cards(img, debug=False)
    print(combined, flush=True)
    if len(combined) == 0:
        return statement("I don't see a card")
    if len(combined) == 1:
        return statement("You have a {} of {}".format(*(combined[0])))
    msg = say_cards(combined)
    print(msg, flush=True)
    return statement(msg)
def get_img_from_camera():
    """
    Gets an image numpy array from the default camera

    Parameters:
    -----------
    None

    Returns:
    --------
    img (numpy array): the (H, W, 3) rgb values of the image
    """
    img_array = take_picture()
    return img_array
def action():
    global index, pwm, PET_PATH, model, tex, my_camera
    my_model = pretrainmodel.pretrain_model('null')
    STEP = 15
    img = url_for('static', filename='pet.%d.jpg' % index)
    PET_PATH = '/home/pi/auto_petfeeder/static/pet.' + str(index) + '.jpg'
    m = my_model.my_predict(PET_PATH)
    my_list = m.tolist()
    #my_list[0][0]=1.0
    if my_list[0][0] == 1.0:
        tex = 'few feed'
        pwm.start(0)
        dc = 0
        # drive the servo through a sweep and back
        for angle in range(150, -1, -STEP):
            dc = my_duty_cycle(angle)
            pwm.ChangeDutyCycle(dc)
            time.sleep(0.1)
        for angle in range(0, 121, STEP):
            dc = my_duty_cycle(angle)
            pwm.ChangeDutyCycle(dc)
            time.sleep(0.4)
    elif my_list[0][0] == 0.0:
        # more
        tex = 'more feed'
    # take a fresh picture for the next round and render the page
    index = index + 1
    camera.take_picture(index, my_camera)
    img = url_for('static', filename='pet.%d.jpg' % index)
    #img=cv2.resize(img,(50,50))
    return render_template('main.html', img=img, tex=tex)
def take_picture():
    camera_config = {}
    if 'width' in request.args and 'height' in request.args:
        width = int(request.args.get('width'))
        height = int(request.args.get('height'))
    else:
        width = 1920
        height = 1080
    camera_config['resolution'] = (width, height)
    res = camera.take_picture(camera_config)
    return jsonify({'name': res})
def take_and_label_picture():
    # ts = time.time()
    # timeStamp = datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H:%M')
    image_file = 'static/lab.jpg'
    print('Taking picture and saving to ' + image_file)
    camera.take_picture(image_file, True)
    graph = label_image.load_graph(MODEL_FILE)
    t = label_image.read_tensor_from_image_file(image_file)
    input_operation = graph.get_operation_by_name('import/Placeholder')
    output_operation = graph.get_operation_by_name('import/final_result')
    with tf_session(graph=graph) as sess:
        results = sess.run(output_operation.outputs[0],
                           {input_operation.outputs[0]: t})
    results = numpy.squeeze(results)
    labels = label_image.load_labels(LABEL_FILE)
    result = {}  # map each label to its score
    for i, label in enumerate(labels):
        result[label] = float(results[i])
    print(result)
    message(result)
def run():
    """
    Takes a photo on the computer's camera and detects faces. If a face is
    recognized, its name is displayed. If not, the user is prompted to enter
    a name and the person is added to the database.
    """
    image = take_picture()
    descriptors = image_to_descriptors(image)
    return recognize_image(descriptors[0], 0.4)
def button_1_pressed():
    global photo_or_update_in_progress
    photo_or_update_in_progress = True
    print("About to take a photo")
    camera.start_preview(int(affichage.fenetre_center_left - 320),
                         int(affichage.fenetre_block_height + 60))
    compteur = 5
    while compteur >= 0:
        affichage.widget_information.config(
            text="Taking a photo in " + str(compteur) + " second(s)")
        compteur -= 1
        time.sleep(1)
    print("Taking the photo")
    chemin_photo = camera.take_picture()
    affichage.widget_information.config(text="Photo taken, sending it to the server")
    code_photo = apiMirror.post_photo(chemin_photo)
    affichage.widget_information.config(text="Your photo code: " + str(code_photo))
    photo_or_update_in_progress = False
def labelling():
    '''Takes a picture using the camera and labels the faces in it.'''
    pic = take_picture()
    load_dlib_models()
    list_of_arr, detections = camdes.make_descriptor(pic[np.newaxis, :, :, :])
    descriptors = [descriptor for arr in list_of_arr for descriptor in arr]
    names = []
    for descriptor in descriptors:
        names.append(idf.id_to_faces(database, descriptor))
    #print(names)
    #fig, ax = plt.subplots()
    #for i, detection in enumerate(detections):
    #    print(detection)
    #    draw_labels(ax, fig, detection, names[i])
    #ax.imshow(pic)
    return names
def process_camera():
    """
    Using a picture from the current camera, tries to predict the identity of
    the person/people in it.

    :return: tuple of (pic_array, names, detections, shapes, descriptors),
        where names is the list of matched names
    """
    pic_array = take_picture()
    detections, shapes, descriptors = detect_faces(person_database, pic_array)
    names = []
    for desc in descriptors:
        name = find_match(person_database, desc)
        names.append(name)
    return pic_array, names, detections, shapes, descriptors
def bye():
    new_picture = take_picture()
    base_picture = [
        picture for picture in base_pictures
        if user_logged.casefold() in picture.casefold()
    ][0]
    user_name, match = do_recognition(base_picture, new_picture)
    if match:
        exit_time = str(datetime.datetime.now())[:-7]
        data = {
            "user": user_name,
            "exit_time": exit_time,
            "photo": new_picture
        }
        Database.insert("exits", data)
        return render_template('bye.html', user_name=user_name, exit_time=exit_time)
    else:
        return render_template('fail.html')
def storeintodatabase(name):
    for rans in range(3):
        img_array = take_picture()
        # load the models that dlib has to detect faces.
        load_dlib_models()
        face_detect2 = models["face detect"]
        face_rec_model2 = models["face rec"]
        shape_predictor2 = models["shape predict"]
        # Take in the (H,W,3) img_array and return the face detections in the photo
        detections = list(face_detect2(img_array))
        # for each detected face, create a descriptor
        descriptors = []
        for image in range(len(detections)):
            shape = shape_predictor2(img_array, detections[image])
            descriptor = np.array(face_rec_model2.compute_face_descriptor(img_array, shape))
            descriptors.append(descriptor)
        if type(detections) != list:
            detections = list(detections)
        if len(detections) == 1:
            rect = detections[0]
            fig, ax = plt.subplots()
            x1, x2, y1, y2 = rect.left(), rect.right(), rect.top(), rect.bottom()
            ax.imshow(img_array[y1:y2, x1:x2, :])
        else:
            fig, ax = plt.subplots(ncols=len(detections))
            # https://stackoverflow.com/questions/46615554/how-to-display-multiple-images-in-one-figure-correctly
            for i, rect in enumerate(detections):
                x1, x2, y1, y2 = rect.left(), rect.right(), rect.top(), rect.bottom()
                ax[i].imshow(img_array[y1:y2, x1:x2, :])
                ax[i].axis('off')
        faces = import_pickle()
        if name in faces.keys():
            faces[name].append(descriptor)
        else:
            faces[name] = [descriptor]  # start a list so later appends stay consistent
        pickle_the_pickle(faces)
    return statement("Now you are in our database!!")
def add(name=None, file_path=None, folder=False):
    """
    This function acts as a shortcut for the user. If a name is not entered,
    the function will prompt the user for one. Given a file path, the image
    will be processed to identify any people. Given no file path, the camera
    will be used.

    :param: name: [str], default: None
        name of person to be identified
    file_path: [str], default: None
        path to image file
    folder: [Boolean], default: False
        if true, adds all images in folder under one name
    """
    if not folder:
        if name is None:
            name = input("No name found. Please enter your name: ")
        if file_path is None:
            print("No file path found. Taking picture.")
            img_array = take_picture()
        else:
            img_array = io.imread(file_path)
        add_image(name, img_array)
    else:
        if name is None:
            name = input("No name found. Please enter the name of the person in the folder contents: ")
        if file_path is None:
            file_path = input("No file path found. Please enter file path: ")
        for filename in os.listdir(file_path):
            if filename.endswith(".pkl"):
                img_array = io.imread(os.path.join(file_path, filename))
                add_image(name, img_array)
def addCamera(self, name, port=0, exposure=0.2, wait=0):
    time.sleep(wait)
    save_camera_config(port, exposure)
    img_array = take_picture()
    face = self.img_to_array(img_array)[0]
    print('There are this many faces: ', len(face))
    if len(face) == 0:
        print(name, ' was not detected. Please try again!')
        return None
    print(img_array.shape)
    if len(face) > 1:
        print('Too many faces were detected. Please try again!')
        return None
    print(name, ' was successfully added to the database.')
    border = test.detectFromImg(img_array)[1][0]
    fig, ax = plt.subplots()
    ax.imshow(img_array)
    ax.add_patch(patches.Rectangle((border[1], border[3]),
                                   border[0] - border[1],
                                   border[2] - border[3],
                                   edgecolor='pink', fill=False))
    ax.set_xticks([])
    ax.set_yticks([])
    self.add(name, face)
def loadcamera(self, name, upscale=1):
    # Takes picture
    img_array = take_picture()[:, :, :3]
    detections = list(face_detect(img_array, upscale))
    # Loads and displays face if a SINGLE one is detected
    if len(detections) == 1:
        det = detections[0]
        l, r, t, b = det.left(), det.right(), det.top(), det.bottom()
        shape = shape_predictor(img_array, det)
        descriptor = np.array(face_rec_model.compute_face_descriptor(img_array, shape))
        assert descriptor.size == 128, "Descriptor is not of shape (128, 1)!"
        self.database.append(("{}".format(name), descriptor))
        self.ax.clear()
        self.ax.imshow(img_array)
        self.ax.set_xticks([])
        self.ax.set_yticks([])
        self.ax.add_patch(patches.Rectangle((l, b), r - l, t - b, linewidth=2,
                                            edgecolor='m', facecolor='none'))
        plt.xlabel("{} has been successfully loaded!".format(name))
        plt.show()
    else:
        print("Error: Unable to detect {}'s face!".format(name))
def main():
    config = gardentools.get_config()
    moisture_pin = config['moisture_sensor']['pin']
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(moisture_pin, GPIO.IN, GPIO.PUD_DOWN)
    dry = GPIO.input(moisture_pin)
    if dry:
        dry = True
        if config['pump']['enabled'] and config['pump']['frequency'] == 'when dry':
            watered = pump.pump()
        else:
            watered = None
        if config['camera']['enabled'] and config['pump']['frequency'] == 'when dry':
            photo = camera.take_picture()
        else:
            photo = None
    else:
        dry = False
        watered = None
        photo = None
    with gardentools.Logs('opengardener.db') as db:
        db.write(dry=dry, photo_path=photo, watered=watered)
def camera_to_descriptors(upscale=0):
    """
    Detects faces from a photo taken by the camera and turns each one into a
    128-D descriptor.

    Parameters
    ----------
    upscale : int, optional (default=0)
        The number of times to upscale the image and reprocess it, to find
        smaller faces

    Returns
    -------
    np.array[int]
        RGB image-array or 8-bit greyscale of shape=(H, W(, 3))
    List[np.array[float]]
        List of descriptor vectors of all detected faces in the image
    """
    img_array = take_picture()
    return img_array, image_to_descriptors(img_array, upscale)
def add_person(num_photos=5, num_recordings=5):
    name = input("What is your name (First Last)? ")
    print(f"We will take {num_recordings} five-second recordings of your voice. "
          "Please speak consistently for the entire time.")
    ans = input("Hit 'Enter' when ready")
    samples = np.zeros((num_recordings, 176400))
    for i in range(num_recordings):
        sample = voice_rec.recording_to_sample(duration=5)
        samples[i] = sample[:176400]
    mean_vocal_emb = np.mean(voice_rec.get_embedding(samples), axis=0)
    mean_vocal_emb /= np.linalg.norm(mean_vocal_emb)
    print(f"We will take {num_photos} pictures of your face. "
          "Please move your head slightly in between photos.")
    descs = []
    for i in range(num_photos):
        ans = input(f"Hit 'Enter' to take photo {i+1}")
        image = take_picture()
        desc = face_rec.image_to_descriptors(image)[0]
        descs.append(desc)
    mean_face_desc = np.mean(descs, axis=0)
    with open("people.p", mode="rb") as opened_file:
        people = pickle.load(opened_file)
    people.append(objects.Person(name, mean_vocal_emb, mean_face_desc))
    with open("people.p", mode="wb") as opened_file:
        pickle.dump(people, opened_file)
def take_image_classify_emotion():
    """
    Takes an image and classifies the face's emotion

    Returns
    -------
    0 if the model determines the image is negative and 1 if the model
    determines the image is positive
    """
    pic = camera.take_picture()
    mtcnn = MTCNN()
    faces = mtcnn.forward(pic.copy())
    model = EmotionCNN()
    model.load_state_dict(torch.load("emotion_model_new.pt"))
    model.eval()
    pre_process = transforms.Compose([
        transforms.Resize(48),
        transforms.Grayscale(num_output_channels=1),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5], std=[0.5])
    ])
    classes = ["negative", "positive"]
    fig = plt.figure(figsize=(25, 4))
    ax = fig.add_subplot()
    image = np.transpose(faces.numpy(), (1, 2, 0))
    image = Image.fromarray((image * 255).astype(np.uint8))
    image = pre_process(image)
    plt.imshow(np.transpose(faces.numpy(), (1, 2, 0)))
    output = model(image.reshape(1, 1, 48, 48))
    prediction = torch.argmax(output, dim=1).item()
    ax.set_title(f"Predicted: {classes[prediction]}")
    return pic, prediction
def recognition():
    image = take_picture()
    load_dlib_models()
    face_detect = models["face detect"]
    face_rec_model = models["face rec"]
    shape_predictor = models["shape predict"]
    detections = list(face_detect(image))
    print(detections)
    from matplotlib.patches import Rectangle
    fig, ax = plt.subplots()
    ax.imshow(image)
    colors = ['blue', 'red', 'green', 'purple', 'yellow', 'orange']
    print("Number of faces detected: {}".format(len(detections)))
    for k, d in enumerate(detections):
        # Get the landmarks/parts for the face in box d.
        shape = shape_predictor(image, d)
        # Draw the face landmarks on the screen.
        for i in range(68):
            ax.plot(shape.part(i).x, shape.part(i).y, '+', color=colors[k])
    return image
load_dlib_models()
face_detect = models["face detect"]
face_rec_model = models["face rec"]
shape_predictor = models["shape predict"]

with open('database.pkl', 'rb') as handle:
    # Gets values of database and stores it in dictionary
    face_dict = pickle.load(handle)

print("Do you want to take a picture (0) or use a stored picture (1) for the database?")
mode = int(input())
print("What is the name of the input face?")
person = input()  # Gets the name of the face

if mode == 0:
    print("Say cheese!")
    cheese = input()
    pic = take_picture()
    x = makedescriptors(pic)
    print(x)
    if x is None:
        # Skips if no face is detected to stop error
        print("Oof, I didn't see a face :(")
    else:
        LogPic(x, person)
elif mode == 1:
    # Finds the images in the directory that are .png and sets their descriptors for person
    for i, filename in enumerate(os.listdir('.')):
        if filename.endswith(".png"):
            x = uploadimage(filename)
            x = makedescriptors(x)
            if x is None:
                # Skips if no face is detected to stop error
                print(i)
                continue
def imgarray():
    img_array = take_picture()
    return img_array
def load_from_camera(self):
    with use_camera(port=1, exposure=.5) as camera:
        self.img_array = take_picture()
import camera
import sys
import image

if __name__ == "__main__":
    print(sys.argv)
    threshold = float(sys.argv[1])
    camera.take_picture("foo.jpg")
    camera.take_picture("bar.jpg")
    print(image.difference_f("pictures/foo.jpg", "pictures/bar.jpg", threshold))
import pickle

from database import Profile
from camera import take_picture
import matplotlib.pyplot as plt
import optimized_descriptors as desc

mode = input("add or match?\n")
if mode == "match":
    filename = input("Database pathname?\n")
    with open(filename, 'rb') as file:
        database = pickle.load(file)
    picture = take_picture()
    names = desc.match(picture, database)
    print(names)
elif mode == "add":
    name = input("What is the name?\n")
    filename = input("Database pathname?\n")
    with open(filename, 'rb') as file:
        database = pickle.load(file)
    picture = take_picture()
    desc.add_to_database(name, database, picture)
def takePhoto(self):
    return base64.b64encode(take_picture())
def yes():
    # Alexa runs this when the user agrees to run it
    pic = take_picture()
    return statement(emotion_test(pic))
def main(database):
    namereturn = ''
    descreturn = None
    ##download_model()
    ##download_predictor()
    # take the picture
    pic = take_picture()
    # first, we load the models that dlib has to detect faces.
    ##load_dlib_models()
    face_detect = models["face detect"]
    face_rec_model = models["face rec"]
    shape_predictor = models["shape predict"]
    # detects the face through corners
    detections = list(face_detect(pic))
    # print(detections)  # list of shape n for n faces
    ##fig, ax = plt.subplots()
    ##ax.imshow(pic)
    ##database = {}
    ##with open("database.pkl", mode="rb") as opened_file:
    ##    database = pickle.load(opened_file)
    #print("DATABASE")
    #print(database)
    print("Number of faces detected: {}".format(len(detections)))
    ##for k, d in enumerate(detections):
    ##    # Get the landmarks/parts for the face in box d.
    ##    shape = shape_predictor(pic, d)
    ##    # Draw the face landmarks on the screen.
    ##    for i in range(68):
    ##        ax.plot(shape.part(i).x, shape.part(i).y, '+', color="blue")
    import matplotlib.patches as patches
    ##for faces in detections:
    ##    # Create a Rectangle patch
    ##    rect = patches.Rectangle((faces.left(), faces.bottom()), faces.width(), -faces.height(),
    ##                             linewidth=1, edgecolor='g', facecolor='none')
    ##    # Add the patch to the Axes
    ##    ax.add_patch(rect)
    names = {}
    database["Unknown Counter"] = 0
    unknown_counter = database["Unknown Counter"]
    for face in detections:
        # let's take a look as to what the descriptor is!!
        shape = shape_predictor(pic, face)
        descriptor = np.array(face_rec_model.compute_face_descriptor(pic, shape))
        # compares descriptor to database through img_in_database
        cutoff = .4
        #print("DATABASE")
        #print(database)
        name = match.img_in_database(descriptor, database, cutoff)
        if name == "not found":
            name = "Unknown" + str(unknown_counter)
            database["Unknown Counter"] += 1
        # plots name underneath square
        ##ax.text(face.left() + (0.25 * faces.width()), face.bottom() + (0.2 * faces.height()),
        ##        name, bbox=dict(facecolor='green', alpha=0.5))
        # adds to names dictionary
        names[name] = descriptor
        descreturn = descriptor
    ##plt.show()
    add_profile = "y"  # input("Would you like to add this picture to the database? [y/n] ")
    if add_profile == "y":
        for name in names:
            # updates or creates a profile
            if "Unknown" not in name:
                namereturn += name
                database = portfolio.update_profile(names[name], name, database)
            else:
                return ("Unknown", descreturn)
            # add_name = input(f"Would you like to give a name for {name}? [y/n] ")
            # if add_name == "y":
            #     new_name = input(f"What is {name}'s name? ")
            #     if new_name in database:
            #         database = portfolio.update_profile(names[name], new_name, database)
            #     else:
            #         database = portfolio.create_profile(names[name], new_name, database)
            # else:
            #     print(f"Saving this person as {name}")
            #     database = portfolio.create_profile(names[name], name, database)
    with open("database.pkl", mode="wb") as opened_file:
        pickle.dump(database, opened_file)
    return (namereturn, descreturn)
def camera(self, port=0, exposure=0.2):
    save_camera_config(port, exposure)
    img_array = take_picture()
    # an empty capture means nothing usable came back from the camera
    if img_array is None or img_array.size == 0:
        raise ValueError("No face detected")
    return img_array
sH = MySenseHat(rotation=270)
if not os.path.exists("images"):
    os.makedirs("images")
while True:
    if object_in_range(port="/dev/ttyUSB0", angle=10, distance=30):
        log("Object within the distance threshold")
        sH.show_EM()
        buffer_count = 1
        while True:
            log("Taking a picture")
            sH.cam_take()
            #sH.flash()
            image = take_picture(cam_ID=0)
            log("Running character recognition")
            license_plate = ocr(api_key="69e586ed1d88957", file_name=image)
            log('Received value: "{0}"'.format(license_plate))
            if validator(license_plate):
                log("The received value is valid")
                sH.show_CM()
                break
            else:
                log("The received value is not valid")
                sH.show_X()
                if buffer_count <= 10:
                    os.rename("images/{0}.png".format(image), "images/bad_{0}.png".format(image))
                    buffer_count += 1
def camera_to_descriptor():
    pic = take_picture()
    load_dlib_models()
    return make_descriptor(pic[np.newaxis, :, :, :])
from picamera import PiCamera
from time import sleep
import cv2
import os
from keras.models import load_model
import numpy as np

# Imports implied by the code below but missing from the original snippet:
from flask import Flask, url_for, render_template
from flask_socketio import SocketIO
import RPi.GPIO as GPIO

import camera
import pretrainmodel

app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)

index = 1
my_camera = PiCamera()
camera.take_picture(index, my_camera)
PET_PATH = '/home/pi/auto_petfeeder/static/pet.' + str(index) + '.jpg'

CONTROL_PIN = 17
PWM_FREQ = 50
GPIO.setmode(GPIO.BCM)
GPIO.setup(CONTROL_PIN, GPIO.OUT)
pwm = GPIO.PWM(CONTROL_PIN, PWM_FREQ)

'''module'''
def my_duty_cycle(angle=0):
    duty_cycle = (0.05 * PWM_FREQ) + (0.19 * PWM_FREQ * angle / 180)
    return duty_cycle  # assumed return value; callers use the computed duty cycle
pygame.display.set_mode((1, 1))
condition = True
st = 0.5
camera.init()
#movement.car.init()
print('HAJIMAE')
while condition:
    for event in pygame.event.get():
        #print(event)
        if event.type == pygame.KEYDOWN:
            print(event.key)
            if event.key == pygame.K_UP:
                print('UP')
                movement.forward(st)
                os.chdir('/home/pi/Desktop/New_beginning/w')
                camera.take_picture()
            elif event.key == pygame.K_DOWN:
                movement.reverse(st)
                print('DOWN')
            elif event.key == pygame.K_LEFT:
                os.chdir('/home/pi/Desktop/New_beginning/a')
                camera.take_picture()
                movement.left(st)
                print('LEFT')
            elif event.key == pygame.K_RIGHT:
                os.chdir('/home/pi/Desktop/New_beginning/d')
                camera.take_picture()
                movement.right(st)
                print('RIGHT')
            elif event.key == pygame.K_SPACE:
startTime = 0
while GPIO.input(PIN_ECHO) == 0:
    startTime = time.time()
endTime = 0
while GPIO.input(PIN_ECHO) == 1:
    endTime = time.time()
distance = (endTime - startTime) * 17150
distance = distance / 2.54
if loop_settle > 0:
    loop_settle -= 1
if loop_settle == 1:
    default_distance = distance
if loop_settle == 0:
    if distance > default_distance + inch_threshold or distance < default_distance - inch_threshold:
        camera.take_picture(my_camera)
        encoded_string = None
        with open("./image/night_pic.png", "rb") as img_file:
            #encoded_string = img_file.read()
            encoded_string = base64.b64encode(img_file.read())
        # check for close and catch errors
        print(encoded_string)
        pi_utils.write_measurement(str(distance), str(encoded_string), datetime.datetime.now())
        #print "Writing to the DB."
        time.sleep(15)
#print ("distance: %f" % distance)
#print