# Imports inferred from usage; process_image(), aplay_audio(), rotate(), and
# green_led_pin are defined elsewhere in this script.
import time as t
from time import sleep
import RPi.GPIO as GPIO
from picamera import PiCamera


def camera():
    # Capture a frame and time how long process_image() takes on it.
    camera = PiCamera()
    camera.capture('front_view.png')
    now = t.time()
    process_image('front_view.png')
    print("Elapsed Time: {}".format(t.time() - now))
def scan(pic, camera, rotation_count):
    # Light the LED, capture a frame, and run the person detector on it.
    GPIO.output(green_led_pin, GPIO.HIGH)
    camera.capture('{}.png'.format(pic))
    persons = process_image('{}.png'.format(pic))
    if not persons:
        # Nothing found: play a sound, then rotate to the next viewpoint.
        wii = aplay_audio("pre_made/wii.mp3")
        sleep(1)
        rotate(2)
        rotation_count += 1
    return persons, rotation_count
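# The excerpt does not show how scan() is driven. Below is a minimal,
# hypothetical driver loop (the sweep() name, the total_rotations value, and
# the early-exit logic are assumptions, not part of the original script):
def sweep(camera, total_rotations=8):
    rotation_count = 0
    while rotation_count < total_rotations:
        persons, rotation_count = scan('scan_{}'.format(rotation_count),
                                       camera, rotation_count)
        if persons:
            # Stop sweeping as soon as the detector reports a person.
            return persons
    return []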
# Imports inferred from usage; X_val/y_val, X_train/y_train, `generate`,
# `batch_size`, transform_image(), and process_image() are defined elsewhere.
import cv2
import numpy as np


def valGenerator():
    # Yields (batch_x, batch_y) tuples from the validation set, producing
    # `generate` augmented copies of every image.
    while True:
        batchx = []
        batchy = []
        for fname, cl in zip(X_val, y_val):
            img = cv2.imread(fname)
            for ggg in range(generate):
                # Augment a fresh copy of the original each time rather than
                # re-transforming the already-transformed image.
                img_ = transform_image(img, 30, 20, 1)
                processed = process_image(img_)
                batchx.append(processed)
                batchy.append([cl])
                if len(batchx) == batch_size:
                    bx = np.array(batchx)
                    by = np.array(batchy)
                    batchx = []
                    batchy = []
                    yield bx, by
def trainGenerator():
    # Same batching scheme as valGenerator(), but over the training set.
    while True:
        batchx = []
        batchy = []
        for iii, (fname, cl) in enumerate(list(zip(X_train, y_train))):
            img = cv2.imread(fname)
            for ggg in range(generate):
                img_ = transform_image(img, 30, 20, 1)
                # cv2.imshow('gen', img_)
                # cv2.waitKey(0)
                processed = process_image(img_)
                batchx.append(processed)
                batchy.append([cl])
                if len(batchx) == batch_size:
                    bx = np.array(batchx)
                    by = np.array(batchy)
                    batchx = []
                    batchy = []
                    yield bx, by
# Keras 1-style imports inferred from the layer names used below.
from keras.models import Sequential
from keras.layers import Convolution2D, Activation

input_shape = process_image(np.zeros(shape=(64, 64, 3))).shape
print(input_shape, 'input shape')

# Sanity-check the shapes coming out of the training generator.
for t in trainGenerator():
    print(t[0].shape, t[1].shape)
    break

kernel_size = (3, 3)

model = Sequential()
model.add(Convolution2D(256, kernel_size[0], kernel_size[1],
                        border_mode='valid', input_shape=input_shape))
model.add(Activation('relu'))
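# The excerpt stops after the first convolutional layer. Below is a minimal,
# hypothetical sketch of finishing the model and feeding it the generators via
# Keras 1's fit_generator; the Flatten/Dense tail, optimizer, loss, and the
# samples_per_epoch / nb_val_samples values are assumptions, not the author's
# configuration.
from keras.layers import Flatten, Dense

model.add(Flatten())
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')

# In the Keras 1 API, samples_per_epoch and nb_val_samples count individual
# samples, so they are sized here as (number of files) * (augmented copies).
model.fit_generator(trainGenerator(),
                    samples_per_epoch=len(X_train) * generate,
                    nb_epoch=5,
                    validation_data=valGenerator(),
                    nb_val_samples=len(X_val) * generate)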