Example no. 1
0
	def update(self):
		"""Advance the card-sorting state machine by one tick.

		Cycles ROLL -> IMAGE_PROCESSING -> FILTER -> DROP -> ROLL, driven by
		the photo sensor. Relies on module-level hardware/processing objects
		(stepper, photo_resistor, camera, image_processing, ocr_processing,
		mtg) and the STATE_* constants.

		Returns:
			False, always (kept for caller compatibility).
		"""
		if self.current_state == STATE_ROLL:
			# State 1) Roll a card forward until the photo sensor triggers.
			stepper.turn(6)
			if photo_resistor.isActive():
				self.current_state = STATE_IMAGE_PROCESSING
				stepper.stop()
				print('------------------------')
		elif self.current_state == STATE_IMAGE_PROCESSING:
			# State 2) Take a picture and run it through OCR.
			time.sleep(0.9)  # Wait for the camera feed to update properly.
			# 1) Take a picture.
			camera.camera_handler.toggleShowOutput(False)
			# Check for None BEFORE copying: the original copied first, so a
			# missing frame would have crashed before the guard could run.
			captured = camera.read()
			if captured is None:
				return False
			captured = captured.copy()
			# 2) Pre-process the image for Tesseract.
			camera.to_display = captured.copy()
			processed_path = image_processing.process(captured)
			if processed_path is None:
				return False
			# 3) Pass the image through Tesseract.
			ocr_string = ocr_processing.processImage(processed_path)
			# 4) Parse the resulting string into card data.
			card_data = mtg.parsing.parse(ocr_string)
			self.last_processed_data = card_data
			print(card_data, ocr_string)
			self.log.append(card_data)
			self.processed_count += 1
			self.current_state = STATE_FILTER
		elif self.current_state == STATE_FILTER:
			# State 3) Move the filter into position.
			# TODO: re-enable extension-based filter routing, previously:
			#   last_processed_data['extention'] AKH/BFZ/KLD/ORI ->
			#   filters.open(1..4), anything else -> filters.close()
			self.current_state = STATE_DROP
		elif self.current_state == STATE_DROP:
			# State 4) Nudge the card off the roller, then wait for the
			# sensor to clear before returning to ROLL.
			for _ in range(5):
				stepper.step()  # A few steps is all it takes.
				time.sleep(0.05)
			if not photo_resistor.isActive():
				time.sleep(0.5)
				self.current_state = STATE_ROLL
			else:
				stepper.turn(30)
		return False
Example no. 2
0
 def run(self):
     """Worker loop: pair up queued frames and process them.

     Polls every 10 ms until self._stopped (a threading.Event) is set.
     Whenever at least two frames are queued in the module-level img_list,
     consumes the two oldest, runs process() on the pair, and appends the
     resulting (output_image, movement) tuple to processed_outputs.
     """
     while not self._stopped.wait(0.01):
         if len(img_list) > 1:
             # Take the two oldest frames with one slice instead of an
             # element-by-element copy followed by repeated `del img_list[0]`.
             imgs = img_list[:2]
             del img_list[:2]
             output_img, movement = process(imgs)
             processed_outputs.append((output_img, movement))
Example no. 3
0
def telemetry(data, image):
    """Compute throttle and steering commands from simulator telemetry.

    Args:
        data: dict carrying the current "steering_angle_auto",
            "accel_val_auto" and "speed" values (string or numeric).
            A falsy value skips prediction entirely.
        image: current center-camera frame (PIL image or array-like).

    Returns:
        (accel_val, steering_angle) tuple. (0, 0) when `data` is falsy or
        the prediction pipeline raises.
    """
    prediction = dict()
    # Default the keys that are actually RETURNED below. The original code
    # defaulted "accel_val_auto"/"steering_angle_auto" instead, so a falsy
    # `data` or any exception caused a KeyError at the return statement.
    prediction["accel_val"] = 0          # 0 to 100
    prediction["steering_angle"] = 0     # squashed to (-1, 1) below
    if data:
        # The current steering angle of the car.
        steering_angle = float(data["steering_angle_auto"])
        # The current throttle of the car (how hard the pedal is pushed).
        throttle = float(data["accel_val_auto"])
        # The current speed of the car.
        speed = float(data["speed"])
        try:
            image = np.asarray(image)        # from PIL image to numpy array
            image = image_processing.process(image)

            cv2.imshow('stream', image)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                exit()

            image = utils.preprocess(image)  # apply the preprocessing
            image = np.array([image])        # the model expects a 4D array

            # Predict the steering angle for the image.
            steering_angle = float(model.predict(image, batch_size=1))
            # Lower the throttle as the speed increases. If the speed is
            # above the current speed limit, we are on a downhill: slow down
            # first and then go back to the original max speed.
            global speed_limit
            if speed > speed_limit:
                speed_limit = MIN_SPEED  # slow down
            else:
                speed_limit = MAX_SPEED
            throttle = 1.0 - steering_angle**2 - (speed/speed_limit)**2

            throttle = throttle * 100
            # Squash the raw model output into (-1, 1) with a steep sigmoid.
            steering_angle = 2*sigmoid(10* steering_angle) -1

            if 1 - abs(steering_angle) < 0.3:
                print('{} {} {} STEER'.format(steering_angle, throttle, speed))
            else:
                print('{} {} {}'.format(steering_angle, throttle, speed))

            prediction["accel_val"] = throttle
            prediction["steering_angle"] = steering_angle
        except Exception as e:
            # Best-effort: log the failure and fall through to the defaults.
            print(e)

    return (prediction["accel_val"], prediction["steering_angle"])
Example no. 4
0
def main_Lan(ip0, ip1, password, chk_state):
	"""Scan the inclusive IP range [ip0, ip1] and process each RTSP host.

	Args:
		ip0, ip1: range bounds as IP-address strings (ip0 <= ip1).
		password: credential forwarded to image_processing.process.
		chk_state: flag forwarded to image_processing.process.

	Returns:
		pandas.DataFrame with columns "IP", "Date", "Focus State",
		one row per host that answered on port 554.
	"""
	ip_0 = ipaddress.ip_address(ip0)
	ip_1 = ipaddress.ip_address(ip1)

	results = []

	for ip_int in range(int(ip_0), int(ip_1) + 1):
		ip = str(ipaddress.ip_address(ip_int))
		# Only hosts with the RTSP port (554) open are worth processing.
		# NOTE: the original `ip_0=+1` statements (a `=+` typo for `+=`)
		# merely assigned the constant +1 to an unused variable; removed.
		if port_scanner(ip, 554):
			results.append(image_processing.process(password, ip, chk_state))

	labels = ["IP", "Date", "Focus State"]
	return pd.DataFrame(np.array(results), columns=labels)
Example no. 5
0
def detector(*images):
    """Detect faces and facial landmarks in each image, saving annotated copies.

    For every input image, runs a Haar-cascade face detector plus an LBF
    landmark model, draws each landmark as a blue dot and each face as a
    white rectangle, and writes the result to test<N>.jpg (N = 1-based
    position of the image in the argument list).

    Args:
        *images: image sources accepted by image_processing.process and
            image_processing.image_read.
    """
    downloader.download_haarcascade()
    detect = cv.CascadeClassifier(downloader.haarcascade_path())
    # NOTE(review): the LBF model file is expected in the current working
    # directory — confirm it is downloaded/deployed alongside the cascade.
    lbfmodel = "lbfmodel.yaml"
    landmark_detector = cv.face.createFacemarkLBF()
    landmark_detector.loadModel(lbfmodel)
    for face_counter, original_image in enumerate(images, start=1):
        image_proc = image_processing.process(original_image)
        faces = detect.detectMultiScale(image_proc)
        image = image_processing.image_read(original_image)
        _, landmarks = landmark_detector.fit(image, faces)
        for landmark in landmarks:
            for x, y in landmark[0]:
                # Facemark returns float coordinates; OpenCV drawing
                # functions require integer pixel positions.
                cv.circle(image, (int(x), int(y)), 1, (255, 0, 0), 3)
        for face in faces:
            x, y, w, d = face
            cv.rectangle(image, (x, y), (w + x, d + y), (255, 255, 255), 2)
        cv.imwrite("test{}.jpg".format(face_counter), image)
Example no. 6
0
		rollPID.setpoint = targetX
		pitchPID.setpoint = targetY
		
		roll_input = float(params["k_roll_p"]) * roll + roll_acceleration + rollPID(xGPS)
		pitch_input = float(params["k_pitch_p"]) * pitch - pitch_acceleration + pitchPID(-yGPS)

		front_left_motor_input = float(params["k_vertical_thrust"]) + vertical_input - roll_input - pitch_input + yaw_input
		front_right_motor_input = float(params["k_vertical_thrust"]) + vertical_input + roll_input - pitch_input - yaw_input
		rear_left_motor_input = float(params["k_vertical_thrust"]) + vertical_input - roll_input + pitch_input - yaw_input
		rear_right_motor_input = float(params["k_vertical_thrust"]) + vertical_input + roll_input + pitch_input + yaw_input

		mavic2proHelper.motorsSpeed(robot, front_left_motor_input, -front_right_motor_input, -rear_left_motor_input, rear_right_motor_input)
	
	return

# Build the waypoint matrix from the image-processing solution:
# row 0 = x, row 1 = y, row 2 = third coordinate (presumably altitude for
# the drone — TODO confirm against the controller above).
xSol, ySol = image_processing.process()
Sol = np.concatenate(([xSol], [ySol]), axis=0)
# Third row: constant value 1.0 for every solution point.
Sol = np.concatenate((Sol, np.ones((1, np.size(Sol, axis=1)))), axis=0)
# Prepend two columns at the origin with third coordinates 0.1 and 1.0.
Sol = np.concatenate((np.array([[0,0],[0,0],[0.1, 1]]), Sol), axis=1)
# Append a final column: same x/y as the last point, third coordinate 0.1.
Sol = np.concatenate((Sol, np.array([[Sol[0, -1]],[Sol[1, -1]],[0.1]])), axis=1)
print("---Solution---")
print(Sol)

speed=1.2
i=1
# Walk consecutive waypoint columns and derive a travel time from the 3D
# distance between them (first leg uses a much slower rate, 0.02).
# NOTE(review): `i` is never incremented in the code visible here — either
# the loop body is truncated at this point or this loops forever; confirm
# against the full file. Also note `time` shadows the stdlib module name.
while i<np.size(Sol, 1):
    distance =  math.sqrt((Sol[0, i]-Sol[0, i-1])**2+(Sol[1, i]-Sol[1, i-1])**2+(Sol[2, i]-Sol[2, i-1])**2)
    if(i==1):
        time =distance/0.02
    else:
        time =distance/speed
Example no. 7
0
def run_image_proc(x):
    """Run the image-processing pipeline once and compare it to the manual mark.

    Fetches the test images, produces a mark from the source image and its
    mask, then hands everything to compare_results for evaluation.

    Note: the `x` parameter is unused; it is kept so existing callers
    (e.g. a map/pool driver passing an index) keep working.
    """
    source_img, manual_mark, mask_img, _unused_name = get_images()
    produced_mark = process(source_img, mask_img)
    compare_results(manual_mark, produced_mark, mask_img, "image processing result")
Example no. 8
0
 def query(self, file):
     """Run the network on an image file and report the predicted class.

     Args:
         file: image source accepted by image_processing.process.

     Returns:
         Index of the highest-scoring network output. The index is still
         printed (as before); returning it as well is backward-compatible
         (the method previously returned None) and lets callers actually
         use the prediction.
     """
     arr = image_processing.process(file)
     out = self.__query(arr)
     predicted_class = numpy.argmax(out)
     print(predicted_class)
     return predicted_class