Example #1
def button_callback(channel):
    print("Button was pushed!")
    # sample for calling shell commands
    # call("echo MYPASSWORD | sudo -S shutdown -h now", shell=True)
    call("uptime", shell=True)
    GPIO.remove_event_detect(channel)
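This callback assumes call was imported from subprocess and that an edge detect was registered beforehand. A minimal sketch of that setup side, with the pin number chosen purely for illustration:

import time
from subprocess import call

import RPi.GPIO as GPIO

BUTTON_PIN = 10  # hypothetical BCM pin; match your actual wiring

GPIO.setmode(GPIO.BCM)
GPIO.setup(BUTTON_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.add_event_detect(BUTTON_PIN, GPIO.RISING,
                      callback=button_callback, bouncetime=200)
try:
    while True:
        time.sleep(1)  # callbacks fire on RPi.GPIO's event thread
finally:
    GPIO.cleanup()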
Example #2
def setup_button(channel):
    print("{}: RESET GPIO {} for push".format(time.time(), channel))
    GPIO.cleanup(channel)
    GPIO.remove_event_detect(channel)
    GPIO.setup(channel, GPIO.OUT)
    GPIO.output(channel, GPIO.LOW)
    GPIO.setup(
        channel, GPIO.IN, pull_up_down=GPIO.PUD_DOWN
    )  # reconfigure the channel as an input, pulled low (off)
    # the edge can be GPIO.RISING, GPIO.FALLING, or GPIO.BOTH
    GPIO.add_event_detect(channel,
                          GPIO.RISING,
                          callback=button_callback,
                          bouncetime=300)  # fire button_callback on a rising edge
Example #3
File: main.py Project: sun3850/FoodMate
	def goingOn(self):  # TODO: consider passing matchFood in as the callback here
		t = Tracker()
		tracker_thread = Thread(target=t.track_point)
		tracker_thread.start()
		while True:
			if GPIO.event_detected(self.button_pin):
				self.Cx, self.Cy = t.get_track_point()
				# check whether the chopsticks' position falls inside the
				# image's pixel bounds, then print the matched food name
				print(self.Cx, self.Cy)
				self.answer = self.guess.matchFood(self.Cx, self.Cy, self.board.box_x, self.board.box_y)
				print(self.answer)
				sound = Sound(self.answer)

				GPIO.remove_event_detect(self.button_pin)
				time.sleep(0.3)
				GPIO.add_event_detect(self.button_pin, GPIO.FALLING)
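Note that GPIO.event_detected only reports edges on a channel for which add_event_detect has already been called (the callback argument is optional in this polling style). A minimal sketch of that prior registration, assumed to happen during the class's setup, with the pull-up chosen purely for illustration:

GPIO.setup(self.button_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(self.button_pin, GPIO.FALLING)  # no callback: poll with event_detected()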
Example #4
    def blink(self, channel):
        # click/edge state shared with the rest of the program
        global clickState
        global firstTime
        global secondTime
        global program_step
        global print_once_flag
        global rising_edge_flag
        global falling_edge_flag
        global click_once

        if GPIO.input(channel) == GPIO.LOW:
            if program_step == 0:
                falling_edge_flag = 1
                program_step = 1

            if clickState == 0 and program_step == 2:
                firstTime = time.time()  # timestamp of the first click
                clickState = 1
                click_once = 1

            if (clickState == 1 and click_once == 0 and program_step == 2
                    and self.showEnd == 1):
                secondTime = time.time()  # timestamp of the second click
                clickState = 2
                self.showEnd = 0

        elif GPIO.input(channel) == GPIO.HIGH:
            if program_step == 1:
                rising_edge_flag = 1
                program_step = 2

        # re-arm edge detection (on both edges, debounced) for the next event
        GPIO.remove_event_detect(channel)
        GPIO.add_event_detect(gpi_grabimage,
                              GPIO.BOTH,
                              callback=self.blink,
                              bouncetime=200)
Example #5
def main():
    GPIO.setmode(GPIO.BCM)
    #GPIO.setwarnings(False)
    GPIO.remove_event_detect(shutdown_pin)

    print("Starting demo now! Press CTRL+C to exit")

    try:

        while True:
            setup_button(shutdown_pin)
            print('.')
            time.sleep(1)

    except KeyboardInterrupt:
        print("Ctrl-C")
        GPIO.remove_event_detect(shutdown_pin)
    except Exception as e:
        print("Other error or exception occurred: {}".format(e))
        GPIO.remove_event_detect(shutdown_pin)
    finally:
        print("GPIO Cleanup")
        GPIO.cleanup()
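The loop above deliberately re-runs setup_button every second, re-arming detection after the one-shot callback from Example #1 removes it. If that re-arm behavior is not needed, the more common pattern is to register once and idle; a minimal sketch under that assumption:

def main_register_once():
    GPIO.setmode(GPIO.BCM)
    setup_button(shutdown_pin)  # register the edge detect a single time
    try:
        while True:
            time.sleep(1)  # the RPi.GPIO event thread invokes the callback
    except KeyboardInterrupt:
        pass
    finally:
        GPIO.remove_event_detect(shutdown_pin)
        GPIO.cleanup()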
Example #6
def main():
    x_center = display_size[0] / 2
    dead_center = display_size[0] / 2
    global start_time
    global buzz_state
    global buzz_time
    global startSequence
    global steps

    try:
        #process_angle = Process(target=start_angle)
        #process_angle.start()

        setupGPIO()

        arduino.reset_output_buffer()
        #arduino.reset_input_buffer()

        time.sleep(10)

        # x_center is produced in the detection process and consumed in main
        detect_xcenter_conn, main_xcenter_conn = Pipe()

        #inference process
        process_detection = Process(target=object_detection,
                                    args=(detect_xcenter_conn, ))
        process_detection.start()

        time.sleep(10)  # wait for inference to start up

        buzz_state = 0
        buzz_time = 0

        print("-------------TRACKING STARTED---------------")

        last_time = time.time()

        while True:
            if main_xcenter_conn.poll(0.1):
                t1 = time.time()
                print("[main]\t sending fps = ", 1 / (t1 - last_time))
                last_time = t1
                x_center = main_xcenter_conn.recv()
                #print("[main]\t xcenter = ", x_center)
                if 0 < x_center < display_size[0]:
                    send_to_arduino(int(x_center))
                    sent_times.append(time.time())
                    x_error = x_center - 160  # offset from the target x position (160 px)
                    if x_error < 25:
                        theta_list.append(steps[-1] * 0.15 + 0.05 * x_error)
                    else:
                        theta_list.append(steps[-1] * 0.15 + 0.1 * x_error)
                else:
                    print("[main]\t xcenter out of range")

            # buzzer sequencing (disabled):
            # now = time.time()
            # if startSequence and isStarted:
            #     if now - buzz_time > 0.5:
            #         GPIO.output(BUZZ_PIN, 0)
            #         startSequence = 0
            # else:  # buzz every 10 s
            #     now = time.time()
            #     if now - buzz_time > 10:  # time since last buzz
            #         buzz_state = 1
            #         GPIO.output(BUZZ_PIN, buzz_state)
            #         buzz_time = now
            #         print("time of buzz = ", now)
            #     elif buzz_state and now - buzz_time > 0.2:
            #         buzz_state = 0
            #         GPIO.output(BUZZ_PIN, buzz_state)
    except KeyboardInterrupt:
        print("-----------keyboard interrupt---------------")

    finally:
        print("------------------EXITING-------------------")
        GPIO.output(BUZZ_PIN, 0)
        GPIO.remove_event_detect(STEP_PIN)

        arduino.reset_output_buffer()
        arduino.close()
        process_detection.join()

        #LOG angle data
        #diff = []
        #last = 0
        #for a in angles:
        #	diff.append(a-last)
        #	last = a
        #clean angles data
        #global steps
        global timestamps

        #for i in range(1, len(angles)):
        #	if (abs(angles[i]-angles[i-1])>30):
        #		angles[i] = angles[i-1] #replace the spike with the previous angle (will be as if it never changed)

        freq = []
        last_t = 0
        times = []

        for s in steps:
            angles.append(0.15 * s)  # convert step counts to angles (factor 0.15)

        for t in timestamps:
            t = t - start_time
            times.append(t)
            freq.append(t - last_t)  # interval since the previous sample
            last_t = t

        # normalize send timestamps relative to the first send
        if sent_times:
            t0 = sent_times[0]
            sent_times = [t - t0 for t in sent_times]

        plt.plot(times, steps)
        #		plt.plot(timestamps, freq)
        #plt.plot(sent_times, theta_list)
        plt.show()
        #print("angles = ", angles)
        #print("timestamps = ", timestamps)
        #t0 = 0
        for i in range(0, len(angles)):
            print("angles: ", angles[i])  #, "\tdata: ", datalist[i])

        # buzzer end pattern: three short blips, then one long beep
        for _ in range(3):
            GPIO.output(BUZZ_PIN, 1)
            time.sleep(0.05)
            GPIO.output(BUZZ_PIN, 0)
            time.sleep(0.05)
        GPIO.output(BUZZ_PIN, 1)
        time.sleep(0.5)
        GPIO.output(BUZZ_PIN, 0)

        # cleanup all GPIOs
        cleanup_GPIO()
Example #7
def main():

    full_scrn = True

    parser = argparse.ArgumentParser(description='Darknet Yolo V4 Python Detector')
    parser.add_argument("-v", "--video", required=False, default="", help="path to input video file")
    parser.add_argument("-s", "--show_video", required=False, type=str2bool, nargs='?', const=True, default=False, help="False for faster")
    parser.add_argument("-f", "--save_video", required=False, default="", help="save video output as .mp4")

    args = parser.parse_args()

 
    global led_matrix, buzzer_f, buzzer_l, buzzer_r, buzzer_s, buzzer_t
    global metaMain, netMain, altNames
    global fps_time

    configPath = "../trained-weights/reference/yolov4-tiny.cfg"
    weightPath = "../trained-weights/reference/yolov4-tiny.weights"
    metaPath = "../trained-weights/reference/coco.data"
    #configPath = "../track/yolov4-tiny.cfg"
    #weightPath = "../track/yolov4-tiny_final.weights"
    #metaPath = "../track/obj-google.data"
    
    thresh = 0.3
    if not os.path.exists(configPath):
        raise ValueError("Invalid config path `" +
                         os.path.abspath(configPath)+"`")
    if not os.path.exists(weightPath):
        raise ValueError("Invalid weight path `" +
                         os.path.abspath(weightPath)+"`")
    if not os.path.exists(metaPath):
        raise ValueError("Invalid data file path `" +
                         os.path.abspath(metaPath)+"`")
    if netMain is None:
        netMain = darknet.load_net_custom(configPath.encode(
            "ascii"), weightPath.encode("ascii"), 0, 1)  # batch size = 1
    if metaMain is None:
        metaMain = darknet.load_meta(metaPath.encode("ascii"))
    if altNames is None:
        try:
            with open(metaPath) as metaFH:
                metaContents = metaFH.read()
                import re
                match = re.search("names *= *(.*)$", metaContents,
                                  re.IGNORECASE | re.MULTILINE)
                if match:
                    result = match.group(1)
                else:
                    result = None
                try:
                    if os.path.exists(result):
                        with open(result) as namesFH:
                            namesList = namesFH.read().strip().split("\n")
                            altNames = [x.strip() for x in namesList]
                except TypeError:
                    pass
        except Exception:
            pass
    #cap = cv2.VideoCapture(0)

    if args.video != "":
        print("Loading: {}". format(args.video))
        cap = cv2.VideoCapture(args.video, cv2.CAP_GSTREAMER)
    else:
        print("Loading: {}". format(GST_STR))
        cap = cv2.VideoCapture(GST_STR, cv2.CAP_GSTREAMER)
        #cap = cv2.VideoCapture("v4l2src io-mode=2 device=/dev/video0 ! video/x-raw, format=YUY2, width=1920, height=1080, framerate=60/1 !  nvvidconv ! video/x-raw(memory:NVMM), format=(string)I420 ! nvvidconv ! video/x-raw, format=(string)BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink sync=false async=false drop=true")

    #cap = cv2.VideoCapture("test.mp4")

    #cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_OPENGL)
    if full_scrn:
        cv2.namedWindow(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN) 
        cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        cv2.moveWindow(WINDOW_NAME, 0, 0)
        cv2.resizeWindow(WINDOW_NAME, 640, 360)
    else:
        cv2.namedWindow(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN) 
        cv2.resizeWindow(WINDOW_NAME, 640, 360)

    #cap.set(3, video_width)
    #cap.set(4, video_height)
    

    # write the annotated output video to disk if requested
    if args.save_video != "":
        fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
        

        if args.show_video:
            out_video = cv2.VideoWriter( args.save_video , fourcc, 30, (video_width, video_height))
        else:
            out_video = cv2.VideoWriter( args.save_video , fourcc, 30, (darknet.network_width(netMain), darknet.network_height(netMain)))


    #out = cv2.VideoWriter(
     #   "output.avi", cv2.VideoWriter_fourcc(*"MJPG"), 30.0,
     #   (darknet.network_width(netMain), darknet.network_height(netMain)))


    print("Starting the YOLO loop...")

    
    buzzer_f = VibratorThread( [27], 2, 0.5)
    buzzer_l = VibratorThread( [17], 2, 0.3)
    buzzer_r = VibratorThread( [18], 2, 0.3)
    buzzer_s = VibratorThread( [27], 2, 1)
    buzzer_t = VibratorThread( [17,18], 2, 2)    
    
    buzzer_f.start()
    buzzer_l.start()
    buzzer_r.start()
    buzzer_s.start()
    buzzer_t.start()



    # Create an image we reuse for each detect

    if args.show_video:
        darknet_image = darknet.make_image(video_width,video_height,3)
    else:
        darknet_image = darknet.make_image(darknet.network_width(netMain),
                                        darknet.network_height(netMain),3)

    #faulthandler.enable()



    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)
    GPIO.remove_event_detect(shutdown_pin)

    print("Starting demo now! Press CTRL+C to exit")

    
    try: 
        print("Setting Shutdown to LOW")
        GPIO.setup(shutdown_pin, GPIO.OUT) 
        GPIO.output(shutdown_pin, GPIO.LOW)
        ##time.sleep(1)
        GPIO.cleanup(shutdown_pin)
        GPIO.setup(shutdown_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)  # input, pulled low (off)
        GPIO.add_event_detect(shutdown_pin, GPIO.BOTH, callback=button_callback)  # fire on either edge

    finally:
        pass

    while True:
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            # the user closed the window; stop the buzzer and terminate
            if non_stop_buzzer.is_alive():
                try:
                    non_stop_buzzer.stopit()
                    non_stop_buzzer.join()
                except Exception:
                    pass
            break
        prev_time = time.time()
        ret, frame_read = cap.read()

        if not ret:
            print("Video open failed, run:")
            print("sudo systemctl restart nvargus-daemon")
            break
        cv2.imshow(WINDOW_NAME, frame_read)                 
        frame_rgb = cv2.cvtColor(frame_read, cv2.COLOR_BGR2RGB)

        if args.show_video:
            darknet.copy_image_from_bytes(darknet_image,frame_rgb.tobytes())
        else:

            frame_resized = cv2.resize(frame_rgb,
                                    (darknet.network_width(netMain),
                                    darknet.network_height(netMain)),
                                    interpolation=cv2.INTER_LINEAR)

            darknet.copy_image_from_bytes(darknet_image,frame_resized.tobytes())
        
        # run the detector on the prepared image
        detections = darknet.detect_image(netMain, metaMain, darknet_image, thresh=thresh, debug=False)

        #print(detections)

        # edit the code below to perform whatever actions you need
        if args.show_video:
            image = cvDrawBoxes(detections, frame_rgb)
        else:
            image = cvDrawBoxes(detections, frame_resized)

        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        myCustomActions(detections,image)

        cv2.putText(image,
                    "FPS: {:.2f}" .format( (1.0 / (time.time()-prev_time)) ),
                    (10, 25),  cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (0, 255, 0), 2)

        # resize image
        #dim = (640, 480)        
        #resized = cv2.resize(image, dim, interpolation = cv2.INTER_LINEAR)
    
        cv2.imshow(WINDOW_NAME, image)

        if args.save_video != "":
            out_video.write(image)

        print("FPS: {:.2f}".format( 1/(time.time()-prev_time) )  )
        print("**************")


        key = cv2.waitKey(1)
        if key == 27 or key == ord("q"):  # ESC or 'q' exits
            try:
                buzzer_f.stopit()
                buzzer_f.join()
                buzzer_l.stopit()
                buzzer_l.join()
                buzzer_r.stopit()
                buzzer_r.join()
                buzzer_s.stopit()
                buzzer_s.join()
                buzzer_t.stopit()
                buzzer_t.join()
            except Exception:
                pass
            break  # leave the capture loop so resources are released below

        
    cap.release()
    GPIO.cleanup()


    if args.save_video != "":
        out_video.release()
Example #8
    def _init_interrupt(self):
        GPIO.remove_event_detect(self.intPin)
        GPIO.add_event_detect(self.intPin,
                              GPIO.RISING,
                              callback=self._interruptHandler)
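RPi.GPIO invokes the registered callback with the channel number as its only argument, so a bound method like self._interruptHandler must accept it. A minimal sketch of a matching handler; the body is purely illustrative:

    def _interruptHandler(self, channel):
        # runs on RPi.GPIO's event thread for each rising edge on intPin
        print("interrupt on GPIO {}".format(channel))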
Example #9
    def deactivate(self):
        GPIO.remove_event_detect(self.button_pin)
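A deactivate like this usually pairs with an activate that registers the detect in the first place. A minimal sketch of that counterpart, assuming the same class; the edge and bouncetime are illustrative:

    def activate(self, callback):
        GPIO.add_event_detect(self.button_pin,
                              GPIO.FALLING,
                              callback=callback,
                              bouncetime=200)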