import ctypes

from botlib.bot import Bot


def initializeRob():
    global bot
    bot = Bot()
    bot.calibrate()
    # Load the ultrasonic library and declare the return type of measure().
    sonic = ctypes.CDLL("/usr/local/lib/libsonic.so")
    sonic.measure.restype = ctypes.c_double
    sonic.initalize()
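
The fragment declares the return type of measure() but never calls it, and the sonic handle stays local to initializeRob(). A minimal sketch of taking a reading with the same library, assuming measure() takes no arguments (the fragment shows neither its signature nor the unit of the returned value):

def read_distance():
    # Assumption: measure() takes no arguments; restype is declared as
    # c_double, so ctypes converts the result to a Python float.
    sonic = ctypes.CDLL("/usr/local/lib/libsonic.so")
    sonic.measure.restype = ctypes.c_double
    return sonic.measure()

print("distance reading:", read_distance())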
Example #2
import time

from botlib.bot import Bot


class HannoverMesse:
    def __init__(self):
        self._bot = Bot()
        self._bot.calibrate()

        self._linetracker = self._bot.linetracker()
        #self._objdetector = self._bot.objectdetector()

    def start(self):
        input('press key to start')

        self._bot.drive_power(20)
        self._linetracker.autopilot(True)

        try:
            # Let the autopilot follow the line for about 60 seconds.
            for i in range(60):
                #objs = self._objdetector.detect('../classifiers/palette.xml')
                #print(objs)
                time.sleep(1)
        except KeyboardInterrupt:
            pass

        self._linetracker.autopilot(False)
        self._bot.drive_power(0)

    @staticmethod
    def run():
        HannoverMesse().start()
Example #3
from botlib.bot import Bot


def main():
    bot = Bot()
    bot._camera.start()
    bot._camera.enable_preview()

    # Keep the preview open until 'q' is entered.
    while input('press q to quit') != 'q':
        pass

    bot._camera.stop()
Example #4
from io import BytesIO
from time import sleep

from botlib.bot import Bot
from brickpi3 import BrickPi3
# Note: the import for Broker is not shown in this fragment.


def captureImage():
    # bot.camera()._cam.capture(filePath)

    try:
        print("Capturing image")
        buff = BytesIO()
        bot.camera()._cam.capture(buff, format='jpeg')

        print("Sending images")
        buff.seek(0)
        broker.send_file('test_channel', buff.read())
    except Exception as ex:
        print(ex)


print("Initializing BotLib")
bot = Bot()
broker = Broker('gustav', 'gruppe11')


def executeImageCapture():
    captureImage()

    bp = BrickPi3()
    for i in range(3):
        print("Iteration {}/3".format(i + 1))

        # Drive forward for two seconds, pause briefly, then take the next picture.
        bp.set_motor_power(bp.PORT_B, 40)
        sleep(2.0)
        bp.set_motor_power(bp.PORT_B, 0)
        sleep(0.5)
        captureImage()
Example #5
import time
from time import sleep

import brickpi3
import cv2

from botlib.bot import Bot


class Ampelerkennung:
    APPROACH_POWER, DEFAULT_POWER = 30, 30
    RIGHT_MIN, RIGHT_MAX = 20, 40
    FRONT_MIN, FRONT_MAX = 0, 30
    COLLECT_TIMES = 5

    def __init__(self):
        self._bot = Bot()
        self._bot.calibrate()

    def follow_line(self):
        from multiprocessing import Process

        # This endless iterator yields steering corrections
        # that keep the bot on the line.
        linetracker = self._bot.linetracker()
        self._track_paused = False

        def follow():
            for improve in linetracker:
                if improve is not None:
                    self._bot.drive_steer(improve)
                    sleep(0.1)

                # If line tracking has been paused, spin in a wait loop.
                # TODO: This is fairly inefficient.
                while self._track_paused:
                    sleep(0.1)

        self._track_thread = Process(group=None, target=follow, daemon=True)
        self._track_thread.start()

    @staticmethod
    def detect_red_color(img):  # Detect red colour (via brightness).
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

        red = cv2.inRange(hsv, (0, 0, 255), (255, 40, 255))

        target = cv2.bitwise_and(img, img, mask=red)

        return target

    @staticmethod
    def detect_green_color(img):  # Detect green colour.
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

        green = cv2.inRange(hsv, (35, 127, 127), (50, 255, 255))

        target = cv2.bitwise_and(img, img, mask=green)

        return target

    @staticmethod
    def detect_circles(target_img):  # Compute and return the centre points of the contours.
        gray = cv2.cvtColor(target_img, cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(gray, 60, 255, cv2.THRESH_BINARY)

        if not ret:
            print("thresholding failed")
            return None

        cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_TREE,
                                   cv2.CHAIN_APPROX_SIMPLE)

        x_values = []
        y_values = []
        if not cnts:
            print("no contours found")
            return None
        for c in cnts:
            M = cv2.moments(c)

            # Skip degenerate contours with zero area.
            if M["m00"] != 0:
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])
            else:
                continue
            x_values.append(cX)
            y_values.append(cY)

        xy = list(zip(x_values, y_values))
        return xy

    @staticmethod
    def draw_rectangle(image, rectangle, rot):  # Draw the rectangle in the matching colour (red if rot is True, otherwise green).
        if rot:
            farbe = [0, 0, 255]
        else:
            farbe = [0, 255, 0]

        x, y, w, h = rectangle
        image = cv2.rectangle(image, (x, y), (x + w, y + h),
                              farbe,
                              thickness=2)

    @staticmethod
    def detect_dark_rectangle(img, circles):
        # Detect a dark rectangle that contains one of the circle centres
        # and whose width matches the empirical values for a distance of
        # roughly 10 cm to the traffic light.

        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        blur = cv2.medianBlur(img_gray, 5)

        thresh = cv2.adaptiveThreshold(blur, 255,
                                       cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY, 11, 2)

        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)

        rectangle_values = []
        w_values = []

        for contour in contours:
            approx = cv2.approxPolyDP(contour,
                                      0.1 * cv2.arcLength(contour, True), True)

            x = approx.ravel()[0]
            y = approx.ravel()[1]

            # Quadrilaterals only: keep those that contain a circle centre
            # and have a plausible width.
            if len(approx) == 4:
                x, y, w, h = cv2.boundingRect(approx)

                for x_circle, y_circle in circles:
                    if x < x_circle < x + w and y < y_circle < y + h and 10 < w < 40:
                        rectangle_values.append([x, y, w, h])
                        w_values.append(w)

        if not rectangle_values:
            return None

        w_sorted = sorted(w_values)

        for i in range(len(rectangle_values)):
            if len(rectangle_values) == 1:
                return rectangle_values[0]
            if rectangle_values[i][2] == w_sorted[1]:
                return rectangle_values[i]

    # Main
    def run(self):
        BP = brickpi3.BrickPi3()

        cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)

        if not cap.isOpened():  # Check whether the camera is ready.
            print("camera not open yet, reopening")
            cap.open(0)

        #Motor._bp.set_motor_power(BP.PORT_B, 20)
        print("a")
        self.follow_line()
        print("b")
        start = time.time()

        while cap.isOpened():
            end = time.time()

            if end - start > 20:  # Stop after 20 seconds.
                break

            ret, frame = cap.read()
            if not ret:
                print("Frame could not be captured")
                break

            height = frame.shape[0]
            width = frame.shape[1]
            midx = int(width / 2)

            # Focus on the right half of the image only.
            frame = frame[0:height, midx:width]

            target_green = self.detect_green_color(frame)
            target_red = self.detect_red_color(frame)

            circles_green = self.detect_circles(target_green)
            circles_red = self.detect_circles(target_red)

            if not circles_green and not circles_red:
                continue

            final_image = frame.copy()

            if circles_green:
                rectangles_green = self.detect_dark_rectangle(frame, circles_green)

                # Green light detected inside a rectangle -> keep driving straight ahead.
                if rectangles_green is not None:
                    green_image = frame.copy()

                    self.draw_rectangle(green_image, rectangles_green, False)
                    final_image = green_image
                    print("Green traffic light")
                    # Motor._bp.set_motor_power(BP.PORT_B, 20)

            if circles_red:
                rectangles_red = self.detect_dark_rectangle(frame, circles_red)

                # Red light detected inside a rectangle -> stop.
                if rectangles_red is not None:
                    red_image = frame.copy()

                    self.draw_rectangle(red_image, rectangles_red, True)
                    final_image = red_image
                    print("Red traffic light")
                    # Motor._bp.set_motor_power(BP.PORT_B, 0)

            cv2.imshow("Ampel", final_image)
            cv2.waitKey(1)

        #Motor._bp.set_motor_power(BP.PORT_B, 0)

        cap.release()
        cv2.destroyAllWindows()
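
The example does not show how the class is started; a minimal sketch of an assumed entry point (the __main__ guard below is not part of the original):

if __name__ == '__main__':
    # Assumed entry point: calibrates the bot in __init__, then runs the
    # traffic-light loop for up to 20 seconds.
    Ampelerkennung().run()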
Example #6
    def __init__(self):
        self._bot = Bot()
        self._bot.calibrate()
Example #7
#!/usr/bin/python3

from botlib.bot import Bot
from botlib.control import Action, REMOTE_PORT
from readchar import readkey, key

bot = Bot()
power, steer = 0, 0.0


def clamp(vmin, v, vmax):
    return max(vmin, min(v, vmax))


def stop():
    print('stopping...')
    bot.stop_all()


def create_server():
    import socket
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(('', REMOTE_PORT))
        s.listen()
        print('listening...')
        con, addr = s.accept()
        print('connected...')
        with con:
            while True:
                inp = con.recv(1024).decode('ascii')
                if len(inp) == 0:
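
The snippet above is cut off inside the receive loop. Purely as a hedged sketch (not the elided original), the received characters could be mapped onto the drive calls used elsewhere in this collection; the one-character protocol and the value ranges below are assumptions:

def handle_command(ch):
    # Hypothetical mapping; the real protocol is not shown in the fragment.
    global power, steer
    if ch == 'w':
        power = clamp(-100, power + 10, 100)
    elif ch == 's':
        power = clamp(-100, power - 10, 100)
    elif ch == 'a':
        steer = clamp(-1.0, steer - 0.1, 1.0)
    elif ch == 'd':
        steer = clamp(-1.0, steer + 0.1, 1.0)
    else:
        stop()
        return
    bot.drive_power(power)
    bot.drive_steer(steer)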
Example #8
def main():
    bot = Bot()
    bot.calibrate()
Example #9
    def __init__(self):
        self._bot = Bot()
        self._bot.calibrate()

        self._linetracker = self._bot.linetracker()
Example #10
def initializeRob():
    global bot
    bot = Bot()
    bot.calibrate()
Example #11
from botlib.bot import Bot

# checks whether the motors are hooked up correctly

if __name__ == '__main__':
    print('stopping all...')
    Bot().stop_all()
    print('done...')
Example #12
step.add_action(SendImageAction('https://i.imgur.com/4QvA1xc.gif'))
step.add_action(EndStoryAction())
story2.add_step(step)

intent2 = Intent('Bonjour', story2)


# Define a 3rd Intent/Story (To exit the bot)
story3 = Story(first_step='exit_step')
exit_step = Step('exit_step')
exit_step.add_action(ExitMessageAction('Bye bye.'))
story3.add_step(exit_step)

intent3 = Intent('bye|revoir', story3)


# Instantiate a bot
irobot = Bot(interface)

# add intents
irobot.add_intent(intent1)
irobot.add_intent(intent2)
irobot.add_intent(intent3)

# run the bot.
try:
    irobot.run()
except (KeyboardInterrupt, EOFError):
    print('\n\nI Robot has been stopped, have a nice day.\n\n')
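
This example begins mid-script: interface, intent1, and the first steps of story2 are defined above the fragment shown. Purely as an illustration of the same API (the names below are hypothetical, not the elided code), a complete intent/story pair is assembled like this:

# Hypothetical intent/story using the same calls as above.
story_hello = Story(first_step='hello_step')
hello_step = Step('hello_step')
hello_step.add_action(SendImageAction('https://i.imgur.com/4QvA1xc.gif'))
hello_step.add_action(EndStoryAction())
story_hello.add_step(hello_step)

intent_hello = Intent('Salut', story_hello)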

Example #13
def main():
    Bot().calibrate()