예제 #1
0
def main(argv):
    """Main capture loop: photograph the board, locate stones, push them to the web DB."""
    # Timestamped database name for real runs; fixed name while debugging.
    if not Config.debug["enabled"]:
        database = str(datetime.datetime.now())
    else:
        database = "stein"

    camera = PiCamera()
    camera.shutter_speed = Config.cam["shutterSpeed"]
    # BUG FIX: res[0] was used for both axes; use res[1] for the second one.
    camera.resolution = (Config.cam["res"][0], Config.cam["res"][1])
    time.sleep(Config.boot["waitTime"])  # let the camera settle before first capture

    while True:
        rawCapture = PiRGBArray(camera)
        if not Config.debug["enabled"]:
            Webserver.CreateStoneTable(database)
        camera.capture(rawCapture, format="bgr")
        img = rawCapture.array
        camera.stop_preview()
        img = Vision.white_balance(img)
        img = Vision.FindBo(img)
        # BUG FIX: `img.any == -1` compared a bound method to -1 (always False,
        # so the guard never triggered). Presumably FindBo signals failure by
        # returning the int -1 (mirroring FindStones below) — TODO confirm.
        if not (isinstance(img, int) and img == -1):
            stones, score = Vision.FindStones(img)
            Webserver.ClearTable(database)
            print(stones)
            print(score)
            if stones != -1:
                # Row 0 carries the score; following rows are the stones.
                Webserver.InsertData(database, ["Nummer", "PosX", "PosY", "Farge"], [0, score[0], score[1], 0])
                rows = []
                for i in range(len(stones[1])):
                    rows.append([i + 1, stones[0][i][0], stones[0][i][1], stones[1][i]])
                for row in rows:
                    Webserver.InsertData(database, ["Nummer", "PosX", "PosY", "Farge"], row)
        time.sleep(Config.boot["updateFreq"])
예제 #2
0
    def output_handler(req, err):
        """Collect OCR results: full-line entries and per-word bounding boxes."""
        for observation in req.results():
            top = observation.topCandidates_(1)[0]
            text = top.string()
            conf = top.confidence()

            def _pixel_box(start, length):
                # Bounding box of a character range, converted from the
                # normalized Vision coordinates into image pixels.
                rect, _ = top.boundingBoxForRange_error_(
                    Quartz.NSRange(start, length), None)
                return Vision.VNImageRectForNormalizedRect(
                    rect.boundingBox(), width, height)

            whole = _pixel_box(0, len(text))
            bx, by = whole.origin.x, whole.origin.y
            bw, bh = whole.size.width, whole.size.height
            # y is flipped: Vision's origin is bottom-left, ours is top-left.
            ocr_data_text.append((bx, height - by - bh, bw, bh, conf, text))

            for match in re.finditer(r"(\S+)", text):
                start, end = match.span()
                word_box = _pixel_box(start, end - start)
                wx, wy = word_box.origin.x, word_box.origin.y
                ww, wh = word_box.size.width, word_box.size.height
                ocr_data_words.append(
                    (wx, height - wy - wh, ww, wh, conf, match.group()))
예제 #3
0
파일: Sea.py 프로젝트: rodelta/PythonBot
def getBuff(box, ref, target=None):
    """Poll screen region `box` until a buff icon appears; return its point.

    If `target` is None all three speed icons are searched, otherwise only
    the given template. The returned point is in parent coordinates.
    """
    sys.stdout.write('\nSearching buff')
    # BUG FIX: `start` was also reset inside the loop, so the reported time
    # only covered the last poll instead of the whole search.
    start = time()

    point = None
    multiple = False
    if target is None:
        targets = (
            Vision.cv.imread(Ref.fast, 0),
            Vision.cv.imread(Ref.ammo, 0),
            Vision.cv.imread(Ref.fair, 0))
        multiple = True
    else:
        refImg = Vision.cv.imread(target, 0)

    while point is None:
        sys.stdout.write('.')
        img = Vision.getCv(Vision.getBmp(box, handle), 2)
        mask = Vision.maskColor(img, 0, -1)
        if multiple:
            for refImg in targets:
                point, _ = Vision.match(mask, refImg)
                if point is not None:
                    # BUG FIX: was `pass`, which let later templates
                    # overwrite a successful match (possibly back to None).
                    break
        else:
            point, _ = Vision.match(mask, refImg)

    point = offset(point, box)  # Convert back to parent coords
    # BUG FIX: `print(...).format(...)` calls .format on None in Python 3.
    print("Buff found at {} in {} sec".format(point[0], (time() - start)))
    return point
예제 #4
0
파일: Sea.py 프로젝트: rodelta/PythonBot
def rowTo(box, coords):
    """Track the rowing bar inside `box` and release the mouse at the target.

    Repeatedly scans for the bar contour, narrowing the search area to the
    bar tip, and releases the button once it comes within 15 px.
    """
    # TODO: Resize
    start = time()
    sys.stdout.write('\nTracking bar')
    done = False
    x, y = coords[0], box[1] + (box[3] - box[1]) / 2
    area = [x, box[1], box[2], box[3]]
    while not done:
        sys.stdout.write('.')
        img = Vision.getCv(Vision.getBmp(area, handle), 2)
        mask = Vision.maskColor(img, 1, -1)
        cnts = Vision.cv.findContours(
            mask.copy(),
            Vision.cv.RETR_EXTERNAL,
            Vision.cv.CHAIN_APPROX_SIMPLE)[-2]

        if len(cnts) > 0:
            # The largest contour's enclosing-circle x is the bar tip.
            c = max(cnts, key=Vision.cv.contourArea)
            (bar, _), _ = Vision.cv.minEnclosingCircle(c)
            area[3] = int(bar) + 5
            if area[3] < 15:
                Movement.leftUp()
                done = True
    # BUG FIX: `print(...).format(...)` calls .format on None in Python 3.
    print("Row completed in {}\n".format(time() - start))
    return True
예제 #5
0
 def takePhoto(self):
     """Capture an image to disk, resize it, and display it in a new Tk window."""
     capture_path = My_Path + "/imageTest.png"
     Vision.storeCapture(capture_path)
     Vision.resizeImage(capture_path, 1000)

     self.top = tk.Toplevel()
     self.top.title("Capture")
     self.canvas = tk.Canvas(self.top, width=1000, height=600)
     self.canvas.pack(fill="both", expand=True)
     self.capture = tk.PhotoImage(file=capture_path)
     self.canvas.create_image(500, 300, image=self.capture)
     # Keep a reference on the canvas so Tk doesn't garbage-collect the image.
     self.canvas.image = self.capture
예제 #6
0
    def analysePlants(self):
        """Run plant-area detection, then show the analysed capture in a new window."""
        #!!!Need to add code to save the area!!!
        Vision.analysePlantArea(My_Path, Tresh_Area)

        self.top = tk.Toplevel()
        self.top.title("Analyse capture")
        canvas = tk.Canvas(self.top, width=1000, height=600)
        canvas.pack(fill="both", expand=True)
        self.canvas = canvas
        self.capture = tk.PhotoImage(file=(My_Path + "/imageTest.png"))
        canvas.create_image(500, 300, image=self.capture)
        # Keep a reference so Tk doesn't garbage-collect the image.
        canvas.image = self.capture
예제 #7
0
def buyLand():
    """Click every coin marker on screen, confirming each purchase via the buy button."""
    box = Vision.getBox(None, Ref.winName)
    img = Vision.getCv(Vision.getBmp(box), False)
    buyMode = getLandBuy(img)
    coin = Vision.cv.imread(Ref.coin, 0)

    for hit in Vision.matchAll(img, coin):
        Movement.mousePos(hit)
        Movement.leftClick()
        Movement.mousePos(buyMode)
        Movement.leftClick()
        sleep(.3)
예제 #8
0
파일: Sea.py 프로젝트: rodelta/PythonBot
def getItem(box, target, absolute=False):
    """Wait until template `target` appears inside `box`; return its match point.

    With `absolute` set, the point is converted to parent coordinates.
    """
    start = time()
    ref = Vision.cv.imread(target, 0)
    point = None
    while point is None:  # waiting for the interface
        sys.stdout.write('.')
        frame = Vision.getCv(Vision.getBmp(box, handle, dcTuple), 2)
        point, _ = Vision.match(Vision.maskColor(frame, 0, -1), ref)
    if absolute:
        point = offset(point, box)
    print("Item found - %s sec " % (time() - start))
    return point
예제 #9
0
파일: civ.py 프로젝트: krazimov/PythonBot
def buyLand():
    """Buy land: for each detected coin icon, click it and then the buy button."""
    window_box = Vision.getBox(None, Ref.winName)
    screen = Vision.getCv(Vision.getBmp(window_box), False)
    buy_btn = getLandBuy(screen)
    template = Vision.cv.imread(Ref.coin, 0)
    matches = Vision.matchAll(screen, template)

    for match in matches:
        Movement.mousePos(match)
        Movement.leftClick()
        Movement.mousePos(buy_btn)
        Movement.leftClick()
        sleep(.3)
예제 #10
0
    def __init__(self, KBpath):
        """Set up the interaction session: state flags, hand detector, image recognizer.

        KBpath: directory that holds (or will hold) the knowledge bases.
        """
        self.KBpath = KBpath
        # Knowledge-base graph; populated later (None until a KB is opened).
        self.g = None

        # Abort the whole program
        self.close_flag = False

        # flags used for action recording
        self.actionVideo = []
        self.video_name = ' '
        self.record_start_flag = False
        self.record_stop_flag = False
        self.record_flag = False

        print(
            '\033[1;33;0m Initializing hand detector. Please cover the squares with your hands... \033[0m'
        )
        # Camera index 0; initDetector calibrates against the covered squares.
        self.hd = Recognizer.handDetector(0)
        self.hd.initDetector()
        self.cur_frame = []
        self.handPosition = 0

        self.IR = Vision.ImageRecognizer('./')

        # mode = (input("Use voice?(Y/N)")).upper()
        # Voice control is hard-disabled here; flip to 'Y' to re-enable speech.
        self.mode = 'N'
        if self.mode == 'Y':
            # SpeechRecognizer initialization
            self.ARGS, self.model = init_Speechrec()
예제 #11
0
 def generate(self, kind=""):
     """Build a Vision.Vision instance from the 'Vision' and 'Detection' config sections."""
     vision_cfg = self.getSection('Vision')
     origin = (float(vision_cfg['x_ref']),
               float(vision_cfg['y_ref']),
               float(vision_cfg['z_ref']))
     # String flags in the config are compared against the literal 'True'.
     vision = Vision.Vision(origin,
                            vision_cfg['header'],
                            vision_cfg['trailer'],
                            vision_cfg['path'],
                            vision_cfg['rotation'] == 'True',
                            vision_cfg['debug'] == 'True',
                            vision_cfg['live'] == 'True')

     det_cfg = self.getSection('Detection')
     vision.canvas = ((float(det_cfg['canvas_x']), float(det_cfg['canvas_y'])),
                      (float(det_cfg['canvas_w']), float(det_cfg['canvas_h'])),
                      float(det_cfg['canvas_a']))
     vision.optical_centre = (float(det_cfg['optical_centre_x']),
                              float(det_cfg['optical_centre_y']))
     vision.highlight = (int(det_cfg['highlight_red']),
                         int(det_cfg['highlight_green']),
                         int(det_cfg['highlight_blue']))
     return vision
예제 #12
0
 def __init__(self):
     """Wire up the robot's subsystems and its message-retrieval thread."""
     # Actuation / behaviour / sensing subsystems.
     self.move = Move()
     self.behave = Behave()
     self.sense = Sense()
     self.vision = Vision()
     self.communicate = Communicate()
     # Background thread that polls messages until stopRetrieving is set.
     self.stopRetrieving = False
     self.retrieveMessagesThread = Thread(target=self.retrieveMessages)
예제 #13
0
def snailInit(processMode):
	"""Initialize all robot subsystems (vision, motors, threads) for the given mode."""
	global Pflag
	Pflag = processMode
	VS.Init(processMode)
	MC.Init()
	Threads.ThreadInit(processMode)
	# NOTE(review): `cmd` is not defined in this function and it is not a
	# parameter — presumably a module-level global; confirm it is assigned
	# before snailInit runs, otherwise this raises NameError.
	MC.SetMotor(cmd,250)
	# Give the motor a moment to spin up before returning.
	time.sleep(1)
 def updatePlantsArea(self):
     """Re-measure the plant area of every occupied bed cell via the vision pipeline."""
     for row in self.listOfRows:
         for bed in row.listOfBeds:
             for line in range(bed.unitQuantityLength):
                 for column in range(bed.unitQuantityWidth):
                     # IDIOM FIX: `!= None` replaced with `is not None`; the
                     # repeated bedMap indexing is hoisted into one lookup.
                     cell = bed.bedMap[line][column]
                     if cell is not None and cell != "Used":
                         #MotorControl.easyGoTo(bed.icorpor.icorporStepMap[line][column])
                         capture = Vision.getCapture()
                         contours = Vision.pipelineGetContours(capture)
                         listOfAreas = Vision.drawAreasBoundingBox(
                             capture, contours, Vision.Tresh_Area)
                         listOfAreasInsideTresh = Vision.analyseAreasProximity(
                             listOfAreas, Vision.Dist_Tresh)
                         listAreaGroup = Vision.findDuplicatesAreas(
                             listOfAreasInsideTresh)
                         """ATTENTION CODE UNDER IS NOT RELIABLE: NEED TO IMPROVE"""
                         cell.area = listAreaGroup[0]
예제 #15
0
	def __init__(self, knownDistance, knownWidth):
		"""Calibrate the camera focal length from a stored reference image.

		knownDistance: distance to the calibration marker when the image was taken.
		knownWidth: real-world width of the calibration marker.
		"""
		self.KNOWN_DISTANCE = knownDistance
		self.KNOWN_WIDTH = knownWidth
		self.image_path = '../tools/calibration/opencv_image_0.png'
		calibration_img = cv2.imread(self.image_path)
		marker = blob.get_blob('yellow', calibration_img).find_marker()
		# Pinhole model: F = (P * D) / W, with P the marker's apparent width.
		self.focalLength = (marker[1][0] * knownDistance) / knownWidth
예제 #16
0
def find_landmark(robot, ID):
    """Search the current camera frame for landmark `ID`.

    Scans the frame for red, green and blue blobs and asks the Landmarks
    helper for the pattern matching `ID`. Returns a 3x1 numpy column
    [range, bearing, ID] on success, or [0.0, 0.0, -1] when not found.
    """
    # Scan each color pattern in the current frame to try and find a landmark.
    img = robot.stream.read()

    # CLEANUP: the `x = []` placeholders immediately overwritten before each
    # detection were dead code and have been removed.
    detectRed = Vision.get_blob('red', img)
    red_blobs = detectRed.getMultipleFeatures(160, 240)

    detectGreen = Vision.get_blob('green', img)
    green_blobs = detectGreen.getMultipleFeatures(160, 240)

    detectBlue = Vision.get_blob('blue', img)
    blue_blobs = detectBlue.getMultipleFeatures(160, 240)

    # Initialize the landmark finder class with our three blob sets.
    get_landmark = Landmarks(red_blobs, green_blobs, blue_blobs)
    landmark_bearing, landmark_cx, landmark_cy, landmark_area, landmark_marker = get_landmark.position(
        ID)

    if robot.debug:
        detectGreen.drawMultipleFeatures(green_blobs)
        detectRed.drawMultipleFeatures(red_blobs)
        detectBlue.drawMultipleFeatures(blue_blobs)
        cv2.putText(img, 'Landmark: {}, {}'.format(landmark_cx,
                                                   landmark_cy), (50, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2, cv2.LINE_AA)

        cv2.imshow('image', img)

    # IDIOM FIX: `if not (landmark_bearing == 0)` simplified.
    if landmark_bearing != 0:
        # Marker apparent width -> distance, then /100 to convert to meters.
        landmark_range = robot.measure.distance_to_camera(
            landmark_marker[1][0]) / 100
        print('[SLAMBOT][DEBUG] LANDMARK %s: %s, %s' %
              (ID, landmark_range, landmark_bearing))
        return np.array([[landmark_range, landmark_bearing, ID]]).T

    return np.array([[0.0, 0.0, -1]]).T
예제 #17
0
파일: Sea.py 프로젝트: rodelta/PythonBot
def offset(coords, pad=None, reverse=False):
    """Translate a point (x, y) or box (x1, y1, x2, y2) by a window origin.

    `pad` supplies the origin via its first two elements; when omitted, the
    game window's box is looked up. With `reverse`, the origin is subtracted
    instead of added. A None `coords` passes through as None.
    """
    if coords is None:
        return None
    if pad is None:
        pad = Vision.getBox(None, Ref.winName)
    sign = -1 if reverse else 1
    dx, dy = sign * pad[0], sign * pad[1]
    if len(coords) == 2:  # point
        return coords[0] + dx, coords[1] + dy
    # Otherwise assume a 4-element box.
    return coords[0] + dx, coords[1] + dy, coords[2] + dx, coords[3] + dy
예제 #18
0
파일: Sea.py 프로젝트: rodelta/PythonBot
def sail(winName=None):
    """Row repeatedly in the sea mini-game of window `winName`.

    Locates the row button, then for nine rounds waits for the speed buff
    and drags the bar toward it, releasing the mouse between rounds.
    """
    if winName is None:
        winName = Ref.winName
    box = Vision.getBox(None, winName)
    # CLEANUP: the result was bound to an unused local (`screen`); the call
    # is kept in case the capture has side effects.
    Vision.getCv(Vision.getBmp(box), 1)

    sys.stdout.write('\nSearching Row btn')
    rowBtn = getItem(box, Ref.row)
    buffBox = getRefBox(rowBtn, Ref.buffBox)
    barBox = getRefBox(rowBtn, Ref.barBox)
    # PY3 FIX: xrange does not exist in Python 3; range loops identically here.
    for _ in range(1, 10):
        Movement.leftUp()
        buff = getBuff(offset(buffBox), rowBtn, Ref.fast)
        Movement.leftUp()
        Movement.mousePos(rowBtn)
        Movement.leftDown()
        sleep(.8)
        rowTo(offset(barBox), buff)
        Movement.leftUp()
        sleep(.8)

    Vision.dropDc(handle, dcTuple)
    return True
예제 #19
0
def draw_rect(disp, r):
    """Draw rectangle `r` on surface `disp`, skipping it when fully off-screen."""
    corners = [draw_transform_point(r.x1, r.y1),
               draw_transform_point(r.x2, r.y2),
               draw_transform_point(r.x3, r.y3),
               draw_transform_point(r.x4, r.y4)]
    xs = [p[0] for p in corners]
    ys = [p[1] for p in corners]

    # Cull rectangles lying entirely outside the 800x800 viewport.
    if all(x > 800 for x in xs) or all(x < 0 for x in xs):
        return
    if all(y > 800 for y in ys) or all(y < 0 for y in ys):
        return

    red, green, blue = Vision.wavelength_to_rgb(r.dop_col)
    pygame.draw.polygon(disp, (red, green, blue), corners)
예제 #20
0
파일: Sea.py 프로젝트: rodelta/PythonBot
def rowToByPixel():
    """Variant of rowTo that tracks the bar by scanning pixel colors directly."""
    handle = Vision.getHandle()
    winDC  = Vision.wgui.GetDC(handle)
    done = False
    # NOTE(review): `coords` and `box` are not defined anywhere in this
    # function and it takes no parameters — as written this raises NameError.
    # Compare rowTo(box, coords), which receives both as arguments.
    x, y = coords[0], box[1] + (box[3] - box[1]) / 2
    # NOTE(review): the right-hand side reads `bar` before it has ever been
    # assigned (the tuple RHS is evaluated first), so this line also raises
    # NameError. Likely intent: bar = box[2]; span = bar - 20.
    bar, span  = box[2], bar - 20

    while done is False:
        for px in range(x, bar):
            color = Vision.wgui.GetPixel(winDC, px, y)
            # 16777215 == 0xFFFFFF: a pure-white pixel marks the bar tip.
            if color == 16777215:
                bar, span  = px, bar - 10
                break
        if (bar - x) < 15:
            Movement.leftUp()
            done = True
    Vision.wgui.ReleaseDC(handle, winDC)
    return True
예제 #21
0
# Map geometry: the right 200 px of the window are reserved for UI panels.
map_width = win_width-200
map_height = win_height
robot_width = 20
robot_height = 20
# Start the robot at the centre of the map.
robot_pos_x = map_width/2
robot_pos_y = map_height/2
robot_image = "gui/rocket.png"


win = MainWindow(win_width, win_height, "Monte Carlo Localization - Demo application")
win.FPS = 30
win.set_option("seed", map_seed)

# Vision sensor patch size (width, height).
vs = (15,15)

# NOTE(review): `vision` and `motion` appear superseded by `vision_sensor`
# and `motion_sensor` constructed below with explicit fps — these two look
# redundant; confirm before removing.
vision = Vision.Sensor(vs)
motion = Motion.Sensor()

# OPTIONS for noise: GAUSSIAN, SALT, PEPPER, SALT_PEPPER, SPECKLE
# OPTIONS for model: MDIFF, SQDIFF, CCORR, CCOEFF
vision_sensor = Vision.Sensor(vs, fps=5)
vision_sensor_noise = Vision.SensorNoiseModel.GAUSSIAN(0.5)
vision_sensor.set_noise_model(vision_sensor_noise)
vision_model = Vision.ObservationModel.MDIFF

# OPTIONS for noise: GAUSSIAN, ADVANCED
# OPTIONS for model: GAUSSIAN, ADVANCED
motion_sensor = Motion.Sensor(fps = 15)
motion_sensor_noise = Motion.SensorNoiseModel.ADVANCED( (0.3,0.5) )
motion_sensor.set_noise_model(motion_sensor_noise)
motion_model = Motion.ObservationModel.ADVANCED
예제 #22
0
파일: civ.py 프로젝트: krazimov/PythonBot
def getLandBuy(img):
    """Locate the land-buy button in `img`; return a point 100 px to its right."""
    template = Vision.cv.imread(Ref.landBtn, 0)
    match_point, _ = Vision.match(img, template)
    return (match_point[0] + 100, match_point[1])
예제 #23
0
파일: demomenu.py 프로젝트: joesox/raspiAI
 def _visionvideo(self):
     """Demo Vision class (video demo)."""
     # CLEANUP: the return value was bound to an unused local.
     Vision.demo2()
예제 #24
0
파일: demomenu.py 프로젝트: joesox/raspiAI
 def _visionphoto(self):
     """Demo Vision class (photo demo)."""
     # CLEANUP: the return value was bound to an unused local.
     Vision.demo()
예제 #25
0
# $Header: /opt/cvs/python/packages/share1.5/Vision/bin/runVision.py,v 1.48 2008/07/30 18:33:40 vareille Exp $
# $Id: runVision.py,v 1.48 2008/07/30 18:33:40 vareille Exp $

# vision can be launched from a python shell like this:
#import Vision; Vision.runVision()

import sys
import Vision

# Under IPython the '__IP' name exists at module level; in that case let
# IPython keep ownership of the interpreter.
Vision.runVision(sys.argv, ownInterpreter='__IP' not in dir())

예제 #26
0
파일: Sea.py 프로젝트: krazimov/PythonBot
    # Title of the emulator window to capture from.
    winName = "Nox App Player"

    # Template bitmaps matched against screenshots of the sea mini-game.
    folder  = "Templates/Vikings/Sea/"
    dist = folder + "distanceSea.bmp"
    town = folder + "townNameSea.bmp"
    ammo = folder + "ammoIcon.bmp"
    slow = folder + "speed1Icon.bmp"
    fair = folder + "speed2Icon.bmp"
    fast = folder + "speed3Icon.bmp"
    row  = folder + "row.bmp"

    # Search boxes as offsets relative to the row button position.
    buffBox = -810, - 75, - 55, + 130
    barBox = -750, + 20, - 70, + 24

#  globals
# Window handle and device-context tuple shared by the capture helpers.
handle = Vision.getHandle()
dcTuple = Vision.getDc()


def sail(winName=None):
    """Row in the sea mini-game of window `winName` (defaults to Ref.winName)."""
    if winName is None:
        winName = Ref.winName
    box = Vision.getBox(None, winName)
    screen = Vision.getCv(Vision.getBmp(box), 1)

    sys.stdout.write('\nSearching Row btn')
    rowBtn = getItem(box, Ref.row)
    buffBox = getRefBox(rowBtn, Ref.buffBox)
    barBox = getRefBox(rowBtn, Ref.barBox)
    # NOTE(review): this loop body looks truncated — it only releases the
    # mouse and never uses buffBox/barBox or `x`; confirm against the
    # complete version of this function.
    for x in xrange(1, 10):
        Movement.leftUp()
예제 #27
0
import Communication
import Vision

from threading import Thread

best_targ = None


def send_data():
    """Forever publish the latest best target to the network tables."""
    while True:
        Communication.update_tables(best_targ)


def find_targets():
    """Forever run vision and store the best target in the module-global best_targ."""
    while True:
        global best_targ
        best_targ = Vision.find_vision_targets()


Communication.init()
Vision.init()

# BUG FIX: Thread(send_data()) CALLED the function immediately — blocking
# forever on its infinite loop — instead of handing it to the thread as a
# target. Same for find_targets.
coms = Thread(target=send_data)
vision = Thread(target=find_targets)

# BUG FIX: Communication.init() and Vision.init() were called a second time
# here; the duplicate calls have been removed.

coms.start()
vision.start()
예제 #28
0
def get_image(input):
    """Load the image file at `input`, preprocess it, and return it as a numpy array."""
    raw = cv2.imread(input)
    processed = v.preprocess(raw)
    return np.array(processed)
예제 #29
0
            '\\sample_pictures\\test_set_2\\08.jpg'
# Toggle on-screen graph display.
DISPLAY_GRAPH = True
# Recompute the projection from the colored corner markers instead of reusing it.
RECOMPUTE_PROJ = True


def get_image(input):
    """Read the image at path `input`, run the vision preprocess step, return an array."""
    preprocessed = v.preprocess(cv2.imread(input))
    return np.array(preprocessed)



# Load the working frame and build the black mask (board outline/obstacles).
frame =  get_image(input_path)
filpoly = v.colorfilter("BLACK")
maskpoly = filpoly.get_mask(frame)
if RECOMPUTE_PROJ:
    # Corner markers by color: red -> BL, green -> TL, blue -> BR, yellow -> TR.
    filr = v.colorfilter("RED")
    filg = v.colorfilter("GREEN")
    filb = v.colorfilter("BLUE")
    fily = v.colorfilter("YELLOW")
    maskr= filr.get_mask(frame)
    # NOTE(review): this getCentroid call is repeated on the next line and
    # `r` is not used in the visible code — looks redundant; confirm before
    # removing (the script may continue beyond this excerpt).
    r = v.getCentroid(maskr)
    BL,f = v.getCentroid(maskr)
    maskg= filg.get_mask(frame)
    TL,f = v.getCentroid(maskg)
    maskb= filb.get_mask(frame)
    BR,f = v.getCentroid(maskb)
    masky= fily.get_mask(frame)
    TR,f = v.getCentroid(masky)
예제 #30
0
파일: demomenu.py 프로젝트: joesox/raspiAI
 def _visionphoto(self):
     """Demo Vision class (photo demo)."""
     # CLEANUP: the return value was bound to an unused local.
     Vision.demo()
예제 #31
0
파일: demomenu.py 프로젝트: joesox/raspiAI
 def _visionvideo(self):
     """Demo Vision class (video demo)."""
     # CLEANUP: the return value was bound to an unused local.
     Vision.demo2()
예제 #32
0
def test():
    """Smoke-test the vision pipeline on a sample picture."""
    input_path = '../sample_pictures/test_set_2/03.jpg'
    img = v.get_image(input_path)
    imgprep = v.preprocess(img)
    # NOTE(review): `vis` and `rob` are constructed but never used — this
    # function looks truncated; confirm the remainder of the test body.
    vis = v.Vision(imgprep)
    rob = np.array([500, 500, math.pi / 2])
예제 #33
0
    def initSession(self):
        """Activate and initialize an interaction session with an ontology knowledge base.

        Loops until a knowledge base is open. 'new <title>' creates one
        (offering to replace an existing KB of the same name); 'open <title>'
        loads an existing one. On success self.g holds the KB graph and
        self.IR is re-pointed at the KB directory.
        """
        while True:
            # Enquiring...
            print(
                "\033[1;33;0m Please select a scenario ... \n ( Say 'new kitchen' to new a knowledge base for kitchen situation)\n ( Say 'open kitchen' to open a existing knowledge base)\n  \033[0m"
            )
            if self.mode == 'Y':
                # BUG FIX: ARGS and model were referenced as bare names here,
                # but __init__ stores them on self (self.ARGS, self.model),
                # so voice mode raised NameError.
                order = (input_voice_secure(self.ARGS, self.model,
                                            "waiting for order...")).lower()
            else:
                order = (input("waiting for order...")).lower()

            # Create a new knowledge base
            match = re.match('new', order)
            if match is not None:
                title = order[4:]
                # NOTE(review): when no title is given, self.title keeps the
                # empty string while the local `title` falls back to a
                # timestamp — confirm whether self.title should follow suit.
                self.title = title
                if len(title) == 0:
                    # No title specified: default to the current local time.
                    title = time.strftime('%Y-%m-%d %H:%M:%S',
                                          time.localtime(time.time()))

                filep = os.path.join(self.KBpath, title)
                print('\033[1;33m [Note] The path: \033[0m', end='')
                print(filep)
                create_flag = True

                if os.path.exists(filep):
                    print("\033[1;31m Knowledge base " + title +
                          " already exists! \033[0m")
                    print(
                        "\033[1;33m Would you like to delete the existing KB and create a new one? [Y/N] \033[0m"
                    )
                    print(
                        "\033[1;33m-If you say 'yes', the current one will be deleted and a new one with the same title will be created; \033[0m"
                    )
                    print(
                        "\033[1;33m-If you say 'no', the current one will be opened and loaded into memory \033[0m"
                    )
                    while True:
                        ans = input()
                        if ans == 'y' or ans == 'yes':
                            print(
                                "\033[0;31m [Note] The existing KB has been deleted \033[0m"
                            )
                            shutil.rmtree(filep)
                            break
                        elif ans == 'n' or ans == 'no':
                            self.load_KB(os.path.join(filep, title + '.nxkb'))
                            create_flag = False
                            break
                        else:
                            print(
                                '\033[1;31m [Warning] Invalid order! \033[0m')

                if create_flag:
                    # Create a new KB: graph plus Objects/ and Motions/ dirs.
                    self.g = nx.DiGraph(title=title,
                                        node_number_dict={'tool': []})
                    filename = os.path.join(self.KBpath, title)
                    os.mkdir(filename)
                    filename = os.path.join(self.KBpath, title, 'Objects')
                    os.mkdir(filename)
                    filename = os.path.join(self.KBpath, title, 'Motions')
                    os.mkdir(filename)
                    filename = os.path.join(self.KBpath, title,
                                            title + '.nxkb')
                    self.save_KB(filename)
                    print(
                        "\033[1;34m [Note] Knowledge base created successfully...\033[0m"
                    )

            # Open an existing knowledge base
            match = re.match('open', order)
            if match is not None:
                title = order[5:]
                self.title = title
                filep = os.path.join(self.KBpath, title, title + '.nxkb')
                try:
                    self.load_KB(filep)
                except FileNotFoundError:
                    print("[Note] Knowledge base file", filep,
                          "not found. Please choose your KB again...")

            if self.g is not None:
                print(
                    "\033[1;34;0m [Note] Knowledge base opened successfully...\n \033[0m"
                )
                print('\033[1;34;0m <-------------------------- ' + title +
                      ' --------------------------> \033[0m')
                # Re-point the image recognizer at the opened KB directory.
                self.IR = Vision.ImageRecognizer(self.KBpath, title)

                return
            else:
                print(
                    '\033[0;31m [Warning] Invalid order, Please say it again ... \033[0m'
                )
예제 #34
0
def find_targets():
    """Forever run vision and store the best target in the module-global best_targ."""
    while True:
        global best_targ
        best_targ = Vision.find_vision_targets()
예제 #35
0
        logger.debug("User interrupt (main)")
        interrupted = True
    logger.debug("Main terminated with interrupt = " + str(interrupted))
    return interrupted


# This is the main application to be called to run the whole robot
if __name__ == '__main__':

    logger.debug("Starting control thread")
    # BUG FIX: log message typo "senor" -> "sensor".
    logger.debug("Starting sensor thread")
    s = Sense.Sense()
    s.start()
    c = Control.Control(s)
    c.start()
    v = Vision.Vision()
    v.start()
    # Serve until main() reports a user interrupt.
    while True:
        logger.info("Starting web server")
        interrupted = main()
        if interrupted:
            break
    # Signal termination
    logger.info("User interrupt")
    s.terminate()
    c.terminate()
    v.terminate()
    # BUG FIX: log message had an unbalanced parenthesis.
    logger.debug("Main loop finished (__main__)")
    # Wait for actual termination (if needed)
    c.join()
    s.join()
예제 #36
0
#include files for Robot
import gym  #OpenAI API
import Movement
import Data
import Vision
import Height
import Hands

#include files for Verical Farm
import Farm
import Temperture
import Humidity
from Lighting import Lighting

#Objects for the Robot
CV = Vision.Vision()
# NOTE(review): these assignments shadow the imported Height and Hands
# modules with instances, making the modules unreachable afterwards —
# consider distinct instance names.
Height = Height.Height()
Hands = Hands.Hand()


def testRobot():
    """Exercise each robot subsystem once as a smoke test."""
    print("Hello, I'm FarmBot")
    CV.testVision()
    Height.testHeightMovement()
    Hands.testHands()


#Objects for the Farm
# NOTE(review): the Farm module is likewise shadowed by this instance.
Farm = Farm.Farm()
Temp = Temperture.Temperature()
Humdit = Humidity.Humidity()