Example #1
def processImage():

    test = EdgeDetection()

    if test.debug:
        print "-> preprocessing the image"

    test.path = loadFile()

    if test.path == "":

        if test.debug:
            print "path: -"

        return

    if test.debug:
        print "-> path: ", test.path

    test.detectBorders()

    if test.debug:
        print "-> showing up borders"

    paintBorder(test)

    cv2.imwrite("img/result.png", test.imgCopy)  # write the result image

    print "-> image successfully saved at img/result.png"

    del test.histogram[:]  # remove all the elements in the histogram

    print "-> You can choose another image ..."
Example #2
  def make_dask(darray, output, attribute_class, attribute_type, kernel):

    if output == '2d':

      if attribute_class == 'Amplitude':
        x = SignalProcess()
        darray, chunks_init = SignalProcess.create_array(x, darray, kernel, 
                                                         preview=None)
        darray = darray.T

      if attribute_class == 'CompleTrace':  
        x = ComplexAttributes()
        darray, chunks_init = ComplexAttributes.create_array(x, darray, kernel, 
                                                             preview=None)
        darray = darray.T 

      if attribute_class == 'DipAzm':
        x = DipAzm()
        darray, chunks_init = DipAzm.create_array(x, darray, kernel=None, 
                                                  preview=None)
        darray = darray.T
      
      if attribute_class == 'EdgeDetection':  
        x = EdgeDetection()
        darray, chunks_init = EdgeDetection.create_array(x, darray, kernel, 
                                                         preview=None)
        darray = darray.T      

    if output == '3d':

      if attribute_class == 'Amplitude':
        x = SignalProcess()
        darray, chunks_init = SignalProcess.create_array(x, darray, kernel, 
                                                         preview=None)

      if attribute_class == 'CompleTrace':  
        x = ComplexAttributes()
        darray, chunks_init = ComplexAttributes.create_array(x, darray, kernel, 
                                                             preview=None)

      if attribute_class == 'DipAzm':
        x = DipAzm()
        darray, chunks_init = DipAzm.create_array(x, darray, kernel=None, 
                                                  preview=None)
      
      if attribute_class == 'EdgeDetection':  
        x = EdgeDetection()
        darray, chunks_init = EdgeDetection.create_array(x, darray, kernel, 
                                                         preview=None)

    return(x, darray)
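
A minimal call sketch for the helper above; "seismic" stands in for a dask array you already hold, and the kernel shape mirrors the ones used elsewhere in this module (illustrative, not part of the original source):

    x, darray = make_dask(seismic, output='3d', attribute_class='EdgeDetection',
                          attribute_type='semblance', kernel=(3, 3, 9))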
Example #3
    def __init__(self, naoMotions):
        # no inits
        self.emotionExpressionDict = ["Happy", "Hopeful", "Sad", "Fearful", "Angry",
                                      "Happy2", "Hopeful2", "Sad2", "Fearful2", "Angry2",
                                      "Happy3", "Hopeful3", "Hopeful4",
                                      "Scared1", "Scared2", "Scared3"] #(pickup, touch, high)
                                    
        self.numOE = len(self.emotionExpressionDict)
        self.naoMotions = naoMotions
        self.naoIsSafe = True
        self.naoIsTouched = False
        self.naoIsPickedUp = False
        self.naoIsHighEdge = False

        self.naoSeesHigh = False
        self.naoSeesHighTest = False
        self.waitingForInput = False

        self.wasJustScared = False
        self.scaredAllowed = True
        self.fDB = FoodDBManager()

        NAOip, NAOport = naoMotions.getConnectInfo()
        try:
            self.edgeDetector = EdgeDetection(NAOip, NAOport)
        except Exception as e:
            print "Edge Detector Failed to Create, possibly no camera on unit"
            print e
Example #4
def getBorders(path):
    test = ed.EdgeDetection()

    test.path = path

    test.detectBorders()

    return paintBorder(test)
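
A hypothetical call for this helper; the path is a placeholder:

    result = getBorders("img/sample.png")  # returns whatever paintBorder() yields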
Example #5
 def configure_auto(self):
     # Camera Calibration
     self.cam_calib = CameraCalib(9, 6)
     self.cam_calib.import_calib("calibration_pickle.p")
     # Edge Detection with gradient and color space
     self.edge_detection = EdgeDetection()
     # add function
     self.edge_detection.add_func(Sobel(7, 20, 120, 'x'))
     self.edge_detection.add_func(SobelGratitude(5, 70, 100))
     self.edge_detection.add_func(AbstHLSElement(160, 255, 'S'))
     # Perspective Transformation
     src_range = np.float32([[550, 470], [200, 700], [1100, 700],
                             [750, 470]])
     dst_range = np.float32([[180, 0], [180, 700], [1100, 700], [1100, 0]])
     self.perspec_trans = PerspectiveTransform(src_range, dst_range)
     # Line Detection
     self.line_detection = LineDetection(margin=50)
Example #6
    def __init__(self):

        self.myBackground = 0
        self.test = ed.EdgeDetection()

        if self.test.debug:
            print "-> preprocessing the image"

        self.test.path = ""
Example #7
class MyTest(unittest.TestCase):
    myScraper = Scraper()
    myDb = DataBase('avatar.db')
    collectordb = CollectorDB('collector.db')
    imageCorrection = ImageCorrection()
    edge_detection = EdgeDetection()
    image_warper = ImageWarper()
    image_hasher = ImageHasher()
    test_image = cv2.imread("warped.jpg")
    second_test = cv2.imread("ow.jpg")

    def test(self):
        # test collector string
        self.assertEqual(self.collectordb.check_game("Grand Theft Auto V"),
                         "Grand Theft Auto V")
        # second test
        self.assertNotEqual(self.collectordb.check_game("Grand Theft Auto V"),
                            "Tekken 7")

        # checking hamming distance calculation
        self.assertEqual(
            self.myDb.ham_dst("2d2ec9e064603833", "2d2ec9e064603833"), 0)
        self.assertEqual(
            self.myDb.ham_dst("2d2bf9e56460f833", "2d2ec9e064603836"), 5)

        # no two images should have same hash
        self.assertEqual(self.image_hasher.generate_hash(self.test_image),
                         "2f6c71b19336322a")
        self.assertNotEqual(self.image_hasher.generate_hash(self.second_test),
                            "2f6c71b19336322a")

        # retrieving from db should always be a list
        self.assertIsInstance(self.myDb.fetch_hash("2d2ec9e064603836"), list)
        # edge detection should return an image (numpy array)
        self.assertIsInstance(self.edge_detection.canny_edge(self.second_test),
                              np.ndarray)

        self.assertTrue(
            self.myDb.ham_dst("9897f4b0f0cc0ef8", "dd9237b0704c4ed9"))
Example #8
    def __init__(self):
        super(ImageWidget, self).__init__()

        # data_path = '/homes/jannik/BVSiAB/RoadSegmentation_Tutorial'
        data_path = "/home/jan/Downloads/RoadSegmentation_Tutorial/"
        load_dir_images = "images/"
        load_dir_groundTruth = "ground_truth/"
        data_dir = "data/"
        stump_images = "_im_cr.ppm"
        # stump_images = 'jan.ppm'
        stump_groundTruth = "_la_cr.pgm"

        # objects
        self.bev = BevTransformation()
        self.linefitter = LineFitting()
        self.edgedetection = EdgeDetection()
        self.polygonfitter = PolygonFitting()

        # get list of files in directory
        image_Data_loc = os.path.join(data_path, load_dir_images, "*" + stump_images)
        gt_Data_loc = os.path.join(data_path, load_dir_groundTruth, "*" + stump_groundTruth)
        self.image_Data_files = glob.glob(image_Data_loc)
        self.image_Data_files.sort()
        self.gt_Data_files = glob.glob(gt_Data_loc)
        self.gt_Data_files.sort()
        self.pos = 0

        cvBGRImg = cv2.imread(self.image_Data_files[self.pos])
        height, width, depth = cvBGRImg.shape
        self.qpm = convertIpl(cvBGRImg)
        self.qpm2 = convertIpl(cvBGRImg)
        self.qpm3 = convertIpl(cvBGRImg)
        self.qpm4 = convertIpl(cvBGRImg)
        self.setMinimumSize(width * 2, height * 3)
        self.setMaximumSize(self.minimumSize())

        self.initUI()
Example #9
def main(NAOip, NAOport):
    print "OpenCV Version:", cv2.__version__
    myBroker = ALBroker("myBroker",
                        "0.0.0.0",   # listen to anyone
                        0,           # find a free port and use it
                        NAOip,          # parent broker IP
                        NAOport)        # parent broker port
    global BasicMotions
    BasicMotions = BasicMotions(NAOip, NAOport, myBroker)
    EdgeDetector = EdgeDetection(NAOip, NAOport)
    user_input = "Start"
    delay = 1
    while user_input != "end":
        user_input = raw_input("Enter a command: ")
        user_input = user_input.lower()
        print(user_input)
        if user_input == "speak":
            BasicMotions.naoSay("Oh My Gawd, I can Speak!")
        elif user_input == "sit":
            BasicMotions.naoSit()
        elif user_input == "stand":
            BasicMotions.naoStand()
        elif user_input == "standinit":
            BasicMotions.naoStandInit()
        elif user_input == "walk":
            BasicMotions.naoWalk(0.5, 0.4)
        elif user_input == "nod":
            BasicMotions.naoNodHead()
        elif user_input == "shake head":
            BasicMotions.naoShadeHead()
        elif user_input == "wave right":
            BasicMotions.naoWaveRight()
        elif user_input == "wave both":
            BasicMotions.naoWaveBoth()
        elif user_input == "rest":
            BasicMotions.naoRest()
        #=======EMOTION DISPLAY=============
        elif user_input == "happymotion": # do 3
            time.sleep(delay)
            BasicMotions.happyEmotion()
        elif user_input == "sadmotion": # do 5
            time.sleep(delay)
            BasicMotions.sadEmotion()
        elif user_input == "scared1motion": # touch do 8
            time.sleep(delay)
            BasicMotions.scaredEmotion1()
        elif user_input == "scared2motion": # step back, not use this
            time.sleep(delay)
            BasicMotions.scaredEmotion2()
        elif user_input == "fear1motion": # do 6
            time.sleep(delay)
            BasicMotions.fear1Emotion()
        elif user_input == "fear2motion": # do 2
            time.sleep(delay)
            BasicMotions.fear2Emotion()
        elif user_input == "hope1motion": #do 4
            time.sleep(delay)
            BasicMotions.hope1Emotion()
        elif user_input == "hope2motion": # do 1
            time.sleep(delay)
            BasicMotions.hope2Emotion()
        elif user_input == "angermotion": # do 7
            time.sleep(delay)
            BasicMotions.angerEmotion()
        elif user_input == "lookmotion":
            time.sleep(delay)
            BasicMotions.LookAtEdgeEmotion()
        elif user_input == "edgemotion": # do 9
            time.sleep(delay)
            BasicMotions.FoundEdgeEmotion()
        elif user_input == "scared3motion": # do 10
            time.sleep(delay)
            BasicMotions.scaredEmotion3()

        #=======VOICE EFFECT=============
        elif user_input == "happy":
            BasicMotions.naoSayHappy(BasicMotions.HriDialogEOD['1good'])
            #BasicMotions.happyEmotion()
        elif user_input == "sad":
            BasicMotions.naoSaySad(BasicMotions.HriDialogEOD['1bad'])
            #BasicMotions.sadEmotion()
        elif user_input == "scared":
            BasicMotions.naoSayScared(BasicMotions.HriDialogEOD['31nono'])
            #BasicMotions.scaredEmotion1()
        elif user_input == "fear":
            BasicMotions.naoSayFear(BasicMotions.HriDialogEOD['31nono'])
            #BasicMotions.fearEmotion()
        elif user_input == "hope":
            BasicMotions.naoSayHope(BasicMotions.HriDialogEOD['2yes'])
            #BasicMotions.hopeEmotion()
        elif user_input == "anger":
           BasicMotions.naoSayAnger(BasicMotions.HriDialogEOD['31nono'])
           #BasicMotions.angerEmotion()
        #=======EYE DISPLAY=============
        elif user_input == "happyeye":
            BasicMotions.setEyeEmotion('happy')
        elif user_input == "sadeye":
            BasicMotions.setEyeEmotion('sad')
        elif user_input == "scared1eye":
            BasicMotions.setEyeEmotion('scared1')
        elif user_input == "scared2eye":
            BasicMotions.setEyeEmotion('scared2')
        elif user_input == "feareye":
            BasicMotions.setEyeEmotion('fear')
        elif user_input == "hopeeye":
            BasicMotions.setEyeEmotion('hope')
        elif user_input == "angereye":
            BasicMotions.setEyeEmotion('anger')
        elif user_input == "alarmingeye":
            BasicMotions.blinkAlarmingEyes(5)
        #=======TESTING INTERNAL FUNCTIONS=======
        elif user_input == "blinktop":
            BasicMotions.blinkEyes(random.randint(0x00000000, 0x00FFFFFF), 5.0, "EyeTop")
        elif user_input == "blinkbottom":
            BasicMotions.blinkEyes(random.randint(0x00000000, 0x00FFFFFF), 5.0, "EyeBottom")
        elif user_input == "blinkfull":
            BasicMotions.blinkEyes(random.randint(0x00000000, 0x00FFFFFF), 5.0, "EyeFull")
        elif user_input == "eyecolor":
            BasicMotions.setEyeColor(0x00FFFFFF,"LedEyeTop")
            BasicMotions.setEyeColor(0x00FF00FF,"LedEyeCorner")
            BasicMotions.setEyeColor(0x00FF00FF,"LedEyeBottom")
        #==========TESTING CAMERA============
        elif user_input == "img":
            EdgeDetector.SetDebugMode(True)
            i = 0
            BasicMotions.LookAtEdgeEmotion()
            while True:
                status, distance, angle = EdgeDetector.lookForEdge(20, 8)
                print "Edge: ", status, distance, angle
                if status:
                    BasicMotions.FoundEdgeEmotion()
                    break
                i += 1
                if i > 100:
                    print "Not close enough"
                    break


        #============================================
        elif user_input != "end":
            print("That is not an availiable command")

    print("=== Main Program Finished Running ===")
Example #10
class ImageWidget(QWidget):
    """ A class for rendering video coming from OpenCV """

    def __init__(self):
        super(ImageWidget, self).__init__()

        # data_path = '/homes/jannik/BVSiAB/RoadSegmentation_Tutorial'
        data_path = "/home/jan/Downloads/RoadSegmentation_Tutorial/"
        load_dir_images = "images/"
        load_dir_groundTruth = "ground_truth/"
        data_dir = "data/"
        stump_images = "_im_cr.ppm"
        # stump_images = 'jan.ppm'
        stump_groundTruth = "_la_cr.pgm"

        # objects
        self.bev = BevTransformation()
        self.linefitter = LineFitting()
        self.edgedetection = EdgeDetection()
        self.polygonfitter = PolygonFitting()

        # get list of files in directory
        image_Data_loc = os.path.join(data_path, load_dir_images, "*" + stump_images)
        gt_Data_loc = os.path.join(data_path, load_dir_groundTruth, "*" + stump_groundTruth)
        self.image_Data_files = glob.glob(image_Data_loc)
        self.image_Data_files.sort()
        self.gt_Data_files = glob.glob(gt_Data_loc)
        self.gt_Data_files.sort()
        self.pos = 0

        cvBGRImg = cv2.imread(self.image_Data_files[self.pos])
        height, width, depth = cvBGRImg.shape
        self.qpm = convertIpl(cvBGRImg)
        self.qpm2 = convertIpl(cvBGRImg)
        self.qpm3 = convertIpl(cvBGRImg)
        self.qpm4 = convertIpl(cvBGRImg)
        self.setMinimumSize(width * 2, height * 3)
        self.setMaximumSize(self.minimumSize())

        self.initUI()

    def initUI(self):

        self.setGeometry(300, 300, 250, 150)
        self.setWindowTitle("Image")

        self.imageLabel = QtGui.QLabel()
        self.imageLabel.setPixmap(QtGui.QPixmap(self.qpm))
        self.imageLabel.setScaledContents(True)

        self.imageLabel2 = QtGui.QLabel()
        self.imageLabel2.setPixmap(QtGui.QPixmap(self.qpm2))
        self.imageLabel2.setScaledContents(True)

        self.imageLabel3 = QtGui.QLabel()
        self.imageLabel3.setPixmap(QtGui.QPixmap(self.qpm3))
        self.imageLabel3.setScaledContents(True)

        self.imageLabel4 = QtGui.QLabel()
        self.imageLabel4.setPixmap(QtGui.QPixmap(self.qpm4))
        self.imageLabel4.setScaledContents(True)
        # self.imageLabel.pixmap().scaled(QtCore.QSize(self.imageLabel.size()), QtCore.Qt.KeepAspectRatio, QtCore.Qt.FastTransformation)
        # self.imageLabel.move(15, 10)

        okButton = QtGui.QPushButton("OK")
        okButton.clicked.connect(QtCore.QCoreApplication.instance().quit)

        hbox = QtGui.QHBoxLayout()
        hbox.addStretch(1)
        hbox.addWidget(self.imageLabel)
        hbox.addWidget(self.imageLabel2)
        hbox2 = QtGui.QHBoxLayout()
        hbox2.addStretch(1)
        hbox2.addWidget(self.imageLabel3)
        hbox2.addWidget(self.imageLabel4)

        vbox = QtGui.QVBoxLayout()
        vbox.addStretch(1)
        vbox.addLayout(hbox)
        vbox.addLayout(hbox2)

        self.sdlText = QtGui.QLabel()
        self.sdlText.setText("set Min Threshold")
        slda = QtGui.QSlider(QtCore.Qt.Horizontal, self)
        slda.setFocusPolicy(QtCore.Qt.NoFocus)
        slda.setGeometry(30, 40, 100, 30)
        slda.valueChanged[int].connect(self.setMinTreshold)
        sld = QtGui.QHBoxLayout()

        sld.addWidget(self.sdlText)
        sld.addWidget(slda)
        vbox.addLayout(sld)

        self.sdlText2 = QtGui.QLabel()
        self.sdlText2.setText("Set Max Threshold:")
        slda = QtGui.QSlider(QtCore.Qt.Horizontal, self)
        slda.setFocusPolicy(QtCore.Qt.NoFocus)
        slda.setGeometry(30, 40, 100, 30)
        slda.valueChanged[int].connect(self.setMaxTreshold)
        sld = QtGui.QHBoxLayout()

        sld.addWidget(self.sdlText2)
        sld.addWidget(slda)
        vbox.addLayout(sld)

        self.sdlText3 = QtGui.QLabel()
        self.sdlText3.setText("Set X: ")
        slda = QtGui.QSlider(QtCore.Qt.Horizontal, self)
        slda.setFocusPolicy(QtCore.Qt.NoFocus)
        slda.setGeometry(0, 0, 300, 30)
        slda.valueChanged[int].connect(self.setX)
        sld = QtGui.QHBoxLayout()

        sld.addWidget(self.sdlText3)
        sld.addWidget(slda)
        vbox.addLayout(sld)

        self.sdlText4 = QtGui.QLabel()
        self.sdlText4.setText("set Sobel:")
        slda = QtGui.QSlider(QtCore.Qt.Horizontal, self)
        slda.setFocusPolicy(QtCore.Qt.NoFocus)
        slda.setGeometry(30, 40, 100, 30)
        slda.valueChanged[int].connect(self.setSobel)
        sld = QtGui.QHBoxLayout()

        sld.addWidget(self.sdlText4)
        sld.addWidget(slda)
        vbox.addLayout(sld)

        self.sdlText5 = QtGui.QLabel()
        self.sdlText5.setText("set Line Threshold:")
        slda = QtGui.QSlider(QtCore.Qt.Horizontal, self)
        slda.setFocusPolicy(QtCore.Qt.NoFocus)
        slda.setGeometry(30, 40, 100, 30)
        slda.valueChanged[int].connect(self.setLineThreshold)
        sld = QtGui.QHBoxLayout()

        sld.addWidget(self.sdlText5)
        sld.addWidget(slda)

        vbox.addLayout(sld)

        self.sdlText6 = QtGui.QLabel()
        self.sdlText6.setText("set Line Length:")
        slda = QtGui.QSlider(QtCore.Qt.Horizontal, self)
        slda.setFocusPolicy(QtCore.Qt.NoFocus)
        slda.setGeometry(30, 40, 100, 30)
        slda.valueChanged[int].connect(self.setLineLength)
        sld = QtGui.QHBoxLayout()

        sld.addWidget(self.sdlText6)
        sld.addWidget(slda)

        vbox.addLayout(sld)

        self.sdlText7 = QtGui.QLabel()
        self.sdlText7.setText("set K Size:")
        slda = QtGui.QSlider(QtCore.Qt.Horizontal, self)
        slda.setFocusPolicy(QtCore.Qt.NoFocus)
        slda.setGeometry(30, 40, 100, 30)
        slda.valueChanged[int].connect(self.setSobelKSize)
        sld = QtGui.QHBoxLayout()

        sld.addWidget(self.sdlText7)
        sld.addWidget(slda)

        vbox.addLayout(sld)

        vbox.addWidget(okButton)
        self.setLayout(vbox)

        self.timer = QTimer(self)
        self.timer.timeout.connect(self.queryFrame)
        self.timer.start(250)

    # def paintEvent(self, event):
    #   painter = QPainter(self)
    # painter.drawImage(QPoint(0, 0), self.qpm)

    def setMinTreshold(self, value):
        print "slider changed to {0}".format(value)
        self.edgedetection.setMinTresh(value)
        self.sdlText.setText("EDGE set Min Threshold:{0}".format(value))

    def setMaxTreshold(self, value):
        print "slider changed to {0}".format(value)
        self.edgedetection.setMaxTresh(value)
        self.sdlText2.setText("EDGE set Max Threshold:{0}".format(value))

    def setX(self, value):
        print "slider changed to {0}".format(value)
        self.bev.setAmount(value * 4)
        self.sdlText3.setText("BEV set X Threshold:{0}".format(value * 4))

    def setSobel(self, value):
        print "slider changed to {0}".format(value)
        self.edgedetection.setSobel(value / 10)
        self.sdlText4.setText("EDGE set Sobel:{0}".format(value / 10))

    def setSobelKSize(self, value):
        print "slider changed to {0}".format(value)
        self.edgedetection.setSobelKSize(value)
        self.sdlText7.setText("EDGE set Sobel K/Canny AP Size:{0}".format(value))

    def setLineThreshold(self, value):
        print "slider changed to {0}".format(value)
        self.linefitter.setThreshold(value)
        self.sdlText5.setText("LINE set Threshold:{0}".format(value))

    def setLineLength(self, value):
        print "slider changed to {0}".format(value)
        tmp = self.linefitter.setMinLength(value)
        self.sdlText6.setText("LINE set MinLength:{0}".format(tmp))

    def queryFrame(self):

        #        cvBGRImg = cv2.imread(self.image_Data_files[self.pos])
        cvBGRImg = processImage(self.image_Data_files[self.pos])
        cvBGRImg2 = self.edgedetection.computeEdges(cvBGRImg)
        cvBGRImg3b = self.bev.computeBev(cvBGRImg)
        cvBGRImg2a = cv2.cvtColor(cvBGRImg2, cv2.cv.CV_GRAY2BGR)
        cvBGRImg3 = self.bev.computeBev(cvBGRImg2a)
        cvBGRImg3a = cv2.cvtColor(cvBGRImg3, cv2.cv.CV_BGR2GRAY)
        rev, cvBGRImg3a = cv2.threshold(cvBGRImg3a, 200, 255, cv2.THRESH_BINARY)
        # cvBGRImg4 = self.linefitter.findLine(cvBGRImg3a)
        cvBGRImg5 = self.polygonfitter.findPolygon(cvBGRImg3a, cvBGRImg3b.copy())
        cvBGRImg6 = self.bev.computePers(cvBGRImg5)

        self.qpm4 = convertIpl(cvBGRImg6)
        self.qpm3 = convertIplG(cvBGRImg3a)
        self.qpm2 = convertIplG(cvBGRImg2)
        self.qpm = convertIpl(cvBGRImg)

        if len(self.image_Data_files) > self.pos + 1:
            self.pos += 1
        else:
            self.pos = 0

        self.imageLabel.setPixmap(self.qpm)
        self.imageLabel2.setPixmap(self.qpm2)
        self.imageLabel3.setPixmap(self.qpm3)
        self.imageLabel4.setPixmap(self.qpm4)
Example #11
class GenUtil:
    def __init__(self, naoMotions):
        # no inits
        self.emotionExpressionDict = ["Happy", "Hopeful", "Sad", "Fearful", "Angry",
                                      "Happy2", "Hopeful2", "Sad2", "Fearful2", "Angry2",
                                      "Happy3", "Hopeful3", "Hopeful4",
                                      "Scared1", "Scared2", "Scared3"] #(pickup, touch, high)
                                    
        self.numOE = len(self.emotionExpressionDict)
        self.naoMotions = naoMotions
        self.naoIsSafe = True
        self.naoIsTouched = False
        self.naoIsPickedUp = False
        self.naoIsHighEdge = False

        self.naoSeesHigh = False
        self.naoSeesHighTest = False
        self.waitingForInput = False

        self.wasJustScared = False
        self.scaredAllowed = True
        self.fDB = FoodDBManager()

        NAOip, NAOport = naoMotions.getConnectInfo()
        try:
            self.edgeDetector = EdgeDetection(NAOip, NAOport)
        except Exception as e:
            print "Edge Detector Failed to Create, possibly no camera on unit"
            print e

    def toNum(self, numAsString):
        # Convert a numeric string to an int; return the string unchanged if it is not a number
        try:
            ret = int(float(numAsString))
        except ValueError:
            print "***** This string is not a number: ", numAsString
            ret = numAsString
        return ret

    def expressEmotion(self, obserExpresNum = -1):
        print "********************* Yay I can Express myself"
        print "OE Num: ", obserExpresNum
        # expresses NAO's emotion through whatever was picked as the observable expression
        if obserExpresNum == -1:
            print "Neutral ", "Face"
            # self.naoEmotionalVoiceSay("I am " + "Neutral", obserExpresNum)
        else:
            oe = self.emotionExpressionDict[obserExpresNum]
            print oe, "Face"
            # self.naoEmotionalVoiceSay("I am expressing " + oe, obserExpresNum)

            if "Happy" in oe:
                self.showHappyEyes()
            elif "Sad" in oe:
                self.showSadEyes()
            elif "Fearful" in oe:
                self.showFearEyes()
            elif "Angry" in oe:
                self.showAngryEyes()
            elif "Hopeful" in oe:
                self.showHopeEyes()
            elif "Scared" in oe:
                self.showScaredEyes(oe)

            sitTest = False
            if not sitTest:
                if not self.wasJustScared:
                    self.naoMotions.naoStand()
                    self.naoMotions.naoAliveOff()

                if oe == "Happy":
                    self.showHappy1Body()
                elif oe == "Happy2":
                    self.showHappy2Body()
                elif oe == "Happy3":# not used anymore
                    self.showHappy3Body()
                elif oe == "Sad":
                    self.showSadBody()
                elif oe == "Sad2":
                    self.showSad2Body()
                elif oe == "Fearful":
                    self.showFearBody()
                elif oe == "Fearful2":
                    self.showFearBody2()
                elif oe == "Angry":
                    self.showAngryBody()
                elif oe == "Angry2":
                    self.showAngryBody2()
                elif oe == "Hopeful":
                    self.showHopeBody()
                elif oe == "Hopeful2":
                    self.showHopeBody2()
                elif oe == "Hopeful3":# not used
                    self.showHopeBody3()
                elif oe == "Hopeful4":#not used
                    self.showHopeBody4()
                elif oe == "Scared1" and not self.wasJustScared: #pickup
                    self.showScared1Body()
                    self.wasJustScared = True
                elif oe == "Scared2" and not self.wasJustScared: #touch
                    self.showScared2Body()
                    self.wasJustScared = True
                elif oe == "Scared3" and not self.wasJustScared: #high
                    self.showScared3Body()
                    self.wasJustScared = True

                if "Scared" not in oe:
                    self.naoMotions.naoStand()
                    self.naoMotions.naoAliveON()
                    self.wasJustScared = False
                    self.scaredAllowed = True
                else:
                    self.scaredAllowed = False # obsolete

            # if oe == "Scared2" and self.naoIsTouched and False:
            #     self.naoIsSafeAgain() # undo the scared after being touched
            

    def naoEmotionalSay(self, sayText, sayEmotionExpres = -1, moveIterations=1):
#        print "Yay I get to Express myself through sound!"
        # make nao say something in an emotional manner
        if sayEmotionExpres == -1:
            print "Neutral Voice"
            # self.naoMotions.naoWaveRightSay(sayText, sayEmotionExpres, 0, moveIterations)
            # self.naoMotions.naoShadeHeadSay(sayText)
            self.naoEmotionalVoiceSay(sayText, sayEmotionExpres)
        else:
            oe = self.emotionExpressionDict[sayEmotionExpres]
            print oe, "Voice"
            # self.naoMotions.naoWaveRightSay(sayText, sayEmotionExpres,(sayEmotionExpres+1.0)/(self.numOE+1), 1)
            # self.naoMotions.naoShadeHeadSay(sayText)
            self.naoEmotionalVoiceSay(sayText, sayEmotionExpres)

    def naoEmotionalVoiceSay(self, sayText, sayEmotionExpres = -1):
        oe = self.emotionExpressionDict[sayEmotionExpres]
        # print "My voice is: ", oe
        if "Happy" in oe:
            self.showHappyVoice(sayText)
        elif "Sad" in oe:
            self.showSadVoice(sayText)
        elif "Fearful" in oe:
            self.showFearVoice(sayText)
        elif "Angry" in oe:
            self.showAngryVoice(sayText)
        elif "Hopeful" in oe:
            self.showHopeVoice(sayText)
        elif "Scared" in oe:
            self.showScaredVoice(sayText)

################################################ Eyes
    def showHappyEyes(self):
        self.naoMotions.setEyeEmotion('happy')
        print "My eyes are Happy"
       
    def showSadEyes(self):
        self.naoMotions.setEyeEmotion('sad')
        print "My eyes are Sad"
        
    def showFearEyes(self):
        self.naoMotions.setEyeEmotion('fear')
        print "My eyes are Fear"
            
    def showAngryEyes(self):
        self.naoMotions.setEyeEmotion('anger')
        print "My eyes are Angry"

    def showHopeEyes(self):
        self.naoMotions.setEyeEmotion('hope')
        print "My eyes are Hopeful"
     
    def showScaredEyes(self, expression = 'scared1'):
        # self.naoMotions.setEyeEmotion('scared1')
        self.naoMotions.blinkAlarmingEyes(2*3, expression.lower())
        self.naoMotions.setEyeEmotion(expression)
        print "My eyes are Scared"

################################################# Body
    def showHappy1Body(self):
        self.naoMotions.happyEmotion() # switch to new one
        print "My body is Happy1"

    def showHappy2Body(self):
        self.naoMotions.happy3Emotion()
        print "My body is Happy2"

    def showHappy3Body(self): # not used anymore
        self.naoMotions.happy3Emotion()
        print "My body is Happy3"

    def showSadBody(self):
        self.naoMotions.sad2Emotion()
        print "My body is Sad"

    def showSad2Body(self):
        self.naoMotions.sadEmotion()
        print "My body is Sad2"
        
    def showFearBody(self):
        self.naoMotions.fear2Emotion()
        print "My body is Fear1"

    def showFearBody2(self):
        self.naoMotions.fearEmotion()
        print "My body is Fear2"
            
    def showAngryBody(self):
        self.naoMotions.angerEmotion()
        print "My body is Angry"

    def showAngryBody2(self):
        self.naoMotions.anger2Emotion()
        print "My body is Angry2"

    def showHopeBody(self):
        self.naoMotions.hopeEmotion()
        print "My body is Hopeful1"

    def showHopeBody2(self):
        self.naoMotions.hope2Emotion()
        print "My body is Hopeful2"

    def showHopeBody3(self):
        self.naoMotions.hope3Emotion()
        print "My body is Hopeful3"

    def showHopeBody4(self):
        self.naoMotions.hope4Emotion()
        print "My body is Hopeful4"

    def showScared1Body(self):
        self.naoMotions.scaredEmotion3()
        print "My body is Scared1"

    def showScared2Body(self):
        self.naoMotions.scaredEmotion1()
        print "My body is Scared2"
    def showScared3Body(self):
        self.naoMotions.scaredEmotion3Edge()
        print "My body is Scared3"

################################################# Body
    def showHappyVoice(self, sayText):
        self.naoMotions.naoSayHappy(sayText)
        print "My voice is Happy"

    def showSadVoice(self, sayText):
        self.naoMotions.naoSaySad(sayText)
        print "My voice is Sad"

    def showFearVoice(self, sayText):
        self.naoMotions.naoSayFear(sayText)
        print "My voice is Fear"

    def showAngryVoice(self, sayText):
        self.naoMotions.naoSayAnger(sayText)
        print "My voice is Angry"

    def showHopeVoice(self, sayText):
        self.naoMotions.naoSayHope(sayText)
        print "My voice is Hopeful"

    def showScaredVoice(self, sayText):
        self.naoMotions.naoSayScared(sayText)
        print "My voice is Scared"




    def naoWasTouched(self, touchedWhere = ""):
        if not self.naoIsTouched:
            print "**********************************"
            print "I was TOUCHED !!"
            print "**********************************"
            self.naoIsSafe = False
            self.naoIsTouched = True
            self.stopNAOActions()
            self.naoMotions.naoAliveOff()
            self.showScaredEyes('scared2')
            if self.waitingForInput:
                FileUtilitiy.hitEnterOnConsole()
            self.showScaredVoice("I was Touched! " + touchedWhere)
        else:
            print "********************************** I was already touched"

    def naoWasPickedUp(self):
        if not self.naoIsPickedUp:
            print "**********************************"
            print "I was PICKED UP !!"
            print "**********************************"
            self.naoIsSafe = False
            self.naoIsPickedUp = True
            self.stopNAOActions()
            self.naoMotions.naoAliveOff()
            self.showScaredEyes('scared1')
            if self.waitingForInput:
                FileUtilitiy.hitEnterOnConsole()
            self.showScaredVoice("I was Picked up!")
        else:
            print " ********************************** I was already picked up"

    def naoSeesHighEdge(self):
        if not self.naoIsHighEdge:
            print "**********************************"
            print "I'm TOO CLOSE to the edge!!"
            print "**********************************"
            self.naoIsSafe = False
            self.naoIsHighEdge = True
            self.stopNAOActions()
            self.naoMotions.naoAliveOff()
            self.showScaredEyes('scared3')
            self.showScaredVoice("I'm so high up!")
        else:
            print " ********************************** I already see I'm high up"

    def naoIsSafeAgain(self):
        print "**********************************"
        print "Much Better"
        print "**********************************"
        self.naoIsSafe = True
        self.naoIsTouched = False
        self.naoIsPickedUp = False
        self.naoIsHighEdge = False
        self.naoSeesHighTest = False
        self.scaredAllowed = True

    def getTimeStamp(self):
        return time.time()

    def getDateTimeFromTime(self, timeStamp):
        return datetime.datetime.fromtimestamp(timeStamp).strftime('%Y-%m-%d_%H-%M-%S')

    def getDateTime(self):
        ts = time.time()
        return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M-%S')

    def checkSafety(self):
        return self.naoIsSafe

    def stopNAOActions(self):
        self.naoMotions.stopNAOActions()

    def showFoodDB(self):
        self.fDB.showDB()

    def foodDBSelectWhere(self, mealType = "lunch", canEatPoultry = True, canEatGluten = True, canEatFish = True,
                          strictIngredients = True):
        return self.fDB.selectDBWhere(mealType, canEatPoultry, canEatGluten, canEatFish, strictIngredients)

    def dbRowToDict(self, row):
        return self.fDB.dict_factory(row)

    def randMeal(self, mealsTries, mealPos):
        keepLooping = True
        ranMeal = 0
        while keepLooping:
            ranMeal = random.randint(0, len(mealPos)-1)
            ranIndex = self.dbRowToDict(mealPos[ranMeal])['id']
            # print mealPos
            # print "ranMeal: ", ranMeal, " ranIndex: ", ranIndex, " mealsTries: ", mealsTries
            keepLooping = (ranIndex) in mealsTries

        return ranMeal

    def naoTurnOffEyes(self):
        self.naoMotions.naoTurnOffEyes()

    def naoWave(self):
        self.naoMotions.naoWaveRight()

    # def naoWaveSay(self):
    #     self.naoMotions.naoWaveRightFirst()
    #     self.naoMotions.naoWaveRightSecond()

    def checkEdgeSafety(self):
        # check if NAO is far enough away from the edge
        # self.naoMotions.naoShakeHead()

        self.naoMotions.naoAliveOff()
        self.naoMotions.LookAtEdgeMotion()
        self.naoSeesHigh = False
        thres = 20
        nFrames = 4
        try:
            status, distance, angle = self.edgeDetector.lookForEdge(thres, nFrames)
            print "Status: ", status, "Distance: ", distance, " Angle: ", angle
            if distance is not None:
                self.naoSeesHigh = status
        except Exception as e:
            print "Edge Detection failed"
            print e

        self.naoSeesHigh = self.naoSeesHigh or self.naoSeesHighTest
        if self.naoSeesHigh:
            print "NAO is too close to the edge"
            self.naoSeesHighEdge()
        else:
            print "NAO is a safe distance from the edge"

        self.naoMotions.naoStand()
        self.naoMotions.naoAliveON()
        return self.naoSeesHigh

    def testExpressions(self):
        expressionDict = range(0,9+1)

        for e in expressionDict:
            self.expressEmotion(int(e))
            # time.sleep(2)

        self.naoMotions.naoStand()
        self.naoMotions.naoAliveOff()
        self.showScared1Body()
        self.naoMotions.naoStand()
        self.naoMotions.naoAliveON()
        # time.sleep(2)
        self.naoMotions.naoStand()
        self.naoMotions.naoAliveOff()
        self.showScared2Body()
        self.naoMotions.naoStand()
        self.naoMotions.naoAliveON()
        # time.sleep(2)
        self.naoMotions.naoStand()
        self.naoMotions.naoAliveOff()
        self.showScared3Body()
        self.naoMotions.naoStand()
        self.naoMotions.naoAliveON()
        # time.sleep(2)

        print "Done testing"
Example #12
class LaneDetectionPipeline(object):
    """
    this is the pipeline for detecting lane
    """
    def __init__(self):
        self.cam_calib = None
        self.edge_detection = None
        self.perspec_trans = None
        self.line_detection = None
        # line state carried across frames for movie processing
        self.left = Line()
        self.right = Line()

    def configure_auto(self):
        # Camera Calibration
        self.cam_calib = CameraCalib(9, 6)
        self.cam_calib.import_calib("calibration_pickle.p")
        # Edge Detection with gradient and color space
        self.edge_detection = EdgeDetection()
        # add function
        self.edge_detection.add_func(Sobel(7, 20, 120, 'x'))
        self.edge_detection.add_func(SobelGratitude(5, 70, 100))
        self.edge_detection.add_func(AbstHLSElement(160, 255, 'S'))
        # Perspective Transformation
        src_range = np.float32([[550, 470], [200, 700], [1100, 700],
                                [750, 470]])
        dst_range = np.float32([[180, 0], [180, 700], [1100, 700], [1100, 0]])
        self.perspec_trans = PerspectiveTransform(src_range, dst_range)
        # Line Detection
        self.line_detection = LineDetection(margin=50)

    def _draw_lane_to_image(self, undist_img, binary_warped, leftpoly,
                            rightpoly):
        ploty = np.linspace(0, binary_warped.shape[0] - 1,
                            binary_warped.shape[0])
        left_fitx = leftpoly.deduce(ploty)
        right_fitx = rightpoly.deduce(ploty)
        # Create an image to draw the lines on
        warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
        color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

        # Recast the x and y points into usable format for cv2.fillPoly()
        pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
        pts_right = np.array(
            [np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
        pts = np.hstack((pts_left, pts_right))

        # Draw the lane onto the warped blank image
        cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

        # Warp the blank back to original image space using inverse perspective matrix (Minv)
        newwarp = self.perspec_trans.reverse_img(color_warp)
        # Combine the result with the original image
        result = cv2.addWeighted(undist_img, 1, newwarp, 0.3, 0)

        # add curvature information to image
        # calculates center of lane's curvature
        curvature = get_lane_curvature(result, leftpoly, rightpoly)
        curve_msg = "Radius of Curvature: {} (m)".format(curvature)
        side, center_of_lane = get_distance_to_center(result, leftpoly,
                                                      rightpoly)
        dist_msg = "Vehicle is {} m {} of center".format(center_of_lane, side)

        cv2.putText(result,
                    curve_msg, (100, 100),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    1.0, (255, 255, 255),
                    thickness=2)
        cv2.putText(result,
                    dist_msg, (100, 150),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    1.0, (255, 255, 255),
                    thickness=2)

        return result

    def process_image_pic(self, src_img):
        undist_img = self.cam_calib.undist_img(src_img)
        out_img = self.edge_detection.execute(undist_img)
        out_img = self.perspec_trans.warp_img(out_img)
        leftx, lefty, rightx, righty = self.line_detection.find_lane_pixels(
            out_img)
        leftpoly = LinePolynomial(2)
        rightpoly = LinePolynomial(2)
        leftpoly.fit(lefty, leftx)
        rightpoly.fit(righty, rightx)

        leftx, lefty, rightx, righty = self.line_detection.search_around_poly(
            out_img, leftpoly, rightpoly)
        leftpoly.fit(lefty, leftx)
        rightpoly.fit(righty, rightx)

        left_curve = LineCurvature()
        right_curve = LineCurvature()
        left_curve.set_plot(out_img, lefty, leftx)
        right_curve.set_plot(out_img, righty, rightx)
        # get curvature

        out_img = self._draw_lane_to_image(undist_img, out_img, leftpoly,
                                           rightpoly)
        return out_img

    def process_img_movie(self, src_img):
        # camera calibration, edge_detection, perspective transformation.
        undist_img = self.cam_calib.undist_img(src_img)
        out_img = self.edge_detection.execute(undist_img)
        out_img = self.perspec_trans.warp_img(out_img)
        # if lane detection succeeded in the last cycle, the previous fit alone
        # is used to find the lane in this cycle; otherwise a histogram-based
        # search is used. In both cases all pixels around the line are collected.
        if self.left.detected and self.right.detected:
            leftx, lefty, rightx, righty = self.line_detection.search_around_poly(
                out_img, self.left.linepoly, self.right.linepoly)
        else:
            leftx, lefty, rightx, righty = self.line_detection.find_lane_pixels(
                out_img)
        #
        self.left.linepoly.fit(lefty, leftx)
        self.right.linepoly.fit(righty, rightx)
        # calculate curvature of both right line and left line
        left_curve = LineCurvature()
        right_curve = LineCurvature()
        left_curve.set_plot(out_img, lefty, leftx)
        right_curve.set_plot(out_img, righty, rightx)
        # sanity checks on the detection (curvature, distance, etc.)
        detected = sanity_check(out_img, self.left.linepoly,
                                self.right.linepoly, left_curve, right_curve)
        # update line detection information
        self.left.update_lines(detected, out_img, leftx, lefty,
                               left_curve.curverad_real)
        self.right.update_lines(detected, out_img, rightx, righty,
                                right_curve.curverad_real)
        left_fitx = None
        right_fitx = None
        ploty = np.linspace(0, out_img.shape[0] - 1, out_img.shape[0])
        # if no line was detected, fall back to the most recent fit;
        # if no recent fit exists either, substitute the stored current_fit (giving up).
        if not detected:
            if len(self.left.recent_xfitted) > 0:
                left_fitx = self.left.recent_xfitted[-1]
            else:
                interpolatepoly = LinePolynomial()
                interpolatepoly.fit_coef = self.left.current_fit
                left_fitx = interpolatepoly.deduce(ploty)
            if len(self.right.recent_xfitted) > 0:
                right_fitx = self.right.recent_xfitted[-1]
            else:
                interpolatepoly = LinePolynomial()
                interpolatepoly.fit_coef = self.right.current_fit
                right_fitx = interpolatepoly.deduce(ploty)
        else:
            # get fresh fitted data
            left_fitx = self.left.recent_xfitted[-1]
            right_fitx = self.right.recent_xfitted[-1]
        # fitting again
        leftpoly = LinePolynomial()
        leftpoly.fit(ploty, left_fitx)
        rightpoly = LinePolynomial()
        rightpoly.fit(ploty, right_fitx)
        # draw lane to image
        out_img = self._draw_lane_to_image(undist_img, out_img, leftpoly,
                                           rightpoly)

        return out_img
Example #13
if __name__ == '__main__':

    import glob
    from CameraCalib import CameraCalib
    from EdgeDetection import EdgeDetection
    import matplotlib.pyplot as plt
    import matplotlib.image as mpimg
    from ColorElementCollection import AbstGray, AbstRGBElement, AbstHLSElement
    from SobelCollection import Sobel, SobelGratitude, SobelDirection
    from PerspectiveTransform import PerspectiveTransform
    import numpy as np
    import copy

    cam_calib = CameraCalib(9, 6)
    src_image_file = glob.glob('../test_images/*.jpg')
    cam_calib.import_calib("calibration_pickle.p")

    edge_detection = EdgeDetection()
    # add function
    edge_detection.add_func(Sobel(5, 30, 100, 'x'))
    edge_detection.add_func(AbstHLSElement(160, 255, 'S'))

    src_range = np.float32([[550, 470], [220, 680], [1150, 680], [750, 470]])
    dst_range = np.float32([[200, 0], [200, 700], [1100, 700], [1100, 100]])

    ptransform = PerspectiveTransform(src_range, dst_range)

    for idx, fname in enumerate(src_image_file):

        org_img = mpimg.imread(fname)
        calibed_img = cam_calib.undist_img_file(fname)
        edet_img = edge_detection.execute(calibed_img)
        interest_img = copy.deepcopy(org_img)
Example #14
from ReadBmp import ReadBmp
from Noise import Noise
from WaveFilter import WaveFilter
from EdgeDetection import EdgeDetection

# Create a Gaussian-noise image
noise = Noise()
noise.GaussianNoise("1.bmp", "2.bmp")
# Create a salt-and-pepper-noise image
noise.SaltAndPepperNoise("1.bmp", "3.bmp")

# Apply mean filtering to the salt-and-pepper-noise image
filter = WaveFilter()
filter.MeanFilter("3.bmp", "4.bmp")
# Apply median filtering to the salt-and-pepper-noise image
filter.MedianFilter("3.bmp", "5.bmp")

# Edge detection with the Sobel operator
detection = EdgeDetection()
detection.Sobel("1.bmp", "6.bmp")
# Edge detection with the Roberts operator
detection.Roberts("1.bmp", "7.bmp")
Example #15
  def compute(x, darray, attribute_class, attribute_type, kernel, 
              sample_rate, dip_factor, axis):

    if attribute_class == 'Amplitude':

      if attribute_type == 'fder':
        result = SignalProcess.first_derivative(x, darray, axis=-1, 
                                                preview=None)
        return(result)
      
      if attribute_type == 'sder':
        result = SignalProcess.second_derivative(x, darray, axis=-1, 
                                                 preview=None)
        return(result)

      if attribute_type == 'rms':
        result = SignalProcess.rms(x, darray, kernel=(1,1,9), 
                                   preview=None)
        return(result)

      if attribute_type == 'gradmag':
        result = SignalProcess.gradient_magnitude(x, darray, sigmas=(1,1,1), 
                                                  preview=None)
        return(result)

      if attribute_type == 'reflin':
        result = SignalProcess.reflection_intensity(x, darray, kernel=(1,1,9), 
                                                    preview=None)
        return(result)

    if attribute_class == 'CompleTrace':  

      if attribute_type == 'enve':
        result = ComplexAttributes.envelope(x, darray, preview=None)
        return(result)

      if attribute_type == 'inphase':
        result = ComplexAttributes.instantaneous_phase(x, darray, preview=None)   
        return(result)

      if attribute_type == 'cosphase':
        result = ComplexAttributes.cosine_instantaneous_phase(x, darray, 
                                                              preview=None)   
        return(result)

      if attribute_type == 'ampcontrast':
        result = ComplexAttributes.relative_amplitude_change(x, darray, 
                                                             preview=None)
        return(result)

      if attribute_type == 'ampacc':
        result = ComplexAttributes.amplitude_acceleration(x, darray, 
                                                          preview=None)
        return(result)

      if attribute_type == 'infreq':
        result = ComplexAttributes.instantaneous_frequency(x, darray, 
                                                           sample_rate=4, 
                                                           preview=None)
        return(result)

      if attribute_type == 'inband':
        result = ComplexAttributes.instantaneous_bandwidth(x, darray, 
                                                           preview=None)
        return(result)

      if attribute_type == 'domfreq':
        result = ComplexAttributes.dominant_frequency(x, darray, sample_rate=4, 
                                                      preview=None)
        return(result)

      if attribute_type == 'freqcontrast':
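        # note: this branch duplicates 'domfreq' above; both call dominant_frequency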
        result = ComplexAttributes.dominant_frequency(x, darray, sample_rate=4, 
                                                      preview=None)
        return(result)

      if attribute_type == 'sweet':
        result = ComplexAttributes.sweetness(x, darray, sample_rate=4, 
                                             preview=None)
        return(result)

      if attribute_type == 'quality':
        result = ComplexAttributes.quality_factor(x, darray, sample_rate=4, 
                                                  preview=None)
        return(result)

      if attribute_type == 'resphase':
        result = ComplexAttributes.response_phase(x, darray, preview=None)
        return(result)

      if attribute_type == 'resfreq':
        result = ComplexAttributes.response_frequency(x, darray, sample_rate=4, 
                                                      preview=None)
        return(result) 

      if attribute_type == 'resamp':
        result = ComplexAttributes.response_amplitude(x, darray, preview=None)
        return(result)

      if attribute_type == 'apolar':
        result = ComplexAttributes.apparent_polarity(x, darray, preview=None)
        return(result)

    if attribute_class == 'DipAzm':

      if attribute_type == 'dipgrad':
        result = DipAzm.gradient_dips(x, darray, dip_factor=10, kernel=(3,3,3), 
                                      preview=None)
        return(result) # result is il_dip, xl_dip

      if attribute_type == 'gst':
        result = DipAzm.gradient_structure_tensor(x, darray, kernel, 
                                                  preview=None)
        return(result) # result is gi2, gj2, gk2, gigj, gigk, gjgk
      
      if attribute_type == 'gstdip2d':
        result = DipAzm.gst_2D_dips(x, darray, dip_factor=10, kernel=(3,3,3), 
                                    preview=None)
        return(result) # result is il_dip, xl_dip

      if attribute_type == 'gstdip3d':
        result = DipAzm.gst_3D_dip(x, darray, dip_factor=10, kernel=(3,3,3), 
                                   preview=None)
        return(result)

      if attribute_type == 'gstazm3d':
        result = DipAzm.gst_3D_azm(x, darray, dip_factor=10, kernel=(3,3,3), 
                                   preview=None)
        return(result)


    if attribute_class == 'EdgeDetection':  

      if attribute_type == 'semblance':
        result = EdgeDetection.semblance(x, darray, kernel=(3,3,9), 
                                         preview=None)
        return(result)

      if attribute_type == 'gstdisc':
        result = EdgeDetection.gradient_structure_tensor(x, darray, 
                                                         kernel=(3,3,9), 
                                                         preview=None)
        return(result)

      if attribute_type == 'eigen':
        result = EdgeDetection.eig_complex(x, darray, kernel=(3,3,9), 
                                           preview=None)
        return(result)

      if attribute_type == 'chaos':
        result = EdgeDetection.chaos(x, darray, kernel=(3,3,9), 
                                     preview=None)
        return(result)

      if attribute_type == 'curv':
        # compute first inline and xline dips from gst
        x = DipAzm()
        darray_il, darray_xl = DipAzm.gradient_dips(x, darray, dip_factor=10, 
                                                    kernel=(3,3,3), 
                                                    preview=None)
        # compute curvature
        result = EdgeDetection.volume_curvature(x, darray_il, darray_xl, 
                                                dip_factor=10, 
                                                kernel=(3,3,3), 
                                                preview=None) 
        return(result) # result is H, K, Kmax, Kmin, KMPos, KMNeg
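
A sketch of how this dispatcher pairs with make_dask from Example #2; argument values are illustrative:

    x, darray = make_dask(seismic, '3d', 'EdgeDetection', 'semblance', (3, 3, 9))
    result = compute(x, darray, 'EdgeDetection', 'semblance', (3, 3, 9),
                     sample_rate=4, dip_factor=10, axis=-1)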
Example #16
        cv2.putText(frame, 'No game found ', (9, 28), font, 0.55, colour, 2,
                    cv2.LINE_AA)


if __name__ == '__main__':

    # initiate web scraping object
    myScraper = Scraper()
    # avatar database access object
    myDb = DataBase('avatar.db')
    # collector database object
    collectordb = CollectorDB('collector.db')
    # image correction object
    imageCorrection = ImageCorrection()
    # edge detection object
    edge_detection = EdgeDetection()
    # image warping object
    image_warper = ImageWarper()
    # image hashing object
    image_hasher = ImageHasher()
    # root for Tk object
    root = Tk()
    topframe = Frame(root)
    topLabel = Label(root, text="What would you like to do?")
    topLabel.grid(row=0, column=0)
    view = Button(root, text="View collection", command=get_colls)
    view.grid(row=1, column=0)
    scan = Button(root, text="Scan a game", command=scan_game)
    scan.grid(row=1, column=1)
    root.title("AVATAR")
    root.mainloop()
Example #17
if __name__ == '__main__':
    import glob
    from CameraCalib import CameraCalib
    from EdgeDetection import EdgeDetection
    import matplotlib.pyplot as plt
    import matplotlib.image as mpimg
    from ColorElementCollection import AbstGray, AbstRGBElement, AbstHLSElement
    from SobelCollection import Sobel, SobelGratitude, SobelDirection
    from PerspectiveTransform import PerspectiveTransform
    import numpy as np
    import copy

    cam_calib = CameraCalib(9, 6)
    src_image_file = glob.glob('../test_images/*.jpg')
    cam_calib.import_calib("calibration_pickle.p")

    edge_detection = EdgeDetection()
    # add function
    edge_detection.add_func(Sobel(7, 20, 120, 'x'))
    edge_detection.add_func(SobelGratitude(5, 70, 100))
    edge_detection.add_func(AbstHLSElement(170, 255, 'S'))

    src_range = np.float32([[550, 480], [220, 700], [1150, 700], [750, 480]])
    dst_range = np.float32([[180, 0], [180, 700], [1100, 700], [1100, 100]])
    src_range = np.float32([[550, 470], [200, 700], [1100, 700], [750, 470]])
    dst_range = np.float32([[180, 0], [180, 700], [1100, 700], [1100, 0]])

    ptransform = PerspectiveTransform(src_range, dst_range)
    ldetect = LineDetection(margin=50)
    for idx, fname in enumerate(src_image_file):

        org_img = mpimg.imread(fname)