Code Example #1
    def getNewTalks(self):
        # BeautifulSoup 3 API: SoupStrainer + parseOnlyThese limits parsing to the talk containers
        talkContainers = SoupStrainer(attrs={'class': re.compile('talkMedallion')})
        for talk in BeautifulSoup(self.html, parseOnlyThese=talkContainers):
            link = URLTED + talk.dt.a['href']
            title = cleanHTML(talk.dt.a['title'])
            pic = resizeImage(talk.find('img', attrs={'src': re.compile(r'.+?\.jpg')})['src'])
            yield {'url': link, 'Title': title, 'Thumb': pic}
Code Example #2
    def getTalks(self):
        talkContainer = SoupStrainer(attrs={'class': re.compile('box clearfix')})
        for talk in BeautifulSoup(self.html, parseOnlyThese=talkContainer):
            title = talk.h4.a.string
            link = URLTED + talk.dt.a['href']
            pic = resizeImage(talk.find('img', attrs={'src': re.compile(r'.+?\.jpg')})['src'])
            yield {'url': link, 'Title': title, 'Thumb': pic}
Code Example #3
    def detect_traffic_sign(self, frame):
        img, tf_sign = None, ''
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, self.low_range, self.high_range)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, self.kernel)
        mask = cv2.dilate(mask, self.kernel, iterations=1)
        contours = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]

        for c in contours:
            area = cv2.contourArea(c)
            if area > 400:
                x, y, w, h = cv2.boundingRect(c)
                if abs(w - h) < (w + h) / 10:
                    img = frame[y:y + h, x:x + w]
                    imagePredict = util.resizeImage(img)
                    imagePredict = imagePredict / 255.0
                    start = time.time()
                    predicted = self.model.predict(np.array([imagePredict]))
                    predicted = round(predicted[0][0], 2)

                    if 0.0 <= predicted <= 0.2:
                        tf_sign = 'straight'
                    if 0.8 <= predicted <= 1.0:
                        tf_sign = 'turn'
                    print("{}\t{}\t{}".format(predicted, tf_sign,
                                              time.time() - start))

        return img, tf_sign
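The method above returns the cropped sign region and a 'straight'/'turn' label. As a rough illustration of how such a method might be driven, here is a hypothetical webcam loop; the `run` helper, the camera index, and the on-frame label drawing are assumptions for illustration, not part of the project above.

# Hypothetical driver loop for a detect_traffic_sign-style method.
# `detector` is assumed to be an instance of the class that defines
# detect_traffic_sign, with its model, HSV ranges and kernel already set up.
import cv2


def run(detector, camera_index=0):
    cap = cv2.VideoCapture(camera_index)
    try:
        while True:
            ok, frame = cap.read()
            if not ok:
                break
            crop, sign = detector.detect_traffic_sign(frame)
            if sign:  # 'straight' or 'turn' when the prediction was confident
                cv2.putText(frame, sign, (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        cap.release()
        cv2.destroyAllWindows()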
Code Example #4
    def get_talks(self, url):
        url = url + "?page=%s"
        page_index = 1
        # Have to know when to stop paging, see condition for loop exit below.
        found_titles = set()
        found_on_last_page = 0

        html = self.get_HTML(url % (page_index))

        while True:
            containers = SoupStrainer('dl', {'class': re.compile('talkMedallion')})
            found_on_this_page = 0

            for talk in BeautifulSoup(html, parseOnlyThese=containers):
                a_tag = talk.dt.a
                title = a_tag['title'].strip()
                if title not in found_titles:
                    found_titles.add(title)
                    found_on_this_page += 1
                    link = a_tag['href']
                    img_tag = a_tag.find('img', {'src': re.compile(r'http://images\.ted\.com/')})
                    img = img_tag['src']
                    yield title, URLTED + link, resizeImage(img)

            # Results on the last page == results on (last page + 1), _not_ 0 as you might hope.
            # The second clause lets us skip fetching (last page + 1) when the last page contains
            # fewer results than the one before it, which is usually but not always the case.
            if found_on_this_page and found_on_this_page >= found_on_last_page:
                page_index += 1
                found_on_last_page = found_on_this_page
                html = self.get_HTML(url % (page_index))
            else:
                break
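The generator above yields (title, url, thumbnail) tuples until paging stops producing new titles. A minimal consumption sketch, assuming a scraper object that exposes get_talks as defined above; the helper name and the dictionary layout are placeholders.

# Hypothetical consumer of the paging generator above.
def list_all_talks(scraper, base_url):
    items = []
    for title, link, thumb in scraper.get_talks(base_url):
        # each yielded tuple is (title, absolute talk URL, resized thumbnail URL)
        items.append({'Title': title, 'url': link, 'Thumb': thumb})
    return items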
Code Example #5
    def getFavoriteTalks(self, userID, url=URLFAVORITES):
        if userID:
            html = self.get_HTML(url + userID)
            talkContainer = SoupStrainer(attrs={'class': re.compile('box clearfix')})
            for talk in BeautifulSoup(html, parseOnlyThese=talkContainer):
                title = talk.ul.a.string
                link = URLTED + talk.dt.a['href']
                pic = resizeImage(talk.find('img', attrs={'src': re.compile(r'.+?\.jpg')})['src'])
                yield {'url': link, 'Title': title, 'Thumb': pic}
        else:
            self.logger('invalid user object')
Code Example #6
    def getFavoriteTalks(self, userID, url=URLFAVORITES):
        if userID:
            html = self.get_HTML(url + userID)
            talkContainer = SoupStrainer(attrs={'class': re.compile('col clearfix')})
            for talk in BeautifulSoup(html, parseOnlyThese=talkContainer):
                title = talk.a['title']
                link = URLTED + talk.a['href']
                img = resizeImage(talk.a.img['src'])
                yield title, link, img
        else:
            self.logger('invalid user object')
Code Example #7
    def getFavoriteTalks(self, user, url=URLFAVORITES):
        """user must be a TedTalks().User object with an .id attribute"""
        if user.id is not None:
            html = getHTML(url + user.id)
            talkContainer = SoupStrainer(attrs={'class': re.compile('box clearfix')})
            for talk in BeautifulSoup(html, parseOnlyThese=talkContainer):
                title = talk.h4.a.string
                link = URLTED + talk.dt.a['href']
                pic = resizeImage(talk.find('img', attrs={'src': re.compile(r'.+?\.jpg')})['src'])
                yield {'url': link, 'Title': title, 'Thumb': pic}
        else:
            print('[%s] %s invalid user object' % (pluginName, __name__))
Code Example #8
    def getTalks(self):
        # themes are loaded with a JSON call. Why are they not more consistent?
        from simplejson import loads
        # search the HTML for the link to TED's "api"; a regex is easier than BeautifulSoup here
        jsonUrl = URLTED + re.findall(r'DataSource\("(.+?)"', self.html)[0]
        # build a dict from the JSON-formatted string returned by that URL
        talksMarkup = loads(getHTML(jsonUrl))
        # walk the dict for each talk's metadata
        for markup in talksMarkup['resultSet']['result']:
            talk = BeautifulSoup(markup['markup'])
            link = URLTED + talk.dt.a['href']
            title = cleanHTML(talk.dt.a['title'])
            pic = resizeImage(talk.find('img', attrs={'src': re.compile(r'.+?\.jpg')})['src'])
            yield {'url': link, 'Title': title, 'Thumb': pic}
Code Example #9
    def preprocess(self, boxlabel_df, sampleFiles, shrink):
        # First create a list with required length
        self.dataStore = [None] * len(sampleFiles)
        sampleCount = len(sampleFiles)
        for fileIndex in range(sampleCount):
            print("Preprocess: {}/{}    \r".format(fileIndex, sampleCount),
                  end="",
                  file=sys.stderr)
            fileName = sampleFiles.iloc[fileIndex].TrainImageId
            # Load and resize image
            img = sd_util.loadImage(
                os.path.join(sd_util.TRAIN_IMAGES, fileName))
            if shrink != 1:
                img = sd_util.resizeImage(img, self.IMAGE_H, self.IMAGE_W)
            img = img.reshape((self.IMAGE_H, self.IMAGE_W, 1))
            img -= 88  # The average grayscale of images is 88

            selectedRows = boxlabel_df[boxlabel_df.ImageId == fileName]
            defectClasses = [None] * sd_util.NUM_CLASS
            margins = [self.IMAGE_W, 0, self.IMAGE_H, 0]
            for classIndex in range(sd_util.NUM_CLASS):
                if selectedRows.iloc[classIndex].hasBox:
                    encodedBoxes = selectedRows.iloc[classIndex].EncodedBoxes
                    rects = sd_util.encoded2rects(encodedBoxes)
                    temp = np.zeros(
                        (len(rects), 4))  # Convert it from tuple to np matrix
                    temp[:, :] = rects
                    rects = temp
                    if shrink != 1:
                        rects /= shrink

                    defectClasses[classIndex] = rects
                    if np.min(rects[:, 0]) < margins[0]:
                        margins[0] = np.min(rects[:, 0])
                    if np.min(rects[:, 1]) < margins[1]:
                        margins[1] = np.min(rects[:, 1])
                    if np.max(rects[:, 2]) > margins[2]:
                        margins[2] = np.max(rects[:, 2])
                    if np.max(rects[:, 3]) > margins[3]:
                        margins[3] = np.max(rects[:, 3])

            self.dataStore[fileIndex] = (img, defectClasses, margins)
Code Example #10
def identifyCharacter(image1_path):

    # read the image
    image = cv2.imread(image1_path)

    # correct the image orientation with a QR-code-style perspective fix
    warped = FixPerspective.fixPerspective(imageOpenCV=image)

    # remove colored pixels so only black ones are compared
    warped = util.cleanImage(warped)

    # resize the image
    image = util.resizeImage(warped)

    # show the transformed image
    print("STEP 3: Apply perspective transform")
    cv2.imshow("Original", image)
    cv2.waitKey(0)

    # convert the image to ASCII art
    ascii1 = ImageToAscii.handle_image_conversion(image=image)
    print("ASCII of the input image")
    print(ascii1)

    # find the most similar key: load the KNN table (binary mode for pickle)
    with open("matrix-KNN.pickle", 'rb') as filehandler:
        matrix = pickle.load(filehandler)

    smallerDistance = float('inf')
    result = None
    for case in matrix:
        distance = Levenshtein.levenshtein(ascii1,
                                           case[0],
                                           signficance=case[1])
        if distance < smallerDistance:
            smallerDistance = distance
            result = case[1]

    print("distance = ", smallerDistance)
    print("result = ", result)
Code Example #11
def main():
    '''
    Steps:

    Read all images in the "testeCases/" folder, cataloging them by name.
    E.g. image A.png contains the best case of an "A" char, so save it into the KNN.

    KNN will be an ordered n x 2 matrix:
        n == number of test chars
        2 == ascii art + corresponding char

    Do not read from "testeCases/unknown/". Those images will be used to test the KNN.
    '''

    # Get all image paths
    folder = "../testeCases"

    paths = [os.path.join(folder, nome) for nome in os.listdir(folder)]
    files = [arq for arq in paths if os.path.isfile(arq)]
    pngs = [arq for arq in files if arq.lower().endswith(".png")]

    # convert each image to ASCII and store it in the matrix
    matrix = []
    for imagePath in pngs:

        # read the image
        image = cv2.imread(imagePath)

        # resize the image so comparisons are always between images of the same size
        image = util.resizeImage(image)

        # convert to ASCII and build the matrix
        asciiImage = ImageToAscii.handle_image_conversion(image=image)
        imageName = imagePath.replace(folder + '/', '').replace('.png', '')
        matrix.append([asciiImage, imageName])

    # save the matrix (binary mode is required by pickle)
    with open("matrix-KNN.pickle", 'wb') as filehandler:
        pickle.dump(matrix, filehandler)
Code Example #12
File: util_test.py  Project: assli100/kodi-openelec
    def test_resizeImage(self):
        self.assertEqual("feefifofum_389x292.jpg", util.resizeImage("feefifofum_132x99.jpg"))
        self.assertEqual("feefifofum_389x292.jpg", util.resizeImage("feefifofum_123x321.jpg"))
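These assertions pin down resizeImage's contract: whatever WxH suffix the thumbnail filename carries, it is rewritten to 389x292. A minimal sketch that would satisfy the test, assuming a simple regex rewrite (not necessarily the project's actual implementation):

import re


def resizeImage(url):
    # Rewrite the trailing WxH dimension token so the plugin always
    # requests the 389x292 rendition of the thumbnail.
    return re.sub(r'\d+x\d+(\.jpg)$', r'389x292\1', url)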
Code Example #13
      contours = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                  cv2.CHAIN_APPROX_SIMPLE)[-2]
      img = None
      tf_sign = 'None'
      for c in contours:
          area = cv2.contourArea(c)
          if 150 < area < 600:
              x, y, w, h = cv2.boundingRect(c)
              if abs(w - h) < (w + h) / 10:
                  img = frame[y:y + h, x:x + w]
      if img is not None:
          imagePredict = util.resizeImage(img)
          imagePredict = imagePredict / 255.0
          predicted = model.predict(np.array([imagePredict]))
          if predicted[0][0] <= 0.2:
              tf_sign = 'left'
          if 0.8 < predicted[0][0] <= 1.0:
              tf_sign = 'right'
      publish_traffic.publish(tf_sign)
      if config.get_debug():
          cv2.imshow('output', mask)
    else:
Code Example #14
    def test_resizeImage(self):
        self.assertEqual('feefifofum_389x292.jpg',
                         util.resizeImage('feefifofum_132x99.jpg'))
        self.assertEqual('feefifofum_389x292.jpg',
                         util.resizeImage('feefifofum_123x321.jpg'))