# Example #1 (score: 0)
def spoken_term_detection_truncated(keywords, utterances, left_encode_num,
                                    right_encode_num, distance_type,
                                    output_dir):
    """Run spoken-term detection using truncated (windowed) features.

    For each keyword, selects the truncated slice of its sampling-feature
    matrix, compares it against every utterance with ``Distance.distance``
    and writes one negated minimum distance per utterance (one line each)
    to ``<output_dir><keyword_file_id>.result``.

    Args:
        keywords: sequence of objects exposing ``sampling_feature`` (2-D
            array, phones x features) and ``getFileId()``.
        utterances: sequence of objects with the same interface.
        left_encode_num: left context size, forwarded to the helpers.
        right_encode_num: right context size, forwarded to the helpers.
        distance_type: metric name forwarded to ``Distance.distance``.
        output_dir: path prefix for the per-keyword ``.result`` files
            (presumably ends with a path separator — confirm at call site).
    """
    for keyword in keywords:
        keyword_sampling_feature = keyword.sampling_feature
        keyword_phone_num = keyword_sampling_feature.shape[0]
        left_position, right_position = get_truncated_position(
            keyword_phone_num, left_encode_num, right_encode_num)
        keyword_indexes_selected = get_real_indexes(
            range(keyword_phone_num), left_encode_num, right_encode_num)
        keyword_sampling_feature_selected = keyword_sampling_feature[
            keyword_indexes_selected, :]
        output_file = output_dir + keyword.getFileId() + ".result"
        # "with" guarantees the result file is closed even if a distance
        # computation raises (the original leaked the handle on error).
        with open(output_file, "w") as fid:
            for utterance in utterances:
                utterance_sampling_feature = utterance.sampling_feature
                utterance_phone_num = utterance_sampling_feature.shape[0]
                utterance_indexes_selected = get_real_indexes(
                    range(utterance_phone_num), left_encode_num,
                    right_encode_num)
                utterance_sampling_feature_selected = \
                    utterance_sampling_feature[utterance_indexes_selected, :]
                distance = Distance.distance(
                    keyword_sampling_feature_selected[:, left_position:
                                                      right_position],
                    utterance_sampling_feature_selected[:, left_position:
                                                        right_position],
                    distance_type,
                    sub_num=PHONE_LEN)
                # write(), not writelines(): we emit one string, not a
                # sequence of lines.
                fid.write(str(-distance.min()) + "\n")
        log.Log("finished the std for keyword " + str(keyword.getFileId()))
# Example #2 (score: 0)
# File: Chat.py  Project: roohom/demo
def has_distance(text):
    """Stateful distance-dialogue handler.

    If a previous turn opened distance mode (``Distance.isOpen``), answer
    with the distance from the fixed origin address to *text* and close
    the mode.  If *text* is one of the trigger phrases, open the mode and
    return the help message.  Anything else falls through to
    ``has_weather``.
    """
    # Pending question from the previous turn: answer it and reset.
    if Distance.isOpen:
        Distance.isOpen = False
        return "你距离我 %s" % Distance.distance("上海市长宁区凯利大厦", text)
    # Trigger phrases arm the mode for the next message.
    triggers = ['你在哪', '离我多远', '距离']
    if text in triggers:
        Distance.isOpen = True
        return Distance.HELP_MSG
    # Not distance-related: delegate to the weather handler.
    return has_weather(text)
def search(img_path):
    """Rank stored images by colour-feature distance to *img_path*.

    Computes the colour feature of the query image, compares it against
    every (name, feature) tuple persisted in ``cfs.txt`` and prints the
    ten closest matches as (name, distance) pairs.

    NOTE(review): this module defines several functions named ``search``;
    only the last definition survives at import time — confirm intent.
    """
    cf = CaculateColorVector.get_color_feature(img_path)
    # TODO(review): hard-coded user path — consider parameterising.
    cfs = File_Operation.read_list_from_file('/Users/ligang/Documents/cfs.txt')
    # One (name, distance) pair per stored feature vector.
    distances = [(t[0], Distance.distance(cf, t[1])) for t in cfs]
    top_distances = heapq.nsmallest(10, distances, key=lambda x: x[1])
    # print() is valid on both Python 2 and 3 for a single argument.
    print(top_distances)
def search(img_path):
    """Rank stored images by uniform-LBP distance and copy the 20 closest.

    Computes the uniform-LBP feature of the query image, compares it with
    every (name, feature) tuple persisted in ``ulbp.txt``, copies the 20
    nearest images into the result directory and prints the ranking.

    NOTE(review): this module defines several functions named ``search``;
    only the last definition survives at import time — confirm intent.
    """
    ulbp_f = ulbp.ulbp(img_path)
    ulbps = File_Operation.read_list_from_file('/Users/ligang/Documents/ulbp.txt')
    # One (name, distance) pair per stored feature vector.
    distances = [(t[0], Distance.distance(ulbp_f, t[1])) for t in ulbps]
    top_distances = heapq.nsmallest(20, distances, key=lambda x: x[1])
    # TODO(review): hard-coded user paths — consider parameterising.
    dstDir = '/Users/ligang/Documents/Emilie/lbp_search_result'
    img_set_dir = '/Users/ligang/Documents/Emilie/dress'
    for top in top_distances:
        shutil.copy2(os.path.join(img_set_dir, top[0]),
                     os.path.join(dstDir, top[0]))
    # print() is valid on both Python 2 and 3 for a single argument.
    print(top_distances)
def search(img_path):
    """Rank stored images by combined colour + uniform-LBP distance.

    The query image's colour feature and uniform-LBP feature are each
    compared against the persisted feature lists (which must be aligned
    by image name); the two distances are summed, the 20 closest images
    are copied to the result directory and the ranking is printed.

    NOTE(review): this module defines several functions named ``search``;
    only the last definition survives at import time — confirm intent.
    """
    cf = CaculateColorVector.get_color_feature(img_path)
    unlbp_feature = Uniform_LBP.ulbp(img_path)

    # TODO(review): hard-coded user paths — consider parameterising.
    cfs = File_Operation.read_list_from_file('/Users/ligang/Documents/cfs.txt')
    ulbps = File_Operation.read_list_from_file('/Users/ligang/Documents/ulbp.txt')

    distances = []
    for cf_tuple, ulbp_tuple in zip(cfs, ulbps):
        # Both lists must describe the same image at each position.
        # (assert is stripped under -O; kept for behaviour compatibility.)
        assert cf_tuple[0] == ulbp_tuple[0]
        # Unweighted sum of the two feature distances.
        d = (Distance.distance(cf, cf_tuple[1]) +
             Distance.distance(unlbp_feature, ulbp_tuple[1]))
        distances.append((cf_tuple[0], d))

    top_distances = heapq.nsmallest(20, distances, key=lambda x: x[1])
    dstDir = '/Users/ligang/Documents/Emilie/colorlbp_search_result'
    img_set_dir = '/Users/ligang/Documents/Emilie/dress'
    for top in top_distances:
        shutil.copy2(os.path.join(img_set_dir, top[0]),
                     os.path.join(dstDir, top[0]))
    # print() is valid on both Python 2 and 3 for a single argument.
    print(top_distances)
# Example #6 (score: 0)
 def __init__(self):
     """Construct the default vision-pipeline collaborators.

     Each collaborator comes from a module-level factory-style class
     (``Module.className()``); construction order is preserved in case
     any constructor has side effects.
     """
     image_source = Image.image()
     finder = GreenFinder.greenFinder()
     range_sensor = Distance.distance()
     angle_finder = TargetAngle.targetAngle()
     # Private (name-mangled) fields; names unchanged for compatibility.
     self.__img = image_source
     self.__greenFinder = finder
     self.__dist = range_sensor
     self.__targetAngle = angle_finder
# Example #7 (score: 0)
def detect_QR_code(camera, PBR):
    """Search for and decode a QR code with the Pi camera, adjusting range.

    Repeatedly captures frames, estimates the distance to the target via
    ``Distance.distance`` and nudges the robot with the two PBR motors
    until the reading is between 6 and 12 units.  It then looks for the
    three QR finder patterns (deeply nested contours), warps the code to
    a flat 100x100 view and decodes it with zbar, dispatching movement
    commands for recognised messages.

    Args:
        camera: picamera-style camera (captured through PiRGBArray).
        PBR: motor controller exposing SetMotor1/SetMotor2.

    Returns:
        True once a symbol has been decoded and handled; False when the
        target is too far (dist > 24) or distance estimation fails;
        implicitly None if 8 aligned frames pass without a decode.
        NOTE(review): ``done`` is never set to True, so the loop only
        ends via ``return`` or the frame counter.
    """
    print "QR code reading"
    #	PBR.SetMotor1(0.2)
    #        PBR.SetMotor2(-0.2)
    time.sleep(0.1)
    done = False
    count = 0
    while count < 8 and not done:
        print "not done"
        rawCapture = PiRGBArray(camera)

        # grab an image from the camera
        camera.capture(rawCapture, format="bgr")
        image = rawCapture.array

        try:
            dist = Distance.distance(image)
        # NOTE(review): bare except maps any failure (including
        # programming errors) to "no QR code" — consider narrowing.
        except:
            return False

        print "dist" + str(dist)

        # Too far away to bother: give up on this attempt.
        if dist > 24:
            return False

        # Between 12 and 24 units: drive (presumably forward — confirm
        # motor wiring) for a time proportional to the error, re-measure.
        if dist > 12:
            PBR.SetMotor1(0.2)
            PBR.SetMotor2(-0.2)
            time.sleep(0.01 * (dist - 6))
            continue
        # Closer than 6 units: back off proportionally, re-measure.
        elif dist < 6:
            PBR.SetMotor1(-0.2)
            PBR.SetMotor2(0.2)
            time.sleep(0.01 * (6 - dist))
            continue

        # In range [6, 12]: count this as an aligned frame.
        count += 1

        #		cv2.imshow("rect",image)

        # Alias to the colour frame; rebinding `image` below leaves
        # `img` pointing at the original BGR array for drawing/warping.
        img = image

        #show the image
        #wait until some key is pressed to procced

        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        edges = cv2.Canny(image, 100, 200)

        #		cv2.imshow("edges", edges)
        #		key = cv2.waitKey(1) & 0xFF

        # NOTE(review): two-value unpack matches OpenCV 2.x/4.x;
        # OpenCV 3.x returns (image, contours, hierarchy) and would
        # raise ValueError here — confirm the installed version.
        contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)

        cv2.drawContours(image, contours, -1, (0, 255, 0), 3)

        # Contour moments -> centroids in mc; zero-area contours get a
        # (0, 0) placeholder to keep mc index-aligned with contours.
        mu = []
        mc = []
        mark = 0
        for x in range(0, len(contours)):
            mu.append(cv2.moments(contours[x]))

        for m in mu:
            if m['m00'] != 0:
                mc.append((m['m10'] / m['m00'], m['m01'] / m['m00']))
            else:
                mc.append((0, 0))

    #	print hierarchy[0]

    #	print(len(contours))

        # A QR finder pattern appears as deeply nested contours: walk the
        # first-child links (hierarchy[0][k][2]) counting nesting depth c.
        for x in range(0, len(contours)):
            #		print "x"+str(int(mc[x][0]))+" "+str(int(mc[x][1]))
            k = x
            c = 0
            while (hierarchy[0][k][2] != -1):
                k = hierarchy[0][k][2]
                c = c + 1
            # NOTE(review): dead branch — the loop above only exits when
            # hierarchy[0][k][2] == -1, so this can never be true.
            if hierarchy[0][k][2] != -1:
                c = c + 1

    #		print str(c )

            # Depth >= 5 marks a finder-pattern candidate; remember the
            # first three contour indices as A, B, C.
            if c >= 5:
                if mark == 0:
                    A = x
                elif mark == 1:
                    B = x
                elif mark == 2:
                    C = x
                mark = mark + 1
    #			break

    #	print "mark"+str(mark)
    #	print str(mc[A])
    #	if mark == 1:
    #		cv2.circle(image,( int(mc[A][0]),int(mc[A][1])), 5, ( 110, 220, 0 ), -1)
    #	elif mark == 2:
    #		cv2.circle(image,( int(mc[A][0]),int(mc[A][1])), 5, ( 110, 220, 0 ), -1)
    #		cv2.circle(image,( int(mc[B][0]),int(mc[B][1])), 5, ( 110, 220, 0 ), -1)
    #	elif mark == 3:
    #		cv2.circle(image,( int(mc[A][0]),int(mc[A][1])), 5, ( 110, 220, 0 ), -1)
    #		cv2.circle(image,( int(mc[B][0]),int(mc[B][1])), 5, ( 110, 220, 0 ), -1)
    #		cv2.circle(image,( int(mc[C][0]),int(mc[C][1])), 5, ( 110, 220, 0 ), -1)

    #	cv2.imshow("BW_image", image)

        # Need all three finder patterns before attempting a decode.
        if mark > 2:
            # Pairwise centroid distances; the pattern opposite the
            # longest side is the "top" (outlier) corner of the code.
            AB = distance(mc[A], mc[B])
            BC = distance(mc[B], mc[C])
            AC = distance(mc[A], mc[C])

            # NOTE(review): strict ">" comparisons — if two sides tie for
            # longest, no branch fires and outlier/median1/median2 stay
            # unbound (NameError below).  Confirm ties cannot occur.
            if (AB > BC and AB > AC):
                outlier = C
                median1 = A
                median2 = B
            elif (AC > AB and AC > BC):
                outlier = B
                median1 = A
                median2 = C
            elif (BC > AB and BC > AC):
                outlier = A
                median1 = B
                median2 = C

            top = outlier
            # Signed distance of the outlier from the line through the
            # two medians, plus that line's slope/alignment — together
            # they fix which median is "right" vs "bottom".
            dist = lineEquation(mc[median1], mc[median2], mc[outlier])
            slope, align = lineSlope(mc[median1], mc[median2])

            # NOTE(review): the align == 0 branch never assigns
            # `orientation`, which updateCornerOr() consumes below —
            # NameError on the first qualifying frame in that case.
            if align == 0:
                bottom = median1
                right = median2
            elif (slope < 0 and dist < 0):
                bottom = median1
                right = median2
                orientation = 0
            elif (slope > 0 and dist < 0):
                right = median1
                bottom = median2
                orientation = 1
            elif (slope < 0 and dist > 0):
                right = median1
                bottom = median2
                orientation = 2
            elif (slope > 0 and dist > 0):
                bottom = median1
                right = median2
                orientation = 3

            areatop = 0.0
            arearight = 0.0
            areabottom = 0.0

            #		print "top"+str(top)
            #		print "right"+str(right)
            #		print "bottom"+str(bottom)
            #		print "cv2.contourArea(contours[top]) "+str(cv2.contourArea(contours[top]) )
            #		print "cv2.contourArea(contours[right]) "+str(cv2.contourArea(contours[right]) )
            #		print "cv2.contourArea(contours[bottom]) "+str(cv2.contourArea(contours[bottom]) )

            # Only decode when all three patterns are valid contours of
            # non-trivial area (> 10 px^2) — filters centroid noise.
            if (top < len(contours) and right < len(contours)
                    and bottom < len(contours)
                    and cv2.contourArea(contours[top]) > 10
                    and cv2.contourArea(contours[right]) > 10
                    and cv2.contourArea(contours[bottom]) > 10):
                tempL = []
                tempM = []
                tempO = []
                src = []
                dst = []
                N = (0, 0)
                # Corner vertices of each finder pattern, rotated into a
                # canonical order by the detected orientation.
                tempL = getVertices(contours, top, slope, tempL)
                tempM = getVertices(contours, right, slope, tempM)
                tempO = getVertices(contours, bottom, slope, tempO)
                L = updateCornerOr(orientation, tempL)
                M = updateCornerOr(orientation, tempM)
                O = updateCornerOr(orientation, tempO)

                qr = np.zeros((100, 100))

                # N = the missing 4th corner: intersection of the lines
                # extending the right and bottom pattern edges.
                iflag, N = getIntersection(M[1], M[2], O[3], O[2], N)
                src.append(L[0])
                src.append(M[1])
                src.append(N)
                src.append(O[3])
                src = np.asarray(src, np.float32)

                # Destination: axis-aligned 100x100 square.
                dst.append((0, 0))
                dst.append((100, 0))
                dst.append((100, 100))
                dst.append((0, 100))
                dst = np.asarray(dst, np.float32)

                # Rectify the QR region out of the original colour frame.
                warp_matrix = cv2.getPerspectiveTransform(src, dst)
                warped = cv2.warpPerspective(img, warp_matrix, (100, 100))

                #			cv2.imshow("warped",warped)
                # Debug overlay on the colour frame (not used further).
                cv2.circle(img, N, 1, (0, 0, 255), 2)
                cv2.drawContours(img, contours, top, (255, 0, 0), 2)
                cv2.drawContours(img, contours, right, (0, 255, 0), 2)
                cv2.drawContours(img, contours, bottom, (0, 0, 255), 2)
                warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)

                scanner = zbar.ImageScanner()
                scanner.parse_config('enable')
                # NOTE(review): zbar.Image takes (width, height, ...);
                # shape[0] is rows (height).  Harmless here because the
                # warp is square (100x100), but verify if that changes.
                imagez = zbar.Image(warped.shape[0], warped.shape[1], 'Y800',
                                    warped.tostring())
                scanner.scan(imagez)
                for symbol in imagez:
                    x = symbol.data
                    print "------------------------------" + x + "------------------------------"
                    message = x
                    # Dispatch known messages to movement commands;
                    # returns True after the FIRST symbol regardless of
                    # whether it matched a known command.
                    if message == "Stop 5 min":
                        Move.bypass(PBR)
                    elif message == "Spin 1 circle":  #right
                        Move.turn_right(45, PBR)
                    return True