Code example #1
import time

import cv2

# `cap` (an open cv2.VideoCapture), `removeBG`, `blurValue` and `classify`
# are module-level globals defined elsewhere in the project.
def Test():
    print("Open your Palm Wide...")
    time.sleep(5)
    ret, frame = cap.read()

    frame = cv2.flip(frame, 1)  # mirror the frame

    img = removeBG(frame)    # mask out the static background
    img = img[0:250, 0:250]  # crop to the capture region

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    blur = cv2.GaussianBlur(gray, (blurValue, blurValue), 0)

    cv2.imwrite("predict.jpg", blur)
    result = classify("predict.jpg")

    print("Close your Palm...")
    time.sleep(5)

    ret, frame = cap.read()
    frame = cv2.flip(frame, 1)

    img = removeBG(frame)
    img = img[0:250, 0:250]

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    blur = cv2.GaussianBlur(gray, (blurValue, blurValue), 0)
    
    cv2.imwrite("predict.jpg", blur)
    
    result2 = classify("predict.jpg")
    
    if result == 'open':
        print("Feedback: Good Job! Your hand was fully opened!")
        
    elif result == 'nothing':
        print("Feedback: Please place your hand in the blue rectangle!")
        
    else:
        print("Feedback: Your hand was not fully opened.")

    if result2 == 'close':
        print("Feedback: Good Job! Your hand is fully closed!")

    elif result2 == 'nothing':
        print("Feedback: Please place your hand in the blue rectangle!")

    else:
        print("Feedback: Your hand was not fully closed.")
Code example #2
def implementation(self):
    level = 0
    self.initialization()
    print("Running Agglomerative Hierarchical Clustering algorithm...")
    start = time.time()
    sleep(2.0)
    # keep merging until the target number of clusters is reached
    while len(self.clusters) != self.number_of_clusters:
        c = []
        print("Level %d..." % level)
        # compute the distance between every pair of current clusters
        for i, j in itertools.combinations(self.clusters, 2):
            distance = self.distance_of_clusters(self.clusters[i],
                                                 self.clusters[j])
            c.append([distance, i, j])
        # merge the closest pair
        min_distance = min(c, key=lambda pair: pair[0])
        #print(min_distance)
        self.merge_clusters(min_distance[1], min_distance[2])
        level += 1
    end = time.time()
    execution_time = self.compute_execution_time(start, end)
    classification = classify(self.clusters, self.number_of_data)
    classes = classification.classification()
    metrics = Evaluation_Metrics(classes, self.number_of_data)
    purity = metrics.Purity()
    totalF_measure = metrics.TotalF_measure()
    information = Information(purity, totalF_measure, execution_time,
                              "Agglomerative_Hierarchical_Clustering")
    information.print_information()
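
The loop above leans on two helpers the listing does not include. A minimal sketch, assuming each cluster is a list of numeric feature vectors and single-linkage Euclidean distance (the project may use a different linkage):

import numpy as np

def distance_of_clusters(self, cluster_a, cluster_b):
    # Single-linkage: smallest pairwise Euclidean distance between members.
    return min(np.linalg.norm(np.array(p) - np.array(q))
               for p in cluster_a for q in cluster_b)

def merge_clusters(self, i, j):
    # Fold cluster j into cluster i and drop j from the dictionary.
    self.clusters[i].extend(self.clusters.pop(j))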
Code example #3
def callback(i):
    global ans  # shared list of classification results
    try:
        print(f"Classifying (predict/predict_{i}.jpg)")
        start_time = time.time()
        ans.append(classify(f"predict/predict_{i}.jpg"))
        print("Time:", time.time() - start_time)  # print time taken
    except Exception as e:
        print(e)
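
A hypothetical driver for this callback (the original dispatch code is not part of the listing): classify a batch of saved frames on a small thread pool, collecting results in the shared `ans` list.

from concurrent.futures import ThreadPoolExecutor

ans = []  # filled in by callback

with ThreadPoolExecutor(max_workers=4) as pool:
    pool.map(callback, range(10))  # predict/predict_0.jpg ... predict_9.jpg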
Code example #4
    def implementation(self):
        # one dict of final clusters per run of the k-means algorithm
        results = list()
        # run k-means `self.iterations` times with different centers each time
        for i in range(self.iterations):
            centers = self.choose_centers()
            #print(centers)
            sleep(6.0)
            print("Running the K-Means algorithm (run %d)" % (i + 1))
            start = time.time()
            start = time.time()
            while True:
                clusters = dict()

                for cluster in range(self.k):
                    clusters[cluster] = list()

                for data in self.dataset:
                    # assign each point to its nearest center
                    distances = [
                        self.euclidean_distance(data, centers[center],
                                                len(data) - 1)
                        for center in centers
                    ]
                    clusters[distances.index(min(distances))].append(data)

                previous_centers = dict(centers)

                # recompute each center as the mean of its members
                for cluster in clusters:
                    centers[cluster] = np.average(clusters[cluster], axis=0)
                sleep(5.0)
                if self.algorithm_converged(previous_centers, centers):
                    results.append(clusters)
                    break
            #print(clusters)
            end = time.time()
            execution_time = self.compute_execution_time(start, end)
            classification = classify(clusters, len(self.dataset))
            classes = classification.classification()
            metrics = Evaluation_Metrics(classes, len(self.dataset))
            purity = metrics.Purity()
            totalF_measure = metrics.TotalF_measure()
            information = Information(purity, totalF_measure, execution_time,
                                      self.alg)
            information.print_information()
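
The run loop assumes `euclidean_distance` and `algorithm_converged` helpers defined elsewhere in the class. A plausible minimal sketch, taking the `dimensions` argument to exclude a trailing class label (matching the `len(data) - 1` call above):

import numpy as np

def euclidean_distance(self, point, center, dimensions):
    # distance over the first `dimensions` attributes only
    return np.linalg.norm(np.array(point[:dimensions]) -
                          np.array(center[:dimensions]))

def algorithm_converged(self, previous_centers, centers):
    # converged when no center moved between consecutive iterations
    return all(np.allclose(previous_centers[c], centers[c]) for c in centers)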
Code example #5
def TestV2():
    global space
    sleep(3)
    allowance = 3
    iris_detect = eyeris_detector
    max_score = 2 * repSlider.get()  # two eye checks per repetition
    score = 0

    for i in range(repSlider.get()):
        text.insert(END, text_lst[4] + str(round(d["Time"][-1], 1)) + "\n")
        if cSpaceVar.get():
            text.insert(END, text_lst[5])

        LookImage2.config(image=imgLook)
        LookImage.config(image=transImage)

        soundRight = pyglet.media.load('Right Iris.wav',
                                       streaming=True)  # play audio
        soundRight.play()

        while True:  # check if user pressed space
            sleep(0.1)
            if space:
                if cSpaceVar.get():
                    text.insert(END, text_lst[6])
                else:
                    sleep(3)
                space = False
                break

        eye_cascade = cv2.CascadeClassifier(
            'haar/haarcascade_eye_tree_eyeglasses.xml')  # load haar cascade
        eyes = eye_cascade.detectMultiScale(
            cv2.cvtColor(eyeris_detector.frame, cv2.COLOR_BGR2GRAY), 1.3,
            5)  # detect eyes with haar cascade
        while len(eyes) < 2:  # retry until both eyes are found
            eyes = eye_cascade.detectMultiScale(
                cv2.cvtColor(eyeris_detector.frame, cv2.COLOR_BGR2GRAY), 1.3,
                5)

        for eye in range(2):
            (x, y, w, h) = eyes[eye]
            roi = eyeris_detector.frame[y:y + h,
                                        x:x + w]  # get region of interest
            if eye == 1:
                cv2.imwrite(
                    "left.jpg",
                    cv2.cvtColor(cv2.resize(roi, (400, 400)),
                                 cv2.COLOR_BGR2GRAY))  # write the images
            else:
                cv2.imwrite(
                    "right.jpg",
                    cv2.cvtColor(cv2.resize(roi, (400, 400)),
                                 cv2.COLOR_BGR2GRAY))

        text.insert(END, text_lst[12])

        soundDone = pyglet.media.load('ImageTaken.wav',
                                      streaming=True)  # play audio
        soundDone.play()

        # call the classifier; the right eye is only checked when the left
        # one is flagged, since both must be non-"normal" to score the rep
        a = classify("left.jpg")
        if a != "normal":
            b = classify("right.jpg")

        if a != "normal" and b != "normal":  # short-circuits when a == "normal"
            text.insert(END, text_lst[8])
            score += 1
        else:
            text.insert(END, text_lst[9])

        LookImage2.config(image=transImage)
        LookImage.config(image=imgLook)

        text.insert(END, text_lst[7] + str(round(d["Time"][-1], 1)) + "\n")
        if cSpaceVar.get():
            text.insert(END, text_lst[5])

        soundLeft = pyglet.media.load('Left Iris.wav',
                                      streaming=False)  # play audio
        soundLeft.play()

        while True:  # detect if user pressed space
            sleep(0.1)
            if space:
                if cSpaceVar.get():
                    text.insert(END, text_lst[6])
                else:
                    sleep(3)
                space = False
                break

        left_roi = CalLeft.update_roi()
        right_roi = CalRight.update_roi()

        cv2.imwrite("left.jpg", (cv2.resize(left_roi, (400, 400))))
        cv2.imwrite("right.jpg", (cv2.resize(right_roi, (400, 400))))

        text.insert(END, text_lst[12])

        soundDone = pyglet.media.load('ImageTaken.wav',
                                      streaming=True)  # play audio
        soundDone.play()

        a = classify("left.jpg")
        if a != "normal":
            b = classify("right.jpg")

        if a != "normal" and b != "normal":
            text.insert(END, text_lst[8])
            score += 1
        else:
            text.insert(END, text_lst[9])

    # display score
    text.insert(
        END, f"Test Complete! You got {score} out of {max_score}\nTime: " +
        str(round(d["Time"][-1], 1)) + "\n")

    # display feedback (pass mark is 65% of the maximum score)
    if (score / max_score) * 1000 < 650:
        text.insert(END, text_lst[10])
    else:
        text.insert(END, text_lst[11])

    sound = pyglet.media.load('Test Complete.wav',
                              streaming=False)  # play audio
    sound.play()

    LookImage2.config(image=transImage)
    LookImage.config(image=transImage)
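
The polling loops above wait on a global `space` flag that must be set by a key handler elsewhere in the GUI. A hypothetical tkinter binding (`root` being the Tk main window, a name assumed here):

def on_space(event):
    global space
    space = True  # let the polling loops in TestV2 proceed

root.bind('<space>', on_space)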
Code example #6
testing_set = train(labelled_features_file[:-4] + "2.txt", test_name, "ner")
test(labelled_features_file[:-4] + "2.txt", test_name, "ner", testing_set)
print("\n\n\n####################################\nNER")
testing_set = train(labelled_features_file[:-4] + "3.txt", test_name, "ner")
test(labelled_features_file[:-4] + "3.txt", test_name, "ner", testing_set)
print("\n\n\n####################################\nNER")
testing_set = train(labelled_features_file[:-4] + "4.txt", test_name, "ner")
test(labelled_features_file[:-4] + "4.txt", test_name, "ner", testing_set)
print("\n\n\n####################################\nNER")
testing_set = train(labelled_features_file[:-4] + "5.txt", test_name, "ner")
test(labelled_features_file[:-4] + "5.txt", test_name, "ner", testing_set)
print("\n\n\n####################################\nNER")
testing_set = train(labelled_features_file[:-4] + "6.txt", test_name, "ner")
test(labelled_features_file[:-4] + "6.txt", test_name, "ner", testing_set)
print("\n\n\n####################################\nNER")
testing_set = train(labelled_features_file[:-4] + "7.txt", test_name, "ner")
test(labelled_features_file[:-4] + "7.txt", test_name, "ner", testing_set)
print("\n\n\n####################################\nNER")
testing_set = train(labelled_features_file[:-4] + "8.txt", test_name, "ner")
test(labelled_features_file[:-4] + "8.txt", test_name, "ner", testing_set)

print("\n\n\n####################################\nRE")
testing_set = train(labelled_features_file[:-4] + "_relationships.txt",
                    test_name, "re")
test(labelled_features_file[:-4] + "_relationships.txt", test_name, "re",
     testing_set)

classify(test_name, "ner", input_text_file,
         "tests/" + test_name + "/testOutputNER.txt")
classify(test_name, "re", "tests/" + test_name + "/testOutputNER.txt",
         "tests/" + test_name + "/testOutputRE.txt")
Code example #7
File: main_out_lab.py Project: Shirlly/GAN
        
        print('\n====== Augment data size ', AuX_train.shape, ' ======\n')
        print('\n====== Augment data size ', Auy_train.shape, ' ======\n')

    return save_path, X_train, y_train, X_valid, y_valid, AuX_train, Auy_train, aux, auy

if __name__ == '__main__':
    import argparse
    from datetime import datetime
    parser = argparse.ArgumentParser()
    parser.add_argument('--dt', help='datetime for the initialization of the experiment')
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--test', help='test model')
    parser.add_argument('--modelclass', required=True, help='model class')
    # parser.add_argument('--th', help='threshold')


    args = parser.parse_args()
    print(args)
    if args.train:
        if args.dt:
            # `train` returns a 9-tuple (see the return statement above),
            # so unpack here as well; only the model path is kept
            modelpath, *_ = train(args.modelclass, args.dt)
        else:
            dt = datetime.now()
            dt = dt.strftime('%Y%m%d_%H%M_%S%f')
            modelpath, X_train, y_train, X_valid, y_valid, AuX_train, Auy_train, aux, auy = train(args.modelclass, dt)
            classify(X_train, y_train, X_valid, y_valid, AuX_train, Auy_train, aux, auy)
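
A typical invocation, assuming the script is run directly (the --modelclass value here is hypothetical; it must name a model class that train recognizes):

python main_out_lab.py --train --modelclass cnn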