Code Example #1
File: appFlask.py Project: Wenfei134/NotAVirus
def prediction():
    if request.method == 'POST':
        # Save the uploaded audio file into the configured upload folder.
        file = request.files['audiofile']
        path = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
        file.save(path)
        # Convert the audio to a mel spectrogram, then delete the temporary file.
        melSpec = submitAudio(path)
        os.remove(path)
        # Classify the spectrogram and return the result to the client.
        results = ci.classify_image(melSpec)
        # if melSpec is not None:
        #     prediction = getPrediction('./audio.jpg')

        return results
    else:
        return "hello"
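This snippet only shows the Flask handler; a minimal client sketch for exercising such an upload endpoint with the requests library follows. The URL and route are assumptions, since the snippet does not show the app.route() binding for prediction().

import requests

# Hypothetical address and route; the real binding is not shown in the snippet.
URL = "http://localhost:5000/prediction"

with open("sample.wav", "rb") as f:
    # The form field name 'audiofile' matches request.files['audiofile'] above.
    response = requests.post(URL, files={"audiofile": f})
print(response.text)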
Code Example #2
File: object_recognition.py Project: expper/expper
def detect_image(self):
    path = "pt.png"
    cap = None
    if not self.is_enabled:
        # Grab a single frame from the default camera.
        cap = cv2.VideoCapture(0)
        ret, self.frame = cap.read()
    cv2.imwrite(path, self.frame)
    if cap is not None:
        cap.release()
        cv2.destroyAllWindows()
    # Classify the saved frame and turn the detection into a spoken answer.
    c_img = classify_image()
    s = self.__detected_object_to_answer(c_img.detect_image(path))
    os.remove(path)
    if s == "":
        return "I am sorry but I don't know."
    return "This is " + s
Code Example #3
File: rpi_client.py Project: MakeNTU/2021_team11_
    print(start_time - time.time(), "secs to start.")
    #if current_event["Event"] == "Sleep in bed" and start_time > time.time():

    #set_angle(35, servo_channel, pwm)

    while time_left > 0:
        ret, frame = cap.read()
        #cv2.imshow('frame', frame )

        if time.time() - start_time >= time_interval:

            frame = frame[:, :, ::-1]  #change color from BGR to RGB
            image = Image.fromarray(frame)
            image = image.resize((width, height))  # resize image to (224, 224)
            set_angle(90, servo_channel, pwm)
            label_id1, prob1 = classify_image(interpreter1, image)
            set_angle(35, servo_channel, pwm)
            label_id2, prob2 = classify_image(interpreter2, image)
            if labels2[label_id2][2:] == "sleep in bed":
                classification_result = "sleep in bed"
            else:
                classification_result = labels1[label_id1][2:]
            start_time = time.time()
            time_left -= time_interval

            # Return the classification label of the image.
            #classification_result = labels1[label_id1][2:]
            send_msg = [
                "classifiedResult", {
                    "origin": current_event,
                    "classified_result": classification_result,
Code Example #4
print('Starting road detector')

if len(sys.argv) == 3:
    if sys.argv[1] == "train":
        train.train_model(sys.argv[2])
    elif sys.argv[1] == "test":
        image_path = sys.argv[2]
        files = os.listdir(image_path)
        files = list(filter(lambda x: 'jpg' in x and 'aux' not in x, files))
        filenames = list(map(lambda x: re.sub(r'\.jpg$', '', x), files))
        for file in filenames:
            print('-----------------')
            print('classifying image ' + file)
            path = image_path + file + '.jpg'
            result = classify_image.classify_image(path)
            cv2.imwrite(file + 'raw.jpg', result)
            print('processing image ' + file)
            processed_image = extract_paths.extract_paths(result)
            cv2.imwrite(file + 'processed.jpg', processed_image)
    elif sys.argv[1] == "post-process":
        image_path = sys.argv[2]
        files = os.listdir(image_path)
        files = list(filter(lambda x: 'jpg' in x and 'aux' not in x, files))
        filenames = list(map(lambda x: re.sub(r'\.jpg$', '', x), files))
        for file in filenames:
            print('-----------------')
            print('processing image ' + file)
            path = image_path + file + '.jpg'
            input_image_from_file = cv2.imread(path, 0)
            processed_image = extract_paths.extract_paths(
Code Example #5
def hello():
    image_url = request.forms.get('image_url')
    returnStr = ""
    # Append each classification result on its own HTML line.
    for i in CIMG.classify_image(image_url):
        returnStr += i + "<br />"
    return returnStr + '<br /><img src="' + image_url + '">'
Code Example #6
File: TSLite_webcam.py Project: MakeNTU/2021_team11_
label_path = "models_and_labels/labels2.txt"

# Read class labels.
labels = load_labels(label_path)

interpreter = Interpreter(model_path)
print("Model Loaded Successfully.")

interpreter.allocate_tensors()
_, height, width, _ = interpreter.get_input_details()[0]['shape']
print("Image Shape (", width, ",", height, ")")
init_time = time.time()
while True:

    ret, frame = cap.read()
    cv2.imshow('frame', frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    if time.time() - init_time >= 2:
        #image = Image.fromarray(cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)) # convert opencv(frame) to PIL(image)
        frame = frame[:, :, ::-1]
        image = Image.fromarray(frame)
        image = image.resize((width, height))  # resize image to (224, 224)
        label_id, prob = classify_image(interpreter, image)
        init_time = time.time()

        # Return the classification label of the image.
        classification_label = labels[label_id]
        print("Image Label is :", classification_label, ", with Accuracy :",
              np.round(prob * 100, 2), "%.")
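Examples #3 and #6 above (and Example #8 below) call classify_image(interpreter, image) without showing its definition. The following is a minimal sketch of what such a helper commonly looks like for a TFLite classifier, based on the standard TFLite classification recipe; it is an assumption, not the project's actual code.

import numpy as np

def classify_image(interpreter, image):
    # Copy the resized PIL image into the model's input tensor.
    input_details = interpreter.get_input_details()[0]
    tensor = np.expand_dims(np.asarray(image), axis=0).astype(input_details['dtype'])
    interpreter.set_tensor(input_details['index'], tensor)
    interpreter.invoke()

    # Read the output scores; dequantize if the model is quantized.
    output_details = interpreter.get_output_details()[0]
    output = np.squeeze(interpreter.get_tensor(output_details['index']))
    if output_details['dtype'] == np.uint8:
        scale, zero_point = output_details['quantization']
        output = scale * (output.astype(np.float32) - zero_point)

    # Return the index of the best class and its score.
    label_id = int(np.argmax(output))
    return label_id, float(output[label_id])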
Code Example #7
    csv_path = cmdline_args.csv
    if cmdline_args.csv == "default":
        csv_path = cmdline_args.folder + "/overview.csv"

    with open(csv_path) as csv_file:
        csv_reader = csv.reader(csv_file)
        line_count = 0
        for row in csv_reader:
            if line_count == 0:
                line_count += 1
            else:
                split_row = row[0].split(';')
                if split_row[1] != 'None' and cmdline_args.skip:
                    line_count += 1
                    continue
                image_name = f"classified_{line_count}b.jpg"
                image_path = (cmdline_args.folder + "\\" + split_row[0] +
                              ".ndpi").replace("/",
                                               "\\").replace("\\\\", "\\")
                annotation_path = (cmdline_args.folder + "\\" +
                                   split_row[1]).replace("/", "\\").replace(
                                       "\\\\", "\\")
                mask_path = (cmdline_args.folder + "\\" +
                             split_row[2]).replace("/",
                                                   "\\").replace("\\\\", "\\")
                classify_image(model, device, image_path, mask_path,
                               annotation_path,
                               f"{output_folder}\\{image_name}", patch_size)

                line_count += 1
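The chained replace() calls above hand-assemble Windows-style paths from the CSV fields. A sketch of the same path construction using the standard-library pathlib module is shown below; the variable names are taken from the snippet, and this is an alternative formulation rather than the project's code.

from pathlib import Path

folder = Path(cmdline_args.folder)
# pathlib uses the correct separator for the current OS, so no manual "\\" handling is needed.
image_path = folder / (split_row[0] + ".ndpi")
annotation_path = folder / split_row[1]
mask_path = folder / split_row[2]
output_path = Path(output_folder) / image_name

classify_image(model, device, str(image_path), str(mask_path),
               str(annotation_path), str(output_path), patch_size)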
Code Example #8
File: TSLite_test.py Project: MakeNTU/2021_team11_
#print("Model Loaded Successfully.")

interpreter.allocate_tensors()
_, height, width, _ = interpreter.get_input_details()[0]['shape']
#print("Image Shape (", width, ",", height, ")")

interpreter2.allocate_tensors()
# Note: width and height are overwritten here, so both models are expected to use the same input size.
_, height, width, _ = interpreter2.get_input_details()[0]['shape']

# Load an image to be classified.
image = Image.open(img_folder + "dog.jpg").convert('RGB').resize(
    (width, height))

# Classify the image.
time1 = time.time()
label_id, prob = classify_image(interpreter, image)
label_id2, prob2 = classify_image(interpreter2, image)
time2 = time.time()
classification_time = np.round(time2 - time1, 3)
#print("Classification Time =", classification_time, "seconds.")

# Read class labels.
labels = load_labels(label_path)
labels2 = load_labels(label2_path)
# Return the classification label of the image.
classification_label = labels[label_id].split(" ")[1]
classification_label2 = labels2[label_id2].split(" ")[1]
print("Image Label is :", classification_label, ", with Accuracy :",
      np.round(prob * 100, 2), "% by interpreter1")
print("Image Label is :", classification_label2, ", with Accuracy :",
      np.round(prob2 * 100, 2), "% by interpreter2")
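load_labels() is called here and in Example #6 but is not defined in any of the snippets. A plausible minimal implementation, assuming one label per line in the text file (an assumption, not the project's code), is:

def load_labels(path):
    # Return the label file as a list of stripped lines, e.g. "0 cat".
    with open(path, "r") as f:
        return [line.strip() for line in f]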