Example #1
    def detect(self, image_path, filters=None):
        print("=== detect {}".format(image_path))

        # Take the file name after the last path separator ('/' or '\')
        pos = max(image_path.rfind('/'), image_path.rfind('\\'))
        image_name = image_path[pos + 1:]
        print("=== image_name {}".format(image_name))

        image = cv2.imread(image_path)

        src_image = image.copy()
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        h, w, _ = image.shape

        # image is already an ndarray after cvtColor; no extra conversion needed
        image, scale = preprocess_image(image, image_size=self.image_size)

        # run network
        start = time.time()
        boxes, scores, labels = self.model.predict_on_batch(
            [np.expand_dims(image, axis=0)])
        boxes, scores, labels = np.squeeze(boxes), np.squeeze(
            scores), np.squeeze(labels)
        elapsed = time.time() - start
        print("=== elapsed time {}".format(elapsed))

        boxes = postprocess_boxes(boxes=boxes, scale=scale, height=h, width=w)

        # select indices which have a score above the threshold
        indices = np.where(scores > self.score_threshold)[0]

        # select those detections
        boxes = boxes[indices]
        labels = labels[indices]

        detected_objects = []
        objects_stats = {}
        draw_boxes_with_filters(src_image, boxes, scores, labels, self.colors,
                                self.classes, detected_objects, objects_stats,
                                filters)

        self.SEP = ", "
        self.NL = "\n"

        self.save_detected_image(image_name, src_image, filters)
        self.save_detected_objects(image_name, detected_objects, filters)
        self.save_objects_stats(image_name, objects_stats, filters)
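
The score filtering above is plain NumPy boolean indexing; a minimal self-contained sketch of the same pattern with dummy data (no model required):

import numpy as np

scores = np.array([0.9, 0.2, 0.75, 0.1])
boxes = np.arange(16).reshape(4, 4)   # one dummy box per score
keep = np.where(scores > 0.5)[0]      # indices above the threshold
print(boxes[keep], scores[keep])      # only the kept detections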
Example #2
def create_pose_img(points, shape, image):
    """Creates the image of the pose keypoints from the image
    Args:
      points: list of keypoints with x,y,v for each point
      shape: tuple of shape of the input image to put the points at the corresponding locations
      image: opencv format image corosponding to the key-points
    Returns:
     img: numpy array as an image
    """
    img = image.copy()
    colours = [[255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 0],
               [255, 0, 255], [0, 255, 255], [255, 255, 255], [122, 255, 122],
               [122, 122, 255], [255, 122, 122], [255, 255, 122],
               [122, 255, 255], [0, 255, 122], [255, 0, 122], [255, 122, 0]]

    for i in range(len(points) // 3):
        if points[i*3+2] == 2:
            img = cv2.circle(img, (points[i*3], points[i*3+1]), 3, (0,0,255), 5)

    points2 = [(points[i * 3], points[i * 3 + 1]) for i in range(15)]

    lines = [[points2[0],points2[2]], [points2[1],points2[2]], [points2[2],points2[14]], [points2[14],points2[5]],
            [points2[5],points2[6]], [points2[14],points2[3]], [points2[3],points2[4]], [points2[14],points2[13]],
            [points2[13],points2[7]], [points2[7],points2[8]], [points2[8],points2[9]], [points2[13],points2[10]],
            [points2[10],points2[11]], [points2[11],points2[12]]]

    for i, line in enumerate(lines):

        if line[0] != (0,0) and line[1] != (0,0):
            img = cv2.line(img, line[0], line[1], colours[i], 3)
    
    cv2.imshow("img", img)
    cv2.waitKey(0)
    return img
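
A hypothetical call, assuming cv2 is imported as in the example and the flat 15-joint (x, y, v) layout the function indexes into (45 values total); the function itself shows the result via cv2.imshow:

import numpy as np

canvas = np.zeros((256, 256, 3), dtype=np.uint8)
points = []
for j in range(15):
    points += [30 + 12 * j, 40 + 40 * (j % 5), 2]  # x, y, v (2 = visible)
create_pose_img(points, canvas.shape, canvas)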
Example #3
def showImageCV(imagePath, top_three_preds):
    # Take the best guess; assumes top_three_preds yields (letter, probability)
    # pairs, e.g. a list of tuples or dict.items()
    letter, accuracy = next(iter(top_three_preds))
    text = "{}: {:.2f}%".format(letter, accuracy * 100)

    image = cv2.imread(imagePath)
    output = image.copy()
    cv2.putText(output, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                (0, 255, 4), 2)
    # show the output image
    cv2.imshow("Image", output)
    cv2.waitKey(0)
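
A hypothetical call, assuming a best-first list of (letter, probability) pairs and an image file at sample.jpg:

showImageCV("sample.jpg", [("A", 0.91), ("B", 0.06), ("C", 0.03)])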
Example #4
    def _add_text_overlay(self, image):
        """
        Add text overlay to image
        """
        assert self.noise_param < 1, 'Text parameter should be probability of occupancy'

        w, h = image.size
        c = len(image.getbands())

        if platform == 'linux':
            serif = '/usr/share/fonts/truetype/dejavu/DejaVuSerif.ttf'
        else:
            serif = 'Times New Roman.ttf'

        text_img = image.copy()
        text_draw = ImageDraw.Draw(text_img)
        mask_img = Image.new('1', (w, h))
        mask_draw = ImageDraw.Draw(mask_img)

        max_occupancy = np.random.uniform(0, self.noise_param)

        def get_occupancy(x):
            y = np.array(x, np.uint8)
            return np.sum(y) / y.size

        while True:
            font = ImageFont.truetype(serif, np.random.randint(16, 21))
            length = np.random.randint(10, 25)
            chars = ''.join(choice(ascii_letters) for _ in range(length))
            color = tuple(np.random.randint(0, 255, c))
            pos = (np.random.randint(0, w), np.random.randint(0, h))
            text_draw.text(pos, chars, color, font=font)

            # Update mask and check occupancy
            mask_draw.text(pos, chars, 1, font=font)
            if get_occupancy(mask_img) > max_occupancy:
                break

        return {'image': text_img, 'mask': None, 'use_mask': False}
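
The occupancy check above is just the fraction of set pixels in a 1-bit mask; a standalone sketch of the same metric (default PIL font, no class state):

from PIL import Image, ImageDraw
import numpy as np

mask = Image.new('1', (100, 100))
ImageDraw.Draw(mask).text((10, 40), 'hello', 1)
arr = np.array(mask, np.uint8)
print(arr.sum() / arr.size)  # fraction of pixels occupied by text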
Example #5
def searchingBox(image, points, direction=(0, 1)):
    # Specify ROI
    pts = (points[0], (points[0] + points[2]), points[1],
           (points[1] + points[3]))

    # Apply ROI
    roi = image.copy()[pts[2]:pts[3], pts[0]:pts[1]]

    # Threshold
    ret, roi = cv.threshold(roi, 150, 255, cv.THRESH_TOZERO)

    # Find points which belong to the edge
    roi = linesFiltration(roi, direction)
    pts = findLinesPoints(roi, direction)

    # Bail out in case of a faulty input image
    if pts is None:
        print("No line found")
        return -1, -1, -1, -1

    # Fit line
    vector = np.array(pts)
    vx, vy, x, y = cv.fitLine(vector, cv.DIST_HUBER, 0, 0.01, 0.05)

    # Show ROI and fitted line on the original image
    x = x + points[0]  # Go back to the global coordinate system
    y = y + points[1]
    line = vx, vy, x, y

    # Drawing line
    k = 10000
    p1 = (int(x - k * vx), int(y - k * vy))
    p2 = (int(x + k * vx), int(y + k * vy))
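    # NOTE: the drawing calls below use the module-level global `img`,
    # not the `image` argument passed to this function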
    cv.line(img, p1, p2, (255, 255, 255), 3, cv.LINE_AA, 0)
    cv.rectangle(img, (points[0], points[1], points[2], points[3]),
                 (255, 255, 255), 2)
    showResizedImg(img, 'Image', scale=0.5)  ### Visualization
    return line
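
The cv.fitLine call pattern used above, as a self-contained sketch with synthetic points (only OpenCV and NumPy assumed):

import cv2 as cv
import numpy as np

pts = np.array([[i, 2 * i + 1] for i in range(20)], dtype=np.float32)
vx, vy, x0, y0 = cv.fitLine(pts, cv.DIST_HUBER, 0, 0.01, 0.05).ravel()
print(vx, vy, x0, y0)  # unit direction vector and a point on the line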
Example #6
def findArcPoint(image, line1, line2):
    # Solving linear equation to find lines crossing point
    vx1, vy1, x1, y1 = line1
    vx2, vy2, x2, y2 = line2
    A = np.array(
        [[vx1, 0, -1, 0], [vy1, 0, 0, -1], [0, vx2, -1, 0], [0, vy2, 0, -1]],
        dtype='float')
    B = np.array([-x1, -y1, -x2, -y2], dtype='float')
    R = np.linalg.inv(A).dot(B)
    xs, ys = R[2:]
    rot_ang = math.atan2(vy2, vx2)
    vy = abs(vx1 + vx2) if vy2 < 0 else abs(vy1 + vy2)
    vx = abs(vy1 + vy2) if vy2 < 0 else abs(vx1 + vx2)

    l = math.sqrt(vx**2 + vy**2)  # length of those vectors
    # how many vector lengths fit between the line crossing point
    # and the cutting insert arc centre
    k = (PX2MM * 4) / l
    p1 = (int(xs + k * vx), int(ys + k * vy))
    p2 = (int(xs), int(ys))
    cv.line(img, p1, p2, (255, 255, 255), 2, cv.LINE_AA, 0)

    # Find 4 possible arc centres of the cutting insert
    C = []  # coordinates of the 4 possible arc centres
    v = np.array([[vx, vy], [-vx, vy], [-vx, -vy], [vx, -vy]],
                 dtype='float')  # all possible directions of the vector

    for i in range(len(v)):  # all possible configurations
        pom = xs + v[i][0] * k, ys + v[i][1] * k
        cv.circle(img, (int(xs + v[i][0] * k), int(ys + v[i][1] * k)), 1,
                  (255, 255, 255), 4)  ### Visualization
        C.append(pom)

    # Choose the ROI which contains the cutting insert arc - closest to the image centre
    min_dist = 9999
    img_cy, img_cx = img.shape[:2]
    for i in range(len(v)):
        dist = math.sqrt((C[i][0] - img_cx / 2)**2 + (C[i][1] - img_cy / 2)**2)
        if (dist < min_dist):
            min_dist = dist
            properArc = i
    xc, yc = C[properArc]  # proper arc centre coordinates

    # Build ROI between the arc centre (xc, yc) and the lines crossing point
    # (xs, ys), depending on their relative location
    inc = 100  # Offset the outer boundaries to avoid cutting the arc
    rx0 = int(xc) if xc < xs else int(xs - inc)
    ry0 = int(yc) if yc < ys else int(ys - inc)
    rxk = int(xc) if xc > xs else int(xs + inc)
    ryk = int(yc) if yc > ys else int(ys + inc)
    roi = image.copy()[ry0:ryk, rx0:rxk]

    # Rotate roi
    ang = 0
    if (xc > xs and yc < ys): ang = 90
    elif (xc > xs and yc > ys): ang = 180
    elif (xc < xs and yc > ys): ang = 270
    roi = ndimage.rotate(roi, ang)

    ### Visualization ###
    cv.circle(img, (int(R[2]), int(R[3])), int(PX2MM * 4), (255, 255, 255),
              3)  # Lines intersection
    cv.circle(img, (int(xc), int(yc)), 5, (255, 255, 255), 3)  # Arc centre
    cv.circle(img, (int(xc), int(yc)), int(PX2MM * 4 / math.sqrt(2)),
              (255, 255, 255), 2)  # Arc radius
    #showResizedImg(roi,'Arc ROI',scale = 1 ) ### Visualization
    showResizedImg(img, 'Image', scale=0.5)  ### Visualization

    printTime("Find arc prep")

    # Polar transform and filtration
    try:
        roi = polarTransform(roi,
                             start_point=(0, 0),
                             r=(int(PX2MM * 1), int(PX2MM * 2.25)),
                             theta=90,
                             theta_inc=0.25)
    except Exception:
        print("Can't find cutting insert arc")
        return -1

    #showResizedImg(roi,'Arc ROI after polar transform',scale = 2 ) ### Visualization

    #Find edge on the image after polarTransform
    ret, roi2 = cv.threshold(roi, 150, 255, cv.THRESH_TOZERO)
    roi2 = linesFiltration(roi2, (0, -1))
    pts = findLinesPoints(roi2, (0, 1))

    if pts is None:
        print("No line found")
        return -1
    else:
        pts_y = [p[0][1] for p in pts]
        statistics = ExamineArc

        s = statistics.srednia(pts_y)  # mean
        m = statistics.mediana(pts_y)  # median
        o = statistics.odchylenie(pts_y, s)  # standard deviation
        #print("Mean: {:.2f}\nMedian: {:.2f}\nStd dev: {:.2f}".format(s, m, o))
        if 129 < s < 137 and o < 1.5:
            cv.putText(img, 'OK    srednia: {:.2f} odchylenie: {:.2f}'.format(s, o),
                       (100, 100), cv.FONT_HERSHEY_PLAIN, 5, 255, 2)
        else:
            cv.putText(img, 'N_OK   srednia: {:.2f} odchylenie: {:.2f}'.format(s, o),
                       (100, 100), cv.FONT_HERSHEY_PLAIN, 5, 255, 2)
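
The 4x4 system at the top of findArcPoint finds the intersection of two parametric lines p_i + t_i * v_i; a compact self-contained equivalent (sketch):

import numpy as np

def intersect(v1, p1, v2, p2):
    # Solve p1 + t1*v1 == p2 + t2*v2 for (t1, t2)
    A = np.column_stack([v1, [-v2[0], -v2[1]]])
    t = np.linalg.solve(A, np.subtract(p2, p1))
    x = p1[0] + t[0] * v1[0]
    y = p1[1] + t[0] * v1[1]
    return float(x), float(y)

print(intersect((1, 1), (0, 0), (1, -1), (0, 2)))  # -> (1.0, 1.0)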
Example #7
    def start_video(self):
        vidcap = cv2.VideoCapture(self.filename)
        success, image = vidcap.read()
        newImage = image.copy()
        model = load_model('model-280-0.989506-0.981818-0.064893.h5')
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
        bg2 = cv2.createBackgroundSubtractorMOG2(history=5000000,
                                                 varThreshold=72,
                                                 detectShadows=False)
        frame_width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        name = self.filename.split('.')
        frame_length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = vidcap.get(cv2.CAP_PROP_FPS)
        print(fps)
        count = 0
        countEspermas = CountSpermas()

        while count != frame_length:
            sperms_coords = []
            sperms_coords_filter = []
            success, image = vidcap.read()
            if image is None:
                break  # no more frames to read
            newImage = image.copy()
            count += 1

            # frames from VideoCapture are BGR, so use BGR2GRAY
            gray_image = cv2.cvtColor(newImage, cv2.COLOR_BGR2GRAY)
            mask = bg2.apply(gray_image)
            arr = mask > 0
            cleaned = morphology.remove_small_objects(arr, min_size=15)
            # area_threshold replaces the deprecated min_size argument here
            mask_cleaned = morphology.remove_small_holes(cleaned,
                                                         area_threshold=15)
            indices = mask_cleaned.astype(np.uint8)
            indices *= 255
            output = cv2.morphologyEx(indices, cv2.MORPH_OPEN, kernel)

            countEspermas.find_contornos(output, newImage, count, newImage,
                                         sperms_coords, sperms_coords_filter,
                                         model)

            th = 30
            if count > th:

                for each_sperm_coords in sperms_coords:

                    image_detection = OperacionesSpermas.image_detection(
                        image, each_sperm_coords)

                    OperacionesSpermas.sperm_evaluation(
                        image_detection, model, each_sperm_coords,
                        sperms_coords_filter)

            if not os.path.exists('Obj'):
                os.makedirs('Obj')

            path = 'Obj'

            if len(sperms_coords_filter) > 1 and count > th:
                cv2.imwrite(
                    os.path.join(path,
                                 name[0] + '_Frame_' + str(count) + '.jpg'),
                    image)

                txt_name = name[0] + '_Frame_' + str(count) + '.txt'
                os.makedirs('Database', exist_ok=True)
                file_txt = open("Database/" + txt_name, "w")
                for coords in sperms_coords_filter:
                    class_line_txt = '0 '
                    file_txt.write(class_line_txt)

                    x_c = coords[0] / frame_width
                    y_c = coords[1] / frame_height

                    x_n = (((coords[0] + 10) -
                            (coords[0] - 10)) / 2) / frame_width
                    y_n = (((coords[1] + 10) -
                            (coords[1] - 10)) / 2) / frame_height
                    info_coords = "%f %f %f %f" % (round(x_c, 6), round(
                        y_c, 6), round(x_n, 6), round(y_n, 6))
                    file_txt.write(info_coords)
                    next_txtline = "\n"
                    file_txt.write(next_txtline)

                    # DRAW BOUNDING
                    #cv2.rectangle(newImage, (coords[0]-10, coords[1]-10),
                    #              (coords[0]+10, coords[1]+10), (255,0,0), 2)
                    #cv2.imwrite(os.path.join(path , name[0]+'_Frame_' +
                    #                     str(count)+ '_seed' +'.jpg'), newImage)

                file_txt.close()

            # VISUALIZE DETECTION
            #cv2.imshow("Imagen", newImage)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        vidcap.release()
        cv2.destroyAllWindows()
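
A minimal self-contained sketch of the MOG2 background subtraction plus small-object cleanup used in the loop above (frame source replaced by random noise; OpenCV and scikit-image assumed):

import cv2
import numpy as np
from skimage import morphology

bg = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=72,
                                        detectShadows=False)
frame = np.random.randint(0, 255, (120, 160), dtype=np.uint8)
mask = bg.apply(frame)
cleaned = morphology.remove_small_objects(mask > 0, min_size=15)
print(cleaned.sum())  # number of foreground pixels that survived cleanup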
Example #8
image = base64.b64decode(my_string)
filename = 'output.jpg'  # I assume you have a way of picking unique filenames
with open(filename, 'wb') as f:
    f.write(image)
image = cv2.imread(filename)

height, width, channels = image.shape
print("shape: ", image.shape)
percent = (1500 * 100) / height
print(percent)

width = int(width * percent / 100)
height = int(height * percent / 100)

originalImage = image.copy()
# image = cv2.resize(image, (width,height))

print("shape: ", image.shape)

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", help="path to the input image")
args = vars(ap.parse_args())

# define the answer key which maps the question number
# to the correct answer
ANSWER_KEY = {0: 1, 1: 4, 2: 0, 3: 3, 4: 1}
answersArray = []
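
The temp-file round trip at the top of this example can be avoided by decoding straight into OpenCV; a sketch of that alternative (my_string is assumed to hold base64-encoded image bytes, as above):

import base64
import cv2
import numpy as np

buf = np.frombuffer(base64.b64decode(my_string), dtype=np.uint8)
image = cv2.imdecode(buf, cv2.IMREAD_COLOR)  # BGR ndarray, no file on disk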

# #DENSE Model
Example #9
def upload_image():
    if 'image' not in request.files:
        return render_template(
            'ImageML.html',
            prediction='No posted image. Should be attribute named image')
    file = request.files['image']

    if file.filename == '':
        return render_template('ImageML.html',
                               prediction='You did not select an image')

    if file and allowed_file(file.filename):
        items = []

        use_tf_keras = False

        filename = file.filename
        originalFile = splitext(
            os.path.basename(filename))[0] + "_original_image" + splitext(
                os.path.basename(filename))[1]
        input_path = UPLOAD_FOLDER
        print("File Name --> " + filename)
        split_filename = filename.split(".")[0]
        split_filename = split_filename.split("_")[0]
        print("Image prefix value --> " + split_filename)

        if os.path.exists(input_path) and os.path.isdir(input_path):
            shutil.rmtree(input_path)
            print('input directory removed')

        ImageFile.LOAD_TRUNCATED_IMAGES = False
        img = Image.open(BytesIO(file.read()))

        K.clear_session()
        config = AcneInferenceConfig()

        model = modellib.MaskRCNN(mode="inference",
                                  config=config,
                                  model_dir='./')

        model.load_weights(COCO_TRAINED_MODEL, by_name=True)
        image = cv2.cvtColor(np.asarray(img), cv2.COLOR_BGR2RGB)
        # the two successive channel swaps cancel out, so actual_image is
        # simply the RGB array from PIL, then resized
        actual_image = np.asarray(img)
        actual_image = imutils.resize(actual_image, width=600)
        image = imutils.resize(image, width=600)

        r = model.detect([image], verbose=1)[0]

        for i in range(0, r["rois"].shape[0]):
            mask = r["masks"][:, :, i]
            image = visualize.apply_mask(image,
                                         mask, (1.0, 0.0, 0.0),
                                         alpha=0.5)
            image = visualize.draw_box(image, r["rois"][i], (1.0, 0.0, 0.0))

        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

        for i in range(0, len(r["scores"])):
            (startY, startX, endY, endX) = r["rois"][i]
            classID = r["class_ids"][i]
            label = CLASS_NAMES[classID]
            score = r["scores"][i]

            text = "{}: {:.4f}".format(label, score)
            y = startY - 10 if startY - 10 > 10 else startY + 10
            cv2.putText(image, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX,
                        0.4, (0, 255, 0), 1)

        if not os.path.exists(input_path):
            os.makedirs(input_path)
            imageio.imwrite(input_path + filename, image)
            imageio.imwrite(input_path + originalFile, actual_image)

        #time.sleep(5)
        if len(r["scores"]) > 1:
            items.append('Acne Detected')
        else:
            items.append('No Acne Detected')

        response = {'Prediction': items}

        basepath = 'static/uploads'
        imagespath = os.path.join(basepath, '*g')
        images_list = {}

        for imag in glob.glob(imagespath):
            if "_original_image" in splitext(os.path.basename(imag))[0]:
                images_list[0] = originalFile
            else:
                images_list[1] = filename

        flash(
            'Image successfully uploaded and displayed : {}'.format(response))
        return render_template('ImageML.html',
                               filename=images_list[1],
                               filename1=images_list[0])
    else:
        return render_template('ImageML.html',
                               prediction='Invalid File extension')
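
The example relies on an allowed_file helper it never defines; a typical sketch (the extension set is an assumption):

ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}

def allowed_file(filename):
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS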
Example #10
image = base64.b64decode(my_string)
filename = 'output.jpg'  # I assume you have a way of picking unique filenames
with open(filename, 'wb') as f:
    f.write(image)
image = cv2.imread(filename)

height, width, channels = image.shape
print("shape: ", image.shape)
percent = (1200 * 100) / height
print(percent)

width = int(width * percent / 100)
height = int(height * percent / 100)

originalImage = image.copy()
# image = cv2.resize(image, (width,height))

print("shape: ", image.shape)

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", help="path to the input image")
args = vars(ap.parse_args())

# load the image, convert it to grayscale, blur it
# slightly, then find edges
print(args)

# #DENSE Model
# new_model = tf.keras.models.load_model('emnist_trained_dense.h5')
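
The percent arithmetic above just rescales the image to a fixed target height while keeping the aspect ratio; a standalone check of the numbers (sketch, target height 1200 as in this example):

h, w = 3000, 2000      # pretend original dimensions
scale = 1200 / h       # same ratio as percent / 100 above
new_w, new_h = int(w * scale), int(h * scale)
print(new_w, new_h)    # -> 800 1200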