Example No. 1
def main():

    detection_face_and_crop.main()

    with tf.Graph().as_default():
        with tf.Session() as sess:
            model = './20170512-110547/'
            # model='./20180408-102900/'
            facenet.load_model(model)
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            image = []
            nrof_images = 0

            emb_dir = './images/emb_img'
            all_obj = []
            for i in os.listdir(emb_dir):
                all_obj.append(i)
                img = misc.imread(os.path.join(emb_dir, i), mode='RGB')
                prewhitened = facenet.prewhiten(img)
                image.append(prewhitened)
                nrof_images = nrof_images + 1

            images = np.stack(image)
            feed_dict = {
                images_placeholder: images,
                phase_train_placeholder: False
            }
            # get the embedding matrix of the images in emb_img
            compare_emb = sess.run(embeddings, feed_dict=feed_dict)
            compare_num = len(compare_emb)

            # video="http://*****:*****@192.168.137.33:8081/"
            # capture =cv2.VideoCapture(video)
            dirVideo = "video1.mp4"
            capture = cv2.VideoCapture(dirVideo)
            # capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
            # capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
            capture.set(cv2.CAP_PROP_FPS, 60)

            # size =(int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)))
            # fourcc = cv2.VideoWriter_fourcc('M','J','P','G')
            # writeVideo = cv2.VideoWriter("aaa.avi", fourcc, 5, size)
            size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                    int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            writeVideo = cv2.VideoWriter('output.avi', fourcc, 20, size, 1)

            cv2.namedWindow("camera", 1)
            picNumber = 0
            count = 0
            frame_interval = 3
            while True:
                isSuccess, frame = capture.read()
                if not isSuccess:
                    break
                if count % frame_interval == 0:
                    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    tag, bounding_box, crop_image = load_and_align_data(
                        rgb_frame, 160, 44)
                    if tag:
                        feed_dict = {
                            images_placeholder: crop_image,
                            phase_train_placeholder: False
                        }
                        emb = sess.run(embeddings, feed_dict=feed_dict)
                        print(emb)
                        temp_num = len(emb)
                        fin_obj = []
                        # calculate the distance between each camera face and each emb_img face
                        for i in range(temp_num):
                            dist_list = []
                            for j in range(compare_num):
                                dist = np.sqrt(
                                    np.sum(
                                        np.square(
                                            np.subtract(
                                                emb[i, :],
                                                compare_emb[j, :]))))

                                dist_list.append(dist)
                            min_value = min(dist_list)
                            if min_value > 0.65:
                                fin_obj.append('UNKNOWN')
                            else:
                                # the minimum distance identifies the recognized face
                                fin_obj.append(
                                    all_obj[dist_list.index(min_value)][0:6])
                        # draw rectangle
                        for rec_position in range(temp_num):
                            cv2.rectangle(frame,
                                          (bounding_box[rec_position, 0],
                                           bounding_box[rec_position, 1]),
                                          (bounding_box[rec_position, 2],
                                           bounding_box[rec_position, 3]),
                                          (0, 255, 0), 2, 8, 0)
                            cv2.putText(frame,
                                        fin_obj[rec_position],
                                        (bounding_box[rec_position, 0],
                                         bounding_box[rec_position, 1]),
                                        cv2.FONT_HERSHEY_COMPLEX_SMALL,
                                        0.8, (0, 0, 255),
                                        thickness=2,
                                        lineType=2)
                    writeVideo.write(frame)
                    cv2.imshow('camera', frame)
                count += 1
                key = cv2.waitKey(3)
                if key == 27:
                    print("ESC break")
                    break
                if key == ord(' '):
                    picNumber += 1
                    # filename = "{}_{}.jpg".format(dirVideo, picNumber)
                    filename = "%s_%s.jpg" % (dirVideo, picNumber)
                    cv2.imwrite(filename, frame)
            capture.release()
            cv2.destroyWindow("camera")
Example No. 2
    u_s = cv2.getTrackbarPos("US", "tracks")
    u_v = cv2.getTrackbarPos("UV", "tracks")
    s = cv2.getTrackbarPos(switch, "tracks")

    lower_b = np.array([l_h, l_s, l_v])
    upper_b = np.array([u_h, u_s, u_v])

    mask = cv2.inRange(img_hsv, lower_b, upper_b)
    res = cv2.add(img, img, mask=mask)  # adds img to itself where mask is set; cv2.bitwise_and is the more common choice

    cv2.imshow("img", img)

    if s == 0:
        cv2.destroyWindow("mask")
        cv2.destroyWindow("res")
    else:
        cv2.imshow("mask", mask)
        cv2.imshow("res", res)

    k = cv2.waitKey(1)
    if k == ord("s") & s == 1:
        cv2.imwrite('1.5_temp_save_mask', mask)
        cv2.imwrite('1.5_temp_save_res', res)
        print("lower=", lower_b, "upper=", upper_b)
    elif k == 27:  # ESC to quit
        if s == 1:
            print("lower=", lower_b, "upper=", upper_b)
        break

cv2.destroyAllWindows()
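
This fragment starts mid-loop and assumes a "tracks" window whose trackbars already exist; a minimal setup sketch (the lower-bound names are guesses inferred from the reads above, and note OpenCV's 8-bit hue only spans 0-179):

import cv2

def nothing(x):
    pass  # createTrackbar requires a callback, even if unused

cv2.namedWindow("tracks")
for name in ("LH", "LS", "LV"):
    cv2.createTrackbar(name, "tracks", 0, 255, nothing)
for name in ("UH", "US", "UV"):
    cv2.createTrackbar(name, "tracks", 255, 255, nothing)
switch = "0 : OFF\n1 : ON"
cv2.createTrackbar(switch, "tracks", 0, 1, nothing)
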
Example No. 3
refered_pixel_coordi_lst = np.array([
    (rows[len(rows) // 2], cols[len(rows) // 2])
    for idx, (rows, cols) in enumerate(slic.labels_position) if idx != 0
])
sigma1 = 0.1
sigma2 = 0.25
# print(refered_pixel_cooordi_lst)
a = CB.blend_color(refered_pixel_coordi_lst, mask.tar_result, sigma1, sigma2)
# a[refered_pixel_cooordi_lst[0], refered_pixel_cooordi_lst[1],:] = (255,255,255)
# a[slic.labels_position[1][0], slic.labels_position[1][1]] = (0,0,255)
# a[slic.contour_mask>0] = (0,255,0)

# for i in range(len(refered_pixel_cooordi_lst)):

#     cv2.circle(a, tuple(refered_pixel_cooordi_lst[i][::-1]), 3, (0,0,255), -1)
cv2.imwrite('supa_img.png', a)

print(f'time: {time.time() - start}')

# warp_ref_img[slic.contour_mask>0] = (0,255,0)
# warp_tar_img[slic.contour_mask>0] = (0,255,0)

# cv2.imwrite('1.png', warp_tar_img)
# cv2.imwrite('2.png', warp_ref_img)

# plt.hist( np.linalg.norm(CB.get_color_diff_lst(refered_pixel_coordi_lst), axis=1), bins=1000 )
# plt.show()

for i in range(len(slic.labels_position)):
    if i != 0:
        rows, cols = slic.labels_position[i]
Example No. 4
 def take_picture(self, round):
     ret, frame = self.video.read()
     cv.imwrite('./choices/{}.jpeg'.format(round), frame)
Example No. 5
 def save_image(self) -> None:
     imwrite("/home/pi/Pictures/debugGoal/{}.png".format(time()), self._image.content)
Example No. 6

def YOLO(args):

    logger = logging.getLogger(__name__)
    coloredlogs.install(level="DEBUG", logger=logger)

    global metaMain, netMain, altNames, imageName, HEIGHT, WIDTH
    # configPath = "./cfg/yolov3.cfg"
    # weightPath = "./yolov3.weights"
    # metaPath = "./cfg/coco.data"
    configPath = FLAGS.config
    weightPath = FLAGS.weight
    metaPath = FLAGS.meta

    if not os.path.exists(configPath):
        raise ValueError("Invalid config path `" + os.path.abspath(configPath) + "`")
    if not os.path.exists(weightPath):
        raise ValueError("Invalid weight path `" + os.path.abspath(weightPath) + "`")
    if not os.path.exists(metaPath):
        raise ValueError("Invalid data file path `" + os.path.abspath(metaPath) + "`")
    if netMain is None:
        netMain = darknet.load_net_custom(
            configPath.encode("ascii"), weightPath.encode("ascii"), 0, 1
        )  # batch size = 1
    if metaMain is None:
        metaMain = darknet.load_meta(metaPath.encode("ascii"))
    if altNames is None:
        try:
            with open(metaPath) as metaFH:
                metaContents = metaFH.read()
                import re

                match = re.search(
                    "names *= *(.*)$", metaContents, re.IGNORECASE | re.MULTILINE
                )
                if match:
                    result = match.group(1)
                else:
                    result = None
                try:
                    if os.path.exists(result):
                        with open(result) as namesFH:
                            namesList = namesFH.read().strip().split("\n")
                            altNames = [x.strip() for x in namesList]
                except TypeError:
                    pass
        except Exception:
            pass

    while True:
        image_path = input("\ninput an image to detect (press 'q' to exit): ")
        if image_path == "q":
            logger.debug("quit the code 0")
            break

        if image_path == "" and FLAGS.image != "":
            image_path = FLAGS.image
            imageName = os.path.basename(image_path)

        if not os.path.isfile(image_path):
            logger.error("file does not exist: %s" % image_path)
            continue

        logger.debug("YOLO Starts detecting ...")

        prev_time = time.time()

        frame_read = cv2.imread(image_path)
        HEIGHT, WIDTH, channels = frame_read.shape

        # make a copy of the frame for drawing boxes and contours later
        frame_copy = frame_read.copy()
        # cv2.imwrite("./output/frame_copy.jpg", frame_copy)

        #  <class 'numpy.ndarray'>
        frame_rgb = cv2.cvtColor(frame_read, cv2.COLOR_BGR2RGB)

        frame_resized = cv2.resize(
            frame_rgb, (WIDTH, HEIGHT), interpolation=cv2.INTER_LINEAR,
        )

        # Create an image we reuse for detect
        darknet_image = darknet.make_image(WIDTH, HEIGHT, 3,)

        # darknet.copy_image_from_bytes(darknet_image, frame_read.tobytes())
        darknet.copy_image_from_bytes(darknet_image, frame_resized.tobytes())

        # detect image
        detections = darknet.detect_image(
            # netMain, metaMain, darknet_image, thresh=FLAGS.thresh
            netMain,
            metaMain,
            darknet_image,
            thresh=FLAGS.thresh,
        )

        # image = cvDrawBoxes(detections, frame_rgb)
        # image = cvDrawBoxes(detections, frame_resized)
        image = cvDrawBoxes2(detections, frame_copy)

        # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # elapsed detection time
        ETA = "ETA: {} ms".format((time.time() - prev_time) * 1000)
        # print(ETA)

        # try:
        # image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        cv2.imwrite(FLAGS.output, image)
        # image.save(FLAGS.output)
        logger.debug("output saved to: {}, {}".format(FLAGS.output, ETA))
        # except Exception as ex:
        #     print(ex)

        if FLAGS.open:
            if len(detections) == 0:
                logger.error("None detected: 0")
            else:
                img2 = Image.open(FLAGS.output)
                img2.show()
Example No. 7
 def inpainting(self):
     img = self.destroyedImg
     mask = self.mask
     self.repaired = cv2.inpaint(img, mask, 3, cv2.INPAINT_NS)
     cv2.imwrite('repaired.jpg', self.repaired)
     App.display(self)
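
cv2.inpaint also supports the Telea (Fast Marching) algorithm alongside the Navier-Stokes variant used above; a quick side-by-side sketch with hypothetical file names:

import cv2

img = cv2.imread('destroyed.jpg')
mask = cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)  # non-zero pixels are repaired

ns = cv2.inpaint(img, mask, 3, cv2.INPAINT_NS)
telea = cv2.inpaint(img, mask, 3, cv2.INPAINT_TELEA)
cv2.imwrite('repaired_ns.jpg', ns)
cv2.imwrite('repaired_telea.jpg', telea)
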
Example No. 8
            imgBigContour, imgWarpColored, imgWarpGray, imgAdaptiveThre
        ])
    else:
        imageArray = ([img, imgGray, imgThreshold,
                       imgContours], [imgBlank, imgBlank, imgBlank, imgBlank])

    #Labels
    labels = [['Original', 'Gray', 'Threshold', 'Contours'],
              [
                  'Biggest Contour', 'Warp Perspective', 'Warp Gray',
                  'Adaptive Threshold'
              ]]

    stackedImage = utlis.stackImages(imageArray, 0.75, labels)
    cv2.imshow('Result', stackedImage)

    #Save Image
    if cv2.waitKey(1) & 0xFF == ord('s'):
        cv2.imwrite('Image/image' + str(count) + '.jpg', imgWarpColored)
        cv2.rectangle(stackedImage, ((int(stackedImage.shape[1] / 2) - 230),
                                     int(stackedImage.shape[0] / 2) + 50),
                      (1100, 350), (0, 255, 0), cv2.FILLED)
        cv2.putText(stackedImage, 'Scan Saved',
                    (int(stackedImage.shape[1] / 2) - 200,
                     int(stackedImage.shape[0] / 2)), cv2.FONT_HERSHEY_DUPLEX,
                    3, (0, 0, 255), 5, cv2.LINE_AA)
        cv2.imshow('Result', stackedImage)
        cv2.waitKey(300)
        count += 1
        # if cv2.waitKey(1) == 13:
        #     break
Example No. 9
    canvas_t = canvas.copy()
    frontest = {}
    for i in range(imax):
        for j in range(jmax):
            rolled = roll(i, j, t)
            # transed = do_trans(rolled, t)
            # roted = do_rot(transed, t)
            # paper_i, paper_j = proj(roted)
            roted = do_rot(rolled, t)
            transed = do_trans(roted, t)
            paper_i, paper_j = proj(transed)
            canvas_i, canvas_j = paper2canvas(paper_i, paper_j)
            x, y = int(canvas_i), int(canvas_j)
            if (0<=x<canvas_x and 0<=y<canvas_y) and ((x,y) not in frontest or frontest[(x,y)]>transed[2]):
                frontest[(x,y)] = transed[2]
                canvas_t[y][x][:] = paper[j][i][:]
    print(f"t: {t}")
    cv2.imwrite(f"./output/roll_{t}.png", canvas_t)

# assemble the video
image_folder = 'output'
video_name = 'video/video.avi'
images = [img for img in os.listdir(image_folder) if img.endswith(".png")]
images.sort(key=lambda x:int(x[5:][:-4]))
frame = cv2.imread(os.path.join(image_folder, images[0]))
height, width, layers = frame.shape
video = cv2.VideoWriter(video_name, 0, 20, (width,height))
for image in images:
    video.write(cv2.imread(os.path.join(image_folder, image)))
cv2.destroyAllWindows()
video.release()            
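
The writer above passes 0 as the fourcc, which typically yields uncompressed output (and is platform-dependent); a sketch with an explicit codec instead, reusing the width and height computed above:

import cv2

fourcc = cv2.VideoWriter_fourcc(*'MJPG')  # widely available with .avi containers
video = cv2.VideoWriter('video/video.avi', fourcc, 20, (width, height))
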
Example No. 10

def pose_main(myImagePath, imageName):
    """detect pose"""

    lower_clothing_found = 0 
    #data = request.json
    #myPose = data["myPose"]

    frame = cv2.imread(myImagePath)
    frameCopy = np.copy(frame)
    frameWidth = frame.shape[1]
    frameHeight = frame.shape[0]
    threshold = 0.1

    net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)

    t = time.time()
    # input image dimensions for the network
    inWidth = 368
    inHeight = 368
    inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight),
                            (0, 0, 0), swapRB=False, crop=False)

    net.setInput(inpBlob)

    output = net.forward()
    print("time taken by network : {:.3f}".format(time.time() - t))

    H = output.shape[2]
    W = output.shape[3]

    # Empty list to store the detected keypoints
    points = []

    for i in range(nPoints):
        # confidence map of corresponding body's part.
        probMap = output[0, i, :, :]

        # Find global maxima of the probMap.
        minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
        
        # Scale the point to fit on the original image
        x = (frameWidth * point[0]) / W
        y = (frameHeight * point[1]) / H

        if prob > threshold : 
        #    cv2.circle(frameCopy, (int(x), int(y)), 8, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)
        #    cv2.putText(frameCopy, "{}".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)
        #    print("point: ", i)
        #    print(int(x))
        #    print(int(y))

            # Add the point to the list if the probability is greater than the threshold
            points.append((int(x), int(y)))
        else :
            points.append(None)
    print("points: ", points)

    # Draw Skeleton
    #for pair in POSE_PAIRS:
    #    partA = pair[0]
    #    partB = pair[1]

    #    if points[partA] and points[partB]:
    #        cv2.line(frame, points[partA], points[partB], (0, 255, 255), 2)
    #        cv2.circle(frame, points[partA], 8, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)

    for num, corner in enumerate(points_list):
        for i, j in enumerate(corner):
            if (points[j] is None):
                corner[i] = "NA"
            else:
                corner[i] = points[j]

        points_list[num] = [loc for loc in corner if loc != "NA"]

    for num, corner in enumerate(points_list[0:3]):
        print(corner)
        if corner == []:
            corner = 0
        else:
            for i, biPoint in enumerate(corner):
                corner[i] = biPoint[0]
            corner = min(corner)
        points_list[num] = corner

    print(points_list[3:6])
    for num, corner in enumerate(points_list[3:6]):
        print("points_list[3:6]: ", points_list[3:6])
        print(num)
        print(corner)
        if corner == []:
            corner = frameWidth
        else:
            for i, biPoint in enumerate(corner):
                corner[i] = biPoint[0]
            corner = max(corner)
        points_list[num + 3] = corner

    for num, corner in enumerate(points_list[6:9]):
        if corner == []:
            corner = 0
        else:
            for i, biPoint in enumerate(corner):
                corner[i] = biPoint[1]
            corner = min(corner)
        points_list[num + 6] = corner

    for num, corner in enumerate(points_list[9:]):
        if corner == []:
            corner = frameHeight
        else:
            for i, biPoint in enumerate(corner):
                corner[i] = biPoint[1]
            corner = max(corner)  # bottom edge: take the largest y
        points_list[num + 9] = corner

    points_dict = {
        "x1_head": points_list[0], "x1_upper": points_list[1], "x1_lower": points_list[2],\
        "x2_head": points_list[3], "x2_upper": points_list[4], "x2_lower": points_list[5],\
        "y1_head": points_list[6], "y1_upper": points_list[7], "y1_lower": points_list[8],\
        "y2_head": points_list[9], "y2_upper": points_list[10], "y2_lower": points_list[11]
        }
    
    points_dict["y1_head"] = int(max(0, points_dict["y1_head"] - (points_dict["y2_head"] - points_dict["y1_head"]) / 1.5))

    points_dict["x1_upper"] = int(max(0, points_dict["x1_upper"]  - (points_dict["x2_upper"] - points_dict["x1_upper"]) * 0.1))
    points_dict["x2_upper"] = int(min(frameWidth, points_dict["x2_upper"] + (points_dict["x2_upper"] - points_dict["x1_upper"]) * 0.1))
    points_dict["y1_upper"] = int(max(0, points_dict["y1_upper"]  - (points_dict["y2_upper"] - points_dict["y1_upper"]) * 0.1))
    points_dict["y2_upper"] = int(min(frameHeight, points_dict["y2_upper"] + (points_dict["y2_upper"] - points_dict["y1_upper"]) * 0.1))

    cv2.line(frame, (points_dict['x1_head'], points_dict['y1_head']), (points_dict['x2_head'], points_dict['y1_head']), (0, 255, 255), 2) # yellow
    cv2.line(frame, (points_dict['x2_head'], points_dict['y1_head']), (points_dict['x2_head'], points_dict['y2_head']), (0, 255, 255), 2)
    cv2.line(frame, (points_dict['x2_head'], points_dict['y2_head']), (points_dict['x1_head'], points_dict['y2_head']), (0, 255, 255), 2)
    cv2.line(frame, (points_dict['x1_head'], points_dict['y2_head']), (points_dict['x1_head'], points_dict['y1_head']), (0, 255, 255), 2)

    cv2.line(frame, (points_dict['x1_upper'], points_dict['y1_upper']), (points_dict['x2_upper'], points_dict['y1_upper']), (255, 0, 255), 2) # pink
    cv2.line(frame, (points_dict['x2_upper'], points_dict['y1_upper']), (points_dict['x2_upper'], points_dict['y2_upper']), (255, 0, 255), 2)
    cv2.line(frame, (points_dict['x2_upper'], points_dict['y2_upper']), (points_dict['x1_upper'], points_dict['y2_upper']), (255, 0, 255), 2)
    cv2.line(frame, (points_dict['x1_upper'], points_dict['y2_upper']), (points_dict['x1_upper'], points_dict['y1_upper']), (255, 0, 255), 2)

    if not ((points_dict['x1_lower'] == 0) and (points_dict['y1_lower'] == 0) and (points_dict['x2_lower'] == frameWidth) and (points_dict['y2_lower'] == frameHeight)):
        points_dict["x1_lower"] = int(max(0, points_dict["x1_lower"]  - (points_dict["x2_lower"] - points_dict["x1_lower"]) * 0.5))
        points_dict["x2_lower"] = int(min(frameWidth, points_dict["x2_lower"] + (points_dict["x2_lower"] - points_dict["x1_lower"]) * 0.5))
        points_dict["y1_lower"] = int(max(0, points_dict["y1_lower"]  - (points_dict["y2_lower"] - points_dict["y1_lower"]) * 0.1))
        #points_dict["y2_lower"] = int(min(frameHeight, points_dict["y2_lower"] + (points_dict["y2_lower"] - points_dict["y1_lower"]) * 0.1))
        points_dict["y2_lower"] = frameHeight

        cv2.line(frame, (points_dict['x1_lower'], points_dict['y1_lower']), (points_dict['x2_lower'], points_dict['y1_lower']), (255, 255, 0), 2) # blue
        cv2.line(frame, (points_dict['x2_lower'], points_dict['y1_lower']), (points_dict['x2_lower'], points_dict['y2_lower']), (255, 255, 0), 2)
        cv2.line(frame, (points_dict['x2_lower'], points_dict['y2_lower']), (points_dict['x1_lower'], points_dict['y2_lower']), (255, 255, 0), 2)
        cv2.line(frame, (points_dict['x1_lower'], points_dict['y2_lower']), (points_dict['x1_lower'], points_dict['y1_lower']), (255, 255, 0), 2)

        lower_image = frameCopy[points_dict['y1_lower']: points_dict['y2_lower'], points_dict['x1_lower']: points_dict['x2_lower']]
        lower_clothing_found = 1
    print("passed lower")

    #cv2.imshow('Output-Keypoints', frameCopy)
    #cv2.imshow('Output-Skeleton', frame)
    print("first show")
    head_image = frameCopy[points_dict['y1_head']: points_dict['y2_head'], points_dict['x1_head']: points_dict['x2_head']]
    upper_image = frameCopy[points_dict['y1_upper']: points_dict['y2_upper'], points_dict['x1_upper']: points_dict['x2_upper']]

    #cv2.imshow('Output-Head', head_image)
    #cv2.imshow('Output-Upper', upper_image)
    #cv2.imshow('Output-Lower', lower_image)

    cv2.imwrite(os.path.join("./static/uploaded_pictures" , str(imageName) + '_Boxed.jpg'), frame)
    cv2.imwrite(os.path.join("./static/uploaded_pictures" , str(imageName) + '_Whole.jpg'), frameCopy)
    cv2.imwrite(os.path.join("./static/uploaded_pictures" , str(imageName) + '_Head.jpg'), head_image)
    cv2.imwrite(os.path.join("./static/uploaded_pictures" , str(imageName) + '_Upper.jpg'), upper_image)
    if lower_clothing_found:
        cv2.imwrite(os.path.join("./static/uploaded_pictures" , str(imageName) + '_Lower.jpg'), lower_image)
    

    #cv2.imwrite('./client_images/images_output/Output-Keypoints.jpg', frameCopy)
    #cv2.imwrite('./client_images/images_output/Output-Skeleton.jpg', frame)

    print("Total time taken : {:.3f}".format(time.time() - t))

    #cv2.waitKey(0)

    #return jsonify({"status": "success"})
    return lower_clothing_found
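
The repeated max/min clamping on points_dict above could be factored into one helper; a sketch (expand_box is a name introduced here for illustration, not part of the original code):

def expand_box(x1, y1, x2, y2, fx, fy, frame_w, frame_h):
    # grow a box by a fraction of its own width/height, clipped to the frame
    dw = (x2 - x1) * fx
    dh = (y2 - y1) * fy
    return (int(max(0, x1 - dw)), int(max(0, y1 - dh)),
            int(min(frame_w, x2 + dw)), int(min(frame_h, y2 + dh)))
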
Example No. 11
    imag2 = cv.resize(imageInCV, r)
    height, width = imag2.shape[:2]
    w, h = (40, 40) #quality of new image (the bigger the better)
    #resize image
    imageInCV = cv.resize(imageInCV, r)
    #pixelate picture
    temp = cv.resize(imageInCV, (w, h), interpolation=cv.INTER_LINEAR)
    output = cv.resize(temp, (width, height), interpolation=cv.INTER_NEAREST)
    #return imageInCV
    return output


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', action='store', dest='fileValue',
                        help='Store file value', default='data.txt')


    results = parser.parse_args()
    with open(results.fileValue, "r") as file:
        lines = file.readlines()
    i = 1
    for line in lines:
        cv.imwrite(f'results/{i}.png', printRegistration(line)) #save image
        i += 1
    




Example No. 12
    plot.scatter_plt_log(var_depth[np.logical_not(true_label)], var_aer[np.logical_not(true_label)], var_depth[true_label], var_aer[true_label], fig2, ax2, "/Users/tschmidt/repos/tgs_honours/output/TRACK_CLUSTER_" + str(i) + ".png")
    plot.hexbin_log(var_depth, var_aer, i, fig3, ax3, "/Users/tschmidt/repos/tgs_honours/output/TRACK_CLUSTER_HEX_" + str(i) + ".png")
    ####################################

    # filename = "early_" + str(i) + ".png"
    # file_path = os.path.join(OUT_DIR, filename)
    # labels_slice = np.zeros([BTD.shape[0], BTD.shape[1]])
    # labels_slice = np.where(golden_arch_mask, 255.0, labels_slice)
    # labels = np.zeros([BTD.shape[0], BTD.shape[1], 3], dtype=np.float32)
    # labels[:,:,2] = labels_slice
    # BTD_img = cv2.addWeighted(BTD_img, 1.0, labels, 0.5, 0)
    # cv2.imwrite(file_path, BTD_img)
    filename = "canny_" + str(i) + ".png"
    file_path = os.path.join(OUT_DIR, filename)
    cv2.imwrite(file_path, img)

    BTD_img2 = copy.deepcopy(BTD_img)
    filename = "NEW_MASK_" + str(i) + ".png"
    file_path = os.path.join(OUT_DIR, filename)
    labels_slice = np.zeros([BTD.shape[0], BTD.shape[1]])
    # labels_slice = np.where(golden_arch_mask, 255.0, labels_slice)
    # labels_slice = np.where(size_mask, 255.0, labels_slice)
    labels_slice = np.where(true_label, 255.0, labels_slice)
    labels = np.zeros([BTD.shape[0], BTD.shape[1], 3], dtype=np.float32)
    labels[:,:,2] = labels_slice
    # labels[:,:,1] = labels_slice2
    BTD_img2 = cv2.addWeighted(BTD_img2, 1.0, labels, 0.5, 0)
    cv2.imwrite(file_path, BTD_img2)

    # filename = "NEW_SCATTER_" + str(i) + ".png"
Example No. 13

def createTimelapse(durationG, photosInterval, Resolution, procPhotosKeep,
                    device):
    if Resolution == '720':
        width = 1280
        height = 720
    elif Resolution == '1080':
        width = 1920
        height = 1080
    elif Resolution == '2k':
        width = 2560
        height = 1440
    elif Resolution == '4k':
        width = 3840
        height = 2160
    else:
        Resolution = '480'
        width = 640
        height = 480

    video = cv2.VideoCapture(device, cv2.CAP_DSHOW)
    video.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    video.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

    if not video.isOpened():
        print("Error reading video file")
        return

    frame_width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    size = (frame_width, frame_height)
    print(size)

    actualTime = datetime.datetime.now()

    file_name = str(
        datetime.date.today()
    ) + '_' + f'{actualTime.hour}hs{actualTime.minute}min{actualTime.second}sec'

    os.mkdir(file_name)

    #cv2.VideoWriter('filename',  cv2.VideoWriter_fourcc(*'Codec'), fps, resolution)
    #0x7634706d codec code for mp4 format

    result = cv2.VideoWriter(f'{file_name}/timelapse.mp4', 0x7634706d, 24.97,
                             size)
    resultCorr = cv2.VideoWriter(f'{file_name}/timelapse_processed.mp4',
                                 0x7634706d, 24.97, size)

    #Timelapse parameters config
    procPhotosKeep = procPhotosKeep.lower()
    delete_imgs = False

    if procPhotosKeep == 'y':
        delete_processed_imgs = False
    elif procPhotosKeep == 'n':
        delete_processed_imgs = True
    else:
        print('There was an error')
        delete_processed_imgs = False  # fall back to keeping the processed images

    duration = durationG * 60

    imgs_direc = f'{file_name}/timelapse_imgs'
    if not os.path.exists(imgs_direc):
        os.mkdir(imgs_direc)

    now = datetime.datetime.now()
    end = now + datetime.timedelta(seconds=duration)

    i = 0

    #takes the photos in the specified duration
    while datetime.datetime.now() < end:
        ret, frame = video.read()
        print('Time left:', end - datetime.datetime.now())
        if not ret:
            continue
        filename = f"{imgs_direc}/{i}.jpg"
        i += 1
        cv2.imwrite(filename, frame)
        time.sleep(photosInterval)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    processed_imgs = f'{file_name}/hsv_imgs'
    eq_direc = f'{file_name}/eq_imgs'

    #Exports the timelapse without processing
    ConvertToVideo(result, imgs_direc)

    #timelapse processing
    imageProcessing.gamma_correct(imgs_direc, processed_imgs)
    imageProcessing.Histogram_EQ(processed_imgs, eq_direc)

    #Exports the timelapse with processing
    ConvertToVideo(resultCorr, eq_direc)

    if delete_processed_imgs:
        shutil.rmtree(eq_direc)

    video.release()
    result.release()
    resultCorr.release()
    print("The Timelapse it's finished.")
Example No. 14
from cv2 import cv2

image = cv2.imread("test.jpg")
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

cv2.imshow("Original", image_gray)
cv2.imwrite("grey_test.jpg", image_gray)

print(image_gray.shape)

cv2.waitKey(5000)
Example No. 15
 def __trim_and_save_image(image_path, x1, x2, y1, y2):
     image = common_operations.read_image(image_path)
     new_image = image[y1:y2, x1:x2]
     cv2.imwrite(image_path, new_image)
Example No. 16
diffs = []

width = len(gray_image[0])

for i in range(len(gray_image) - 1):
    total = 0
    for j in range(width):
        total += abs(int(gray_image[i][j]) - int(gray_image[i + 1][j]))

    diffs.append(total / width)

tmp_a, tmp_b = 0, 0
tmp_a_index, tmp_b_index = 0, 0

imwrite("./Gray_Image.jpg", gray_image)

for index, diff in enumerate(diffs):
    if diff > tmp_a:
        tmp_a, tmp_b = diff, tmp_a
        tmp_a_index, tmp_b_index = index, tmp_a_index

    elif diff > tmp_b:
        tmp_b = diff
        tmp_b_index = index

print(tmp_a, tmp_b)
print(tmp_a_index, tmp_b_index)

# print(type(gray_image[tmp_a]))
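
Tracking the two largest diffs by hand is easy to get subtly wrong; the standard library gives essentially the same result (tie-breaking aside):

import heapq
top_two = heapq.nlargest(2, enumerate(diffs), key=lambda p: p[1])
# top_two ~ [(tmp_a_index, tmp_a), (tmp_b_index, tmp_b)]
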
Example No. 17

def cvDrawBoxes2(detections, img):
    thickness = 2
    margin = 10  # expand the detection box by an extra 10 pixels on each side

    # 0    1      2     3   4   5      6: center point, angle, largest contour, width/height
    # name,cls_id,score,pt1,pt2,(w, h),contours
    # (detection, contours)
    list_detect = []

    print("Detected {} object(s): ".format(len(detections)))
    for index, detection in enumerate(detections):
        # detection = (class, score, (x, y, w, h))
        name = detection[0].decode()
        score = detection[1]
        x, y, w, h = (
            int(detection[2][0]),
            int(detection[2][1]),
            int(detection[2][2]),
            int(detection[2][3]),
        )
        xmin, ymin, xmax, ymax = convertBack(float(x), float(y), float(w), float(h))
        pt1 = (xmin, ymin)
        pt2 = (xmax, ymax)
        # color = YOLOV3_COLORS[int(index % len(flatui))]
        # color = YOLOV3_COLORS[int(altNames.index(name) % len(flatui))]
        # color2 = (
        #     255,
        #     0,
        #     0,
        # )  # YOLOV3_COLORS[(int(altNames.index(name) + 1) % len(flatui))]

        # print(
        #     "\n\n\t{:>3d}  {:<12s}  {:.2%}   {},{},  {}".format(
        #         index + 1, name, score, pt1, pt2, (w, h)
        #     )
        # )

        # confidence = "{:.2f}".format(score * 100)

        # text = "{} {}".format(name, confidence)

        # calc contours
        # origin_x = pt1[0]
        # origin_y = pt1[1]
        # origin_w = w
        # origin_h = h

        origin_x = pt1[0] - margin
        origin_y = pt1[1] - margin
        origin_w = w + margin * 2
        origin_h = h + margin * 2
        _x = _y = _w = _h = 0  # deltas
        # 1: x out of bounds on the left
        if origin_x < 0:
            _x = 0 - origin_x  # shrink the left edge by _x; the width shrinks by _x too
            origin_x = 0
            origin_w -= _x
        # 2: y out of bounds at the top
        if origin_y < 0:
            _y = 0 - origin_y
            origin_y = 0
            origin_h -= _y

        # 3: x + width beyond the right edge
        if origin_x + origin_w > WIDTH:
            _w = origin_x + origin_w - WIDTH
            origin_w -= _w

        # 4: y + height beyond the bottom edge
        if origin_y + origin_h > HEIGHT:
            _h = origin_y + origin_h - HEIGHT
            origin_h -= _h

        # img2 = darknet.make_image(w, h, 3,)
        img2 = np.zeros([origin_w, origin_h, 3])  # create empty rect
        # img2 = img[pt1[1] : pt1[1] + h, pt1[0] : pt1[0] + w, :]
        img2 = img[
            origin_y : origin_y + origin_h,  # y:y+h
            origin_x : origin_x + origin_w,  # x:x+w
            :,
        ]

        _save_path = "./upload/{}_d_{}.jpg".format(imageName, index)
        cv2.imwrite(_save_path, img2)
        # # get (center point, angle, largest contour, width/height)
        # contours = cvContours((origin_x, origin_y), img2)
        contours = get_contours((origin_x, origin_y), img2)
        # print("\t\t", type(contours))

        # assemble the (detection, contour) record
        list_detect.append([name, score, (pt1, pt2), (w, h), contours])
        # print(
        #     "\n-------------------\n",
        #     [name, score, (pt1, pt2), (w, h), contours],
        #     "\n+++++++++++++++++++\n",
        # )

    # print(list_detect)

    # print(tplt.format(index + 1, name, score, point_xy, angle, mr_w, mr_h))
    # tplt = "{0:>2d}  {1:<14}  {2:>5}  {3:<10}  {4:^2}  {5}x{6}"
    # tplt = "{0:>10}\t{1:^10}\t{2:<10}"

    for index, detect in enumerate(list_detect):
        # print(len(detect), type(detect[0]))
        # print(detect[0], detect[1], detect[2], detect[3], detect[4])

        name, score, xy, size, cnt = (
            detect[0],
            detect[1],
            detect[2],
            detect[3],
            detect[4],
        )
        color = YOLOV3_COLORS[int(altNames.index(name) % len(flatui))]
        color2 = (
            255,
            0,
            0,
        )  # YOLOV3_COLORS[(int(altNames.index(name) + 1) % len(flatui))]

        # draw the detection box
        # cv2.rectangle(img, xy[0], xy[1], color, 2)

        # draw the contour box
        if cnt:
            # center point, angle, largest contour, width/height
            # print(
            #     "\t\tcenter={} , angle={}° , cnt={} , size={}".format(
            #         cnt[0], cnt[1], cnt[2], cnt[3]
            #     )
            # )

            point_xy, angle, box, (mr_w, mr_h) = cnt
            # draw the contour
            cv2.drawContours(img, [box], 0, color, 2)
            # draw the name
            color = YOLOV3_COLORS[int(altNames.index(name) % len(flatui))]
            cv2.putText(
                img,
                name,
                (point_xy[0] - 20, point_xy[1] - 10),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.6,
                color,  # =[255, 255, 255],
                2,
                # 1,
                # 1,
            )
            # draw the text background
            # cv2.rectangle(
            #     img,
            #     (point_xy[0] - mr_w / 2, point_xy[1] - 10),
            #     (point_xy[0] - mr_w / 2, point_xy[1] - 10),
            #     color,
            #     cv2.FILLED,
            # )

            # draw the angle
            # cv2.putText(
            #     img,
            #     angle,
            #     (point_xy[0] - 5, point_xy[1] + 10),
            #     cv2.FONT_HERSHEY_SIMPLEX,
            #     0.5,
            #     [255, 255, 255],
            #     # 1,
            #     # 1,
            #     # 1,
            # )
            # cv2.putText(
            #     img,
            #     angle,
            #     (point_xy[0] - 5, point_xy[1] + 10),
            #     cv2.FONT_HERSHEY_SIMPLEX,
            #     fontScale=0.5,
            #     color=[255, 255, 255],
            #     thickness=1,
            # )

            # for cnt in contours:
            #     # print("\n", cnt[2], "\n")
            #     cv2.drawContours(img, [cnt[2]], 0, color2, 2)

            # draw the text background
            # (text_width, text_height) = cv2.getTextSize(
            #     name, cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, thickness=1
            # )[0]
            # print(text_width, text_height)

            # text background box: x, y, w, h
            # back_w = text_width + thickness + thickness
            # back_h = text_height + thickness + thickness
            # back_x = pt1[0]
            # back_y = pt1[1] - back_h
            # back_x2 = back_x + back_w
            # back_y2 = back_y + back_h

            # if back_y < 0:
            #     back_y = 0
            #     back_y2 = back_h

            # cv2.rectangle(img, (back_x, back_y), (back_x2, back_y2), color, cv2.FILLED)

            # draw the text
            # cv2.putText(
            #     img,
            #     name,
            #     (pt1[0], pt1[1] - 5),
            #     cv2.FONT_HERSHEY_SIMPLEX,
            #     0.5,
            #     [0, 0, 0],  # color,  # [0, 255, 0],
            #     thickness=1,
            # )

            print(
                "\t{:>2d}  {:<16s}  {:.2%}  {},  {}°,  {}x{}".format(
                    index + 1, name, score, point_xy, angle, mr_w, mr_h
                )
            )
            # print(tplt.format(name, score, angle, chr(12288)))

        # print(
        #     "\n\n\t{:>3d}  {:<12s}  {:.2%}   {},{},  {}".format(
        #         index + 1, name, score, pt1, pt2, (w, h)
        #     )
        # )

    return img
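
The four out-of-bounds cases in the margin-expansion block above can be condensed into one helper; a sketch, with expand_and_clip introduced here for illustration:

def expand_and_clip(x, y, w, h, margin, max_w, max_h):
    # expand a box by `margin` on all sides and clip it to the image
    x1 = max(0, x - margin)
    y1 = max(0, y - margin)
    x2 = min(max_w, x + w + margin)
    y2 = min(max_h, y + h + margin)
    return x1, y1, x2 - x1, y2 - y1
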
Example No. 18

def OCR(path, filename):
    # Reading the image file

    image = cv2.imread(str(path / filename))
    image = imutils.resize(image, width=500)
    cv2.imshow("Original Image", image)

    # Image Conversion to grayscale

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cv2.imshow("Gray Scale Image", gray)
    #cv2.waitKey(0)

    # now we will reduce noise from the image and make it smooth

    gray = cv2.bilateralFilter(gray, 11, 17, 17)
    cv2.imshow("Smoother Image", gray)
    #cv2.waitKey(0)

    # now find the edges in the image
    edged = cv2.Canny(gray, 170, 200)
    cv2.imshow("Canny edge", edged)
    #cv2.waitKey(0)

    # Find the contours based on the images
    cntns, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                        cv2.CHAIN_APPROX_SIMPLE)

    # Copy the original image to draw all the contours on
    image1 = image.copy()
    cv2.drawContours(image1, cntns, -1, (0, 255, 0),
                     3)  # contourIdx=-1 draws every contour in the image
    cv2.imshow("Canny after contouring", image1)
    #cv2.waitKey(0)

    # Reverse the order of sorting

    cntns = sorted(cntns, key=cv2.contourArea, reverse=True)[:30]
    NumberPlateCount = 0

    # We don't yet know which contour (if any) is the number plate.
    # To draw the top 30 contours we work on a copy of the original image,
    # because we don't want to draw on the original itself.

    image2 = image.copy()
    cv2.drawContours(image2, cntns, -1, (0, 255, 0), 3)
    cv2.imshow("Top 30 contours", image2)
    #cv2.waitKey(0)

    # Loop over the contours to find the best candidate for the expected number plate
    count = 0
    name = 1  #name of our cropped image

    for i in cntns:
        perimeter = cv2.arcLength(i, True)
        # the perimeter (arc length) comes straight from cv2.arcLength
        approx = cv2.approxPolyDP(i, 0.02 * perimeter, True)
        # approxPolyDP approximates the contour as a polygon with the given precision
        if len(approx) == 4:
            # four corners is most probably the number plate, since plates have four corners
            NumberPlateCount = approx
            #now we will crop that rectangle part
            x, y, w, h = cv2.boundingRect(i)
            crp_img = image[y:y + h, x:x + w]

            cv2.imwrite("cropped" + '.jpg', crp_img)
            name += 1

            break

    # now draw the contour we identified as the number plate on the main image
    cv2.drawContours(image, [NumberPlateCount], -1, (0, 255, 0), 3)
    cv2.imshow("Final image", image)
    #cv2.waitKey(0)

    # show only the cropped number-plate region
    crop_img_loc = 'cropped.jpg'
    cv2.imshow("cropped image", cv2.imread(crop_img_loc))

    text = pytesseract.image_to_string(crop_img_loc, lang="eng")
    print('Number is:', text)
    cv2.waitKey(0)
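
pytesseract also accepts an already-loaded image plus a page-segmentation hint; a sketch (the --psm 7 single-line mode often suits number plates):

from PIL import Image
import pytesseract

plate = Image.open('cropped.jpg')
text = pytesseract.image_to_string(plate, lang='eng', config='--psm 7')
print('Number is:', text)
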
Example No. 19
                        type=float,
                        help=u"detection threshold")
    args = parser.parse_args()
    graph = load_graph(args.model)

    # for op in graph.get_operations():
    #     print(op.name)

    imgcv, imgcv_resized, img_input = preprocess(args.img)

    with tf.Session(graph=graph) as sess:
        detections = sess.run('output:0', feed_dict={'input:0': img_input})

    meta = {
        'object_scale': 5,
        'classes': 1,
        'out_size': [17, 17, 30],
        'colors': [(0, 0, 254)],
        'thresh': args.thresh,
        'anchors':
        [1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52],
        'num': 5,
        'labels': ['figure']
    }

    outboxes, detected = postprocess(meta, detections, imgcv)
    cv2.imwrite(args.out, detected)

    print("Detected %d figures" % len(outboxes))
    print("Saved to %s" % args.out)
Example No. 20
import ResNetLSTM as rnl
n=rnl.net()  # initialize
n.loadData()  # load the dataset
n.load('2019-08-14-18-38-43.pth')  # load a saved checkpoint
n.train()  # start training
n.eval('Q:\workplace\code\python\ResNetLSTM\eval\\2019-8-29-22-36-2')  # evaluate
# (note: the extra backslash before 2019 in the path escapes the digit)

In an interactive session you can pull the loaded data out directly:
a,b=n.data,n.label  # and reuse it with another model

After changing the code, reload the module:
import importlib
importlib.reload(rnl)
n.data,n.label=a,b
'''
if __name__ == '__main__':
    n = net()
    n.loadData()
    n.train()
'''
Appendix:
For image cropping, use image_preprocess.py in the same directory.
Note: it center-crops to 224x224; inputs smaller than 224x224 are untested.
import image_preprocess as ip
a=ip.processor()
import cv2.cv2 as cv2
img=cv2.imread('o (1).jpg')
img=a.adjust(img)
cv2.imwrite('123.jpg',img)
'''
Example No. 21
        r = requests.get(image_url, stream=True)
        # Check if the image was retrieved successfully
        if r.status_code == 200:
            #Set decode_content value to True, otherwise the downloaded image file's size will be zero.
            r.raw.decode_content = True

            # Open a file in the destination with wb (write binary) permission.
            with open("image_now.png", "wb") as f:
                shutil.copyfileobj(r.raw, f)
        else:
            print(r.status_code)
            print(image_url)
            print("Image couldn\'t be retrieved.")
        if flag == 0:
            image0 = cv2.imread("image_now.png", cv2.IMREAD_UNCHANGED)
            cv2.imwrite("image_old.png", image0)
        else:
            image1 = cv2.imread("image_now.png", cv2.IMREAD_UNCHANGED)
            image2 = cv2.imread("image_old.png", cv2.IMREAD_UNCHANGED)
            image2 = np.concatenate((image1, image2), axis=0)
            cv2.imwrite("image_old.png", image2)
        flag = flag + 1
    if flag_y == 0:
        image5 = cv2.imread("image_old.png", cv2.IMREAD_UNCHANGED)
        cv2.imwrite("image_old_y.png", image5)
    else:
        image3 = cv2.imread("image_old_y.png", cv2.IMREAD_UNCHANGED)
        image4 = cv2.imread("image_old.png", cv2.IMREAD_UNCHANGED)
        image3 = np.concatenate((image3, image4), axis=1)
        cv2.imwrite("image_old_y.png", image3)
    flag_y = flag_y + 1
Example No. 22
print("[INFO] stage 1: extracting features!")
kp1 = sift.detect(img1, None)
kp2 = sift.detect(img2, None)

# showing keypoints in images
img11 = cv2.drawKeypoints(img1,
                          kp1,
                          None,
                          flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
img22 = cv2.drawKeypoints(img2,
                          kp2,
                          None,
                          flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

# storing the image with its keypoints
cv2.imwrite("images/outputs/sift_keypoints_img1.jpg", img11)
cv2.imwrite("images/outputs/sift_keypoints_img2.jpg", img22)

# plt.imshow(img1), plt.show()
# plt.imshow(img2), plt.show()

print("[INFO] stage 2: computing description!")
kp1, des1 = sift.compute(img1, kp1)
kp2, des2 = sift.compute(img2, kp2)

print("[INFO] descriotion size of SIFT = {}".format(sift.descriptorSize()))

print("[INFO] stage 3: matching features!")

bf = cv2.BFMatcher()
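
The snippet stops right after constructing the matcher; the usual next step is a kNN match filtered with Lowe's ratio test, sketched here under the same variable names:

matches = bf.knnMatch(des1, des2, k=2)
good = []
for pair in matches:
    if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
        good.append(pair[0])
print("[INFO] {} matches survive the ratio test".format(len(good)))
img_matches = cv2.drawMatches(img1, kp1, img2, kp2, good, None,
                              flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAWN_SINGLE_POINTS)
cv2.imwrite("images/outputs/sift_matches.jpg", img_matches)
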
Example No. 23
    if k == ord('s'):
        if len(fotos_desconocidos) == 1:
            (video_x, video_y, video_w,
             video_h) = cv2.getWindowImageRect('Video')
            print(video_x, video_y, video_w, video_h)
            print(roi)
            '''
            if not flagCaptura:
                if ((top-20)<):
                    top = 0
                if ((bottom+20)>
                
                if ((left-20)
            '''
            nombreImagen = input("Enter a name for the image: ")
            cv2.imwrite("img/" + nombreImagen + ".jpg", roi)
            known_face_encodings, known_face_names, listadoImagenes = cargaImagenes(
            )
            flagCaptura = not flagCaptura
        else:
            cv2.putText(
                frame,
                'Only one person may appear in the video to capture an image',
                (50, 50), font, 0.8, (0, 0, 0), 2)

    elif k == ord('q'):
        break
    elif k == ord('a'):
        print(ix, iy)

# Release handle to the webcam
Example No. 24
from cv2 import cv2
import numpy as np
from matplotlib import pyplot as plt

img_rgb = cv2.imread('board_example.png')
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread('smallgoldgray.png', 0)
w, h = template.shape[::-1]

res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
threshold = 0.8

loc = np.where(res >= threshold)
print(loc)
# cv2.imwrite('res.png', img_gray)
# cv2.imwrite('template_gray.png', template_gray)

for pt in zip(*loc[::-1]):
    cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
    
cv2.imwrite('res.png', img_rgb)
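
matchTemplate usually fires on a cluster of near-identical locations around each true hit; a simple greedy thinning sketch (not a full non-maximum suppression):

picked = []
for pt in zip(*loc[::-1]):
    if all(abs(pt[0] - q[0]) > w // 2 or abs(pt[1] - q[1]) > h // 2
           for q in picked):
        picked.append(pt)
print(len(picked), "distinct matches")
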
Example No. 25
 def save(self, name, image):
     """保存图片"""
     cv2.imwrite(self.out_dir + name, image)
Example No. 26
from cv2 import cv2 as cv
import numpy as np
from PIL import Image
import PIL

path = input("Enter Path relative to the current directory: ")
img = cv.imread(str(path))

cannied_img = cv.Canny(img, 220, 250)
output_path_name = input("Enter Output Path Name: ")
status = cv.imwrite(output_path_name, cannied_img)
 
print("Image written to file-system : ", status)

cv.waitKey(3)
Example No. 27
                count += 1

                if i == 0:
                    concatenate = torch.cat([a, output], 0)
                    concatenate = concatenate.detach()
                    concatenate = concatenate.cpu()
                    concatenate = torchvision.utils.make_grid(concatenate,
                                                              nrow=4,
                                                              normalize=True,
                                                              pad_value=255)

                    concatenate = 255 - concatenate.numpy() * 255
                    concatenate = np.transpose(concatenate, (1, 2, 0))
                    imgName = 'Epoch%d.jpg' % epoch
                    imgName = resultDir + imgName
                    cv2.imwrite(imgName, concatenate)
                    pass

            print('[%d] testing loss: %.3f' %
                  (epoch + 1, testing_loss / count))
            print('Time is ',
                  time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
                  end=' ')
            using_time = time.time() - start_time
            hours = int(using_time / 3600)
            using_time -= hours * 3600
            minutes = int(using_time / 60)
            using_time -= minutes * 60
            print('running %d h,%d m,%d s' % (hours, minutes, int(using_time)))

    if trainORtest == 'test':
Example No. 28
from time import sleep
import requests
import sys
import json
check = True

cam = cv2.VideoCapture(int(sys.argv[1]))

detector = cv2.QRCodeDetector()
while True:
    while check:
        _, img = cam.read()
        try:
            data, bbox, _ = detector.detectAndDecode(img)
            if data:
                cv2.imwrite("testimage.png", img)
                check = False
                r = requests.get(f'http://localhost:6969/ballena/pollo/{data}')
                # reponse = json.loads(r.text)
                # print(reponse['data']['message'])
        except Exception as err:
            pass
        cv2.imshow("img", img)
        if cv2.waitKey(1) == ord("Q"):
            break
    if not check:
        sleep(1)
        check = True

cam.release()
cv2.destroyAllWindows()
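
detectAndDecode also returns the QR corner points (bbox above); a sketch that could sit inside the try block to outline the code on the preview frame:

if bbox is not None:
    pts = [tuple(map(int, p)) for p in bbox.reshape(-1, 2)]
    for k in range(len(pts)):
        cv2.line(img, pts[k], pts[(k + 1) % len(pts)], (0, 255, 0), 2)
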
Example No. 29
rename = {
    "U": "yellow",
    "B": "red",
    "R": "blue",
    "D": "white",
    "L": "green",
    "F": "orange"
}
for clr in calib["colors"]:
    e = calib["colors"][clr]  # blue
    min_hsv = np.array(e["min"])
    max_hsv = np.array(e["max"])
    maskHSV = np.full((h, w, c), (0, 0, 0), dtype=np.uint8)

    if (min_hsv <= max_hsv).all():
        maskHSV = cv2.inRange(hsv_img, min_hsv, max_hsv)
    else:
        # hue overflow (red)
        mask1 = cv2.inRange(hsv_img, min_hsv,
                            np.array([255, max_hsv[1], max_hsv[2]]))
        mask2 = cv2.inRange(hsv_img, np.array([0, min_hsv[1], min_hsv[2]]),
                            max_hsv)
        # add masks
        maskHSV = cv2.add(mask1, mask2)

    resultHSV = cv2.bitwise_and(white, white, mask=maskHSV)
    cv2.imwrite("{color}.png".format(color=rename[clr]), resultHSV)

cap.release()
Example No. 30
# Prepare the variables that will be used
subbed_img_index = []
starts = []
ends = []
filename = os.path.splitext(os.path.basename(video))[0]
sub = open(filename+".srt","a+",encoding='utf-8')

print("(1/4) Extracting pictures to a temporary folder...")

for i in tqdm(range(end), unit="frames"):
    ret, frame = video_cap.read()
    if i >= start:
        height, width = frame.shape[:2]
        cropped = frame[int(height*0.8):height,int(width*0.1):int(width*0.9)]
        writing = cv2.imwrite(folder+str(i)+".jpg",cropped)

print("(2/4) Scanning for the hardsubbed pictures...")

for i in tqdm(range(start, end),unit="pics"):
    img = folder+str(i)+'.jpg'
    if is_hardsubbed_img(img, middle_line):
        subbed_img_index.append(i)

print("(3/4) Retrieving timings...")

for i in subbed_img_index:
    # If a frame is hardsubbed, the frame before it ISN'T hardsubbed, and the frame after it IS hardsubbed,
    # then this frame marks the beginning of a dialogue.
    # If a frame is hardsubbed, the frame before it IS hardsubbed, and the frame after it ISN'T hardsubbed,
    # then this frame marks the end of a dialogue.
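
A sketch of the boundary test those comments describe, assuming a set built from subbed_img_index for O(1) membership checks:

subbed = set(subbed_img_index)
for i in subbed_img_index:
    if i - 1 not in subbed and i + 1 in subbed:
        starts.append(i)
    elif i - 1 in subbed and i + 1 not in subbed:
        ends.append(i)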