Example #1
    def __call__(self, image):
        image = F.resize(image, 256)
        gray = np.array(F.to_grayscale(image))

        detections = self.bounding_boxes(gray)
        if not detections:
            return self.no_glasses_image

        face_rect = detections[0].rect
        aligned_face = self.aligner.align(opencv2matplotlib(np.array(image)),
                                          gray, face_rect)
        return opencv2matplotlib(aligned_face)
def show_labels(labeled_img, labels_im, masked):
    fig, axes = plt.subplots(1, 3, figsize=(18, 10))
    axes = axes.reshape(-1)
    axes[0].imshow(imutils.opencv2matplotlib(labeled_img))
    axes[0].set_title("hue ordered labels")
    cm, axes[1], cmap = discrete_matshow(labels_im, axes[1])
    _ = fig.colorbar(cm,
                     ticks=np.arange(np.min(labels_im),
                                     np.max(labels_im) + 1),
                     ax=axes[1])
    axes[2].imshow(imutils.opencv2matplotlib(masked))
    axes[2].set_axis_off()
    axes[2].set_title("Masked")
    plt.show()
Example #3
def auto_canny(img_path):
    img = cv2.imread(img_path, 0)  # load as grayscale
    img_canny = imutils.auto_canny(img)
    img_skeleton = imutils.skeletonize(img, size=(3, 3))

    # these are single-channel images, so show them with a gray colormap
    # (opencv2matplotlib expects a 3-channel BGR image)
    plt.subplot(131)
    plt.imshow(img_canny, cmap='gray')
    plt.axis('off')
    plt.subplot(132)
    plt.imshow(img, cmap='gray')
    plt.axis('off')
    plt.subplot(133)
    plt.imshow(img_skeleton, cmap='gray')
    plt.axis('off')
    plt.show()
Example #4
def main():
    client = get_mqtt_client()
    if MQTT_USER != "" and MQTT_PWD != "":
        client.username_pw_set(MQTT_USER, MQTT_PWD)
    client.connect(MQTT_BROKER, port=MQTT_PORT)
    time.sleep(4)  # Wait for connection setup to complete
    client.loop_start()

    # Open camera
    camera = WebcamVideoStream(src=VIDEO_SOURCE).start()
    # time.sleep(2)  # Webcam light should come on if using one

    while True:
        frame = camera.read()

        font = cv2.FONT_HERSHEY_SIMPLEX
        datet = datetime.datetime.now().strftime("%d.%m.%Y, %H:%M:%S")
        frame = cv2.putText(frame, datet, (10, 20), font, 0.7, (255, 255, 255),
                            1, cv2.LINE_AA)
        # cv2.imshow('frame', frame)

        np_array_RGB = opencv2matplotlib(frame)  # Convert to RGB

        image = Image.fromarray(np_array_RGB)  #  PIL image
        byte_array = pil_image_to_byte_array(image)
        client.publish(MQTT_TOPIC_CAMERA, byte_array, qos=MQTT_QOS)
        now = get_now_string()
        print(f"published frame on topic: {MQTT_TOPIC_CAMERA} at {now}")
def hue_map(ax):
    # creating hue map
    map_hsv = np.tile(np.arange(180), 200).reshape(-1, 180)
    hmap = imshow_components(map_hsv)
    ax.imshow(imutils.opencv2matplotlib(hmap))
    ax.set_title("Hue Map")
    return ax
Example #6
def show_img(x, ax=None, figsize=(12, 13)):
    gray = len(x.shape) == 2
    if ax is None:
        _, ax = plt.subplots(figsize=figsize)
    if gray:
        ax.imshow(x, 'gray')
    else:
        ax.imshow(imutils.opencv2matplotlib(x))
    ax.set_axis_off()
    return ax
def display_random_set(data, labels):
    for i in range(10):
        random_val = np.random.randint(low=0, high=len(data))
        plt.subplot(2, 5, (i + 1))
        plt.imshow(imutils.opencv2matplotlib(data[random_val]))
        plt.title(labels[random_val])
        plt.axis(False)
    plt.show()
def work_thread(cam=0, pData=0, nDataSize=0):
    stFrameInfo = MV_FRAME_OUT_INFO_EX()
    memset(byref(stFrameInfo), 0, sizeof(stFrameInfo))
    while True:
        ret = cam.MV_CC_GetOneFrameTimeout(byref(data_buf), nPayloadSize,
                                           stFrameInfo, 1000)
        if ret == 0:
            print("get one frame: Width[%d], Height[%d], nFrameNum[%d]" %
                  (stFrameInfo.nWidth, stFrameInfo.nHeight,
                   stFrameInfo.nFrameNum))

            nRGBSize = stFrameInfo.nWidth * stFrameInfo.nHeight * 3
            stConvertParam = MV_CC_PIXEL_CONVERT_PARAM()
            memset(byref(stConvertParam), 0, sizeof(stConvertParam))
            stConvertParam.nWidth = stFrameInfo.nWidth
            stConvertParam.nHeight = stFrameInfo.nHeight
            stConvertParam.pSrcData = data_buf
            stConvertParam.nSrcDataLen = stFrameInfo.nFrameLen
            stConvertParam.enSrcPixelType = stFrameInfo.enPixelType
            stConvertParam.enDstPixelType = PixelType_Gvsp_RGB8_Packed
            stConvertParam.pDstBuffer = (c_ubyte * nRGBSize)()
            stConvertParam.nDstBufferSize = nRGBSize

            ret = cam.MV_CC_ConvertPixelType(stConvertParam)
            if ret != 0:
                print("convert pixel fail! ret[0x%x]" % ret)
                del data_buf
                sys.exit()
            print("convert pixeltype succeed!")

            # file_path = "AfterConvert_RGB.raw"
            # file_open = open(file_path.encode('ascii'), 'wb+')
            try:
                img_buff = (c_ubyte * stConvertParam.nDstLen)()
                cdll.msvcrt.memcpy(byref(img_buff), stConvertParam.pDstBuffer,
                                   stConvertParam.nDstLen)
                aa = np.frombuffer(img_buff,
                                   dtype=np.uint8,
                                   count=-1,
                                   offset=0)
                resized = aa.reshape(
                    (stConvertParam.nHeight, stConvertParam.nWidth, 3))
                # resized = imutils.resize(aa, width=1280)
                # img = imutils.resize(aa,width=stConvertParam.nWidth )
                cv2.imwrite("2222.png", resized)
                cv2.imshow('', imutils.opencv2matplotlib(resized))
                cv2.waitKey()
                # file_open.write(img_buff)
            except Exception as e:
                raise Exception("save file executed failed: %s" % str(e))
            # finally:
            # file_open.close()
        else:
            print("get one frame fail, ret[0x%x]" % ret)
Example #9
def read_a_pic_reconstruct_slim(pic_path, imageheight, imagewidth):
    img = cv2.imread(pic_path)
    img = image_preprocess.preprocess_for_test(img, imageheight, imagewidth)
    img = imutils.opencv2matplotlib(img)
    img_list = []
    img = list(img)
    img_list.append(img)
    # pic_base = os.path.basename(pic_path)
    return img_list
Example #10
def plot_images(images, names, figsize=[15, 10], rows=1, cols=None):
    if cols is None:
        # round up so every image gets a subplot cell
        cols = -(-len(images) // rows)
    plt.figure(figsize=figsize)
    for i, (name, img) in enumerate(zip(names, images)):
        if len(img.shape) == 3:
            img = imutils.opencv2matplotlib(img)
        plt.subplot(rows, cols, i + 1)
        plt.imshow(img, cmap='gray')
        plt.title(name)
    plt.show()
Example #11
def plot3DHist(image):
    ''' Draw a histogram for the 3 channels'''
    hist = cv2.calcHist([image], [0, 1, 2],
                        None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
    print("3D histogram shape: {}, with {} values".format(
        hist.shape, hist.flatten().shape[0]))

    # display the original input image
    plt.figure()
    plt.axis("off")
    plt.imshow(imutils.opencv2matplotlib(image))
    plt.show()
Example #12
    def getSoftmax(image_filepath):
        import cv2
        import numpy as np

        image = imutils.opencv2matplotlib(cv2.imread(image_filepath))

        image = image_preprocessing_fn(image, 224, 224)

        image = image.eval(session=sess)

        result_feature = sess.run('resnet_v1_50/predictions/Softmax:0',
                                  feed_dict={'input:0': [image]})

        return np.ravel(result_feature)
    def getFeature(image_filepath):

        img = cv2.imread(image_filepath)
        if img is not None:
            image = imutils.opencv2matplotlib(img)

            image = sess.run(image_preprocessing, feed_dict={'img:0': image})

            result_feature = sess.run("resnet_v1_50/pool5:0",
                                      feed_dict={'input:0': [image]})

            return np.ravel(result_feature)
        else:
            return None
Example #14
def orb_similarities(
    file1,
    file2,
    show_matches=False
):  # return similarity (matching rate) between img1 and img2

    img1 = cv2.imread(os.path.join(file1), cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread(os.path.join(file2), cv2.IMREAD_GRAYSCALE)
    size = (img1.shape[1], img1.shape[0])
    img2 = cv2.resize(img2, size, interpolation=cv2.INTER_AREA)

    # --- Processing ---
    # Find keypoints with ORB
    finder = cv2.ORB_create()
    kp1, des1 = finder.detectAndCompute(img1, None)
    kp2, des2 = finder.detectAndCompute(img2, None)

    # create BFMatcher object
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    # Match descriptors.
    matches = bf.match(des1, des2)

    # Sort them in the order of their distance.
    matches = sorted(matches, key=lambda x: x.distance)

    result_img = imu.opencv2matplotlib(
        cv2.drawMatches(img1, kp1, img2, kp2, matches, None, flags=2))

    # Compute matching rate
    mr = 1 / (1 + np.mean([m.distance for m in matches]))

    if show_matches:
        fig, ax = plt.subplots(ncols=1)
        ax.imshow(result_img)
        ax.set_title('Query_img vs. database_img')
        plt.show()

    return mr


def read_a_pic_reconstruct_slim(pic_path, dim):
    img = cv2.imread(pic_path)
    train_image_size = utils.global_var._RESIZE_SIDE_MIN
    img = data_preprocessing.image_preprocess.preprocess_for_test(
        img, train_image_size, train_image_size)
    img = imutils.opencv2matplotlib(img)
    img_list = []
    img = list(img)
    img_list.append(img)
    pic_base = os.path.basename(pic_path)
    items = pic_base.split('_')
    if len(items) > 2:
        label = utils.data_helper.num2label(int(items[1]), dim)
        labels = []
        labels.append(label)
    else:
        labels = np.zeros([1, dim], dtype=np.float32)
    return img_list, labels
Example #16
def detect_demo():
    """ 图像检测器 """
    yolov5_detector = YOLOv5Detector(weights='weights/yolov5s.pt', conf_thres=0.25)
    window_name = 'YOLOv5 detector'
    while True:
        im = ImageGrab.grab(bbox=(0, 0, 600, 800))
        frame = np.array(im)
        image_result, bbox_container = yolov5_detector(frame)
        for info in bbox_container:
            print(info)
        print('---')
        """ 显示图像 """
        cv2.imshow(window_name, imutils.opencv2matplotlib(image_result))
        cv2.waitKey(1)
        """ 点 x 退出 """
        if cv2.getWindowProperty(window_name, cv2.WND_PROP_AUTOSIZE) < 1:
            break
    cv2.destroyAllWindows()
Example #17
def main():
    model = GlassesClassifier()
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
    model.load_state_dict(torch.load('checkpoint.pt', map_location=device))
    model.train(False)
    align = FaceAlignTransform(
        detector_model='mmod_human_face_detector.dat',
        shape_predictor='shape_predictor_5_face_landmarks.dat')
    tensorize = ToTensor()

    winname = 'Am I wearing eyeglasses?'
    cv2.namedWindow(winname)
    vc = cv2.VideoCapture(0)
    vc.set(cv2.CAP_PROP_FPS, 60)

    rval, frame = vc.read()

    while True:
        if frame is not None:
            frame = cv2.resize(frame, (320, 180))

            image = opencv2matplotlib(frame)
            bboxes = align.bounding_boxes(
                cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
            image = Image.fromarray(image)
            image = tensorize(align(image))
            image = image.unsqueeze(0)
            pred = model(image)
            _, labels = torch.max(pred.data, 1)

            if labels.item() == 1:
                put_text(frame, BOTTOM_LEFT_CORNER, 'Yes!')
            else:
                put_text(frame, BOTTOM_LEFT_CORNER, 'No...')
            for bbox in bboxes:
                (x, y, w, h) = face_utils.rect_to_bb(bbox.rect)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.imshow(winname, frame)
        rval, frame = vc.read()

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #18
def main():
    client = get_mqtt_client()
    client.connect(MQTT_BROKER, port=MQTT_PORT)
    time.sleep(4)  # Wait for connection setup to complete
    client.loop_start()

    # Open camera
    camera = WebcamVideoStream(src=VIDEO_SOURCE).start()
    time.sleep(2)  # Webcam light should come on if using one

    while True:
        frame = camera.read()
        np_array_RGB = opencv2matplotlib(frame)  # Convert to RGB

        image = Image.fromarray(np_array_RGB)  #  PIL image
        byte_array = pil_image_to_byte_array(image)
        client.publish(MQTT_TOPIC_CAMERA, byte_array, qos=MQTT_QOS)
        now = get_now_string()
        print(f"published frame on topic: {MQTT_TOPIC_CAMERA} at {now}")
        time.sleep(1 / FPS)
def main():
    classes = ['mask_weared_incorrect', 'with_mask', 'without_mask']
    model = load_model('./mask_status.h5')
    cascade_classifier = cv2.CascadeClassifier('./cascade_classifier/haarcascade_frontalface_alt2.xml')

    for img_name in os.listdir('./Test/'):
        img_path = './Test/' + img_name
        img = cv2.imread(img_path)
        face_coords = get_faces_coords(img=img, classifier=cascade_classifier)
        for face in face_coords:
            x, y, w, h = face
            roi = img[y:y+h, x:x+w] / 255
            prediction = get_prediction(face=roi, model=model)
            prediction = classes[np.argmax(prediction)]
            if prediction == 'with_mask':
                color = (0, 255, 0)
            else:
                color = (0, 0, 255)
            cv2.rectangle(img, pt1=(x, y), pt2=(x+w, y+h), color=color, thickness=2)
            cv2.putText(img=img, text=prediction, org=(x, y), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=color, thickness=2)

        plt.imshow(imutils.opencv2matplotlib(img))
        plt.show()
def change_format(image_path):
    global image_sum, now_number, last_time, last_number
    nature_image_path = image_path
    image_path = image_path.replace(base_path, save_path)
    image_path = image_path.replace(na_format, af_format)
    if not os.path.exists(image_path):
        file_tools.check_fold(file_tools.getFloderOfFileJustPath(image_path))
        img = cv2.imread(nature_image_path)
        if img is not None:
            img = Image.fromarray(
                imutils.opencv2matplotlib(cv2.resize(img, (1024, 1024))))
            img.save(image_path)
        else:
            if not os.path.exists(nature_image_path):
                print('cannot read webp file (file does not exist)...%s'
                      % nature_image_path)
            else:
                print('cannot read webp file...%s' % nature_image_path)
    now_number += 1
    if now_number % 100 == 0:
        print('converting...%d/%d (%.2f/sec)' %
              (now_number, image_sum,
               float(now_number - last_number) / (time.time() - last_time)))
        last_number = now_number
        last_time = time.time()
Example #21
def save_results(image_name, image, image_horizon, image_sun, horizon_flag,
                 sun_flag, daytime):
    # fill result pdf file

    content = []
    output_file_name = image_name[:-4] + '.pdf'
    output_file = PdfPages(output_file_name)

    # adjust spacing
    plt.subplots_adjust(left=None,
                        bottom=None,
                        right=None,
                        top=1,
                        wspace=1,
                        hspace=None)

    # build content for result pdf
    # [image, image_title, image_text]
    # main page
    text = 'Image \"' + image_name + '\" processing'
    content.append([image, None, text])

    # conditions page
    text = 'Conditions: ' + daytime
    content.append([image, text, None])

    # horizon page
    text = 'horizon detected' if horizon_flag else 'horizon line is unclear'
    content.append([image_horizon, text, None])

    # sun page
    if daytime != 'nighttime':
        text = 'sun located' if sun_flag else 'no sun spot located'
        content.append([image_sun, text, None])

    # save result into pdf format
    for item in content:
        fig = plt.figure()
        ax = fig.add_subplot(111)

        # if title present
        if item[2]:
            fig.suptitle(item[2], fontsize=12, fontweight='bold')

        # if text present
        if item[1]:
            ax.text(image.shape[1] * 2.5,
                    image.shape[0],
                    item[1],
                    style='italic',
                    bbox={
                        'facecolor': 'orange',
                        'alpha': 0.5,
                        'pad': 5
                    })

        plt.imshow(imutils.opencv2matplotlib(item[0]))
        fig.tight_layout()
        plt.axis('off')
        output_file.savefig()

    output_file.close()

    # save result into image format
    png_filename = image_name[:-4] + '_processed.png'

    res_img = image
    for item in content:
        # if text present
        if item[1]:
            image_add = put_text(item[0], item[1])
            res_img = np.concatenate((res_img, image_add), axis=0)

    # plt.imshow(imutils.opencv2matplotlib(res_img))
    cv2.imwrite(png_filename, res_img)

    print('check', output_file_name, 'or', png_filename, 'for results')
Example #22
def plt_imshow_bgr(image):
    plt.imshow(imutils.opencv2matplotlib(image))
    plt.show()
Example #23
    def dataSynchronization(self, result, img_list, model, alarm_type, Zone,
                            Channel, device_id, pub, Polygon_list):

        cls_dict = get_cls_dict(model.split('_')[-1])
        vis = BBoxVisualization(cls_dict)

        for i in range(len(result)):
            boxes, confs, clss = result[i][0], result[i][1], result[i][2]
            img, txt = vis.draw_bboxes(img_list[0][i], boxes, confs, clss)
            alarm = str(txt).split(' ')[0]
            print(alarm)

            if alarm in COCO_CLASSES_LIST:
                if alarm == 'fire':
                    np_array_RGB = opencv2matplotlib(img)  # Convert to RGB
                    image = Image.fromarray(np_array_RGB)  # PIL image
                    byte_array = pil_image_to_byte_array(image)
                    baseData = im_pb2.BaseData()
                    baseData.img = byte_array
                    baseData.app_id = Zone
                    baseData.channel_id = 'channel' + str(Channel)
                    baseData.fps = 20
                    baseData.timestamp = time.time()
                    self.last_fire_alarm_time = time.time()
                    baseData.alarm_type = 'Firesmoke'
                    self.last_fire_alarm_type = baseData.alarm_type
                    current_status = start_end_status(True,
                                                      self.fire_history_data,
                                                      baseData.timestamp, 15)
                    if current_status == 'start':
                        baseData.frame_id = time.strftime(
                            '%H%M%S', time.localtime(time.time()))
                        detectionCommonData = im_pb2.DetectionCommonData(
                            base_data=baseData)
                        detectionCommonDataDict = {}
                        detectionCommonDataDict[Zone] = detectionCommonData
                        result_data = im_pb2.GeneralDetectionMapData(
                            general_map_data=detectionCommonDataDict)
                        self.last_fire_topic_time = baseData.frame_id
                        print('topic_start')
                        pub.send_msg(topic='zs/' + Zone + '/' + 'channel' +
                                     str(Channel) + '/' + 'Firesmoke' + '/out',
                                     msg=result_data.SerializeToString(),
                                     Zone=Zone,
                                     device_id=device_id)

                    else:
                        slide_history(1, self.fire_history_data)

                if alarm == 'person':
                    np_array_RGB = opencv2matplotlib(img)  # Convert to RGB
                    image = Image.fromarray(np_array_RGB)  # PIL image
                    byte_array = pil_image_to_byte_array(image)
                    baseData = im_pb2.BaseData()
                    baseData.img = byte_array
                    baseData.app_id = Zone
                    baseData.channel_id = 'channel' + str(Channel)
                    baseData.fps = 20
                    baseData.timestamp = time.time()
                    self.last_person_alarm_time = time.time()
                    baseData.alarm_type = 'Personnelfall'
                    self.last_person_alarm_type = baseData.alarm_type
                    current_status = start_end_status(True,
                                                      self.person_history_data,
                                                      baseData.timestamp, 15)
                    if current_status == 'start':
                        baseData.frame_id = time.strftime(
                            '%H%M%S', time.localtime(time.time()))
                        detectionCommonData = im_pb2.DetectionCommonData(
                            base_data=baseData)
                        detectionCommonDataDict = {}
                        detectionCommonDataDict[Zone] = detectionCommonData
                        result_data = im_pb2.GeneralDetectionMapData(
                            general_map_data=detectionCommonDataDict)
                        self.last_person_topic_time = baseData.frame_id
                        print('topic_start')
                        print(baseData.frame_id)
                        topic = 'zs/' + Zone + '/' + 'channel' + str(
                            Channel) + '/' + 'Personnelfall' + '/out'
                        print(topic)
                        pub.send_msg(topic='zs/' + Zone + '/' + 'channel' +
                                     str(Channel) + '/' + 'Personnelfall' +
                                     '/out',
                                     msg=result_data.SerializeToString(),
                                     Zone=Zone,
                                     device_id=device_id)

                    else:
                        slide_history(1, self.person_history_data)

            else:
                slide_history(0, self.fire_history_data)
                slide_history(0, self.person_history_data)
                if self.last_person_alarm_time is not None:
                    current_status = start_end_status(
                        False, self.person_history_data,
                        self.last_person_alarm_time, 15)
                    if current_status == 'end':
                        self.last_person_alarm_time = None
                        end_topic(self.last_person_topic_time,
                                  self.last_person_alarm_type, Zone, Channel,
                                  device_id, pub)

                if self.last_fire_alarm_time is not None:
                    current_status = start_end_status(
                        False, self.fire_history_data,
                        self.last_fire_alarm_time, 15)
                    if current_status == 'end':
                        self.last_fire_alarm_time = None
                        end_topic(self.last_fire_topic_time,
                                  self.last_fire_alarm_type, Zone, Channel,
                                  device_id, pub)
def show_matr(ax, matr):
    ax.imshow(imutils.opencv2matplotlib(matr))
    ax.set_axis_off()
    ax.set_title("croped")
    return ax
# skeletonize the image using a 3x3 kernel
cv2.imshow("Original", logo)
gray = cv2.cvtColor(logo, cv2.COLOR_BGR2GRAY)
skeleton = imutils.skeletonize(gray, size=(3, 3))
cv2.imshow("Skeleton", skeleton)
cv2.waitKey(0)
cv2.destroyAllWindows()

# 5. MATPLOTLIB
# INCORRECT: show the image without converting color spaces
plt.figure("Incorrect")
plt.imshow(cactus)

# CORRECT: convert color spaces before using plt.imshow
plt.figure("Correct")
plt.imshow(imutils.opencv2matplotlib(cactus))
plt.show()
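
For reference, opencv2matplotlib is essentially a one-line channel swap: OpenCV stores images in BGR order while matplotlib expects RGB. A minimal sketch of an equivalent helper (the bgr_to_rgb name here is ours for illustration; the packaged imutils function may differ in details):

# sketch of what opencv2matplotlib does: swap BGR channels to RGB
import cv2

def bgr_to_rgb(image):
    # assumes a 3-channel BGR image; single-channel input would raise here
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)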

# 6. URL TO IMAGE
# load an image from a URL, convert it to OpenCV format, and
# display it
url = "http://pyimagesearch.com/static/pyimagesearch_logo_github.png"
logo = imutils.url_to_image(url)
cv2.imshow("URL to Image", logo)
cv2.waitKey(0)
cv2.destroyAllWindows()
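
Under the hood, loading from a URL amounts to fetching the raw bytes and letting OpenCV decode them. A rough equivalent of url_to_image (a sketch assuming Python 3's urllib; the fetch_image name is ours, and the packaged implementation may differ):

import urllib.request
import numpy as np
import cv2

def fetch_image(url):
    # download the raw bytes, then decode them into a BGR image array
    resp = urllib.request.urlopen(url)
    data = np.asarray(bytearray(resp.read()), dtype="uint8")
    return cv2.imdecode(data, cv2.IMREAD_COLOR)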

# 7. AUTO CANNY
# convert the logo to grayscale and automatically detect edges
gray = cv2.cvtColor(logo, cv2.COLOR_BGR2GRAY)
edgeMap = imutils.auto_canny(gray)
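
auto_canny removes the need to hand-tune Canny's two thresholds by deriving them from the image's median intensity. A sketch of the idea (sigma=0.33 is the commonly cited default; the actual imutils code may differ):

import numpy as np
import cv2

def auto_canny_sketch(image, sigma=0.33):
    # center the threshold band on the median pixel intensity
    v = np.median(image)
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    return cv2.Canny(image, lower, upper)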
Example #26
def show(title, image):
    plt.figure(title)
    plt.imshow(opencv2matplotlib(image))
    plt.show()
Example #28
def validate_input(img_in):
    img = cv2.imread(img_in)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    resized = imutils.resize(gray, width=28)
    # resized is single-channel, so display it with a gray colormap
    plt.imshow(resized, cmap='gray')
    plt.show()
Example #29
from PIL import Image
import cv2
import io
from imutils import opencv2matplotlib
import paho.mqtt.client as mqtt
import paho.mqtt.publish as publish

image_path = '/home/mengjun/project/0.jpg'


def pil_image_to_byte_array(image):
    imgByteArr = io.BytesIO()
    image.save(imgByteArr, "PNG")
    return imgByteArr.getvalue()


image = cv2.imread(image_path)
np_array_RGB = opencv2matplotlib(image)  # Convert to RGB
image = Image.fromarray(np_array_RGB)  # PIL image
byte_array = pil_image_to_byte_array(image)

# keep publishing the payload to paho/temperature in a loop
while True:
    publish.single(
        "paho/temperature",
        payload=byte_array,
        hostname="localhost",
        client_id="lora1",
        # qos = 0,
        # tls=tls,
        port=1883,
        protocol=mqtt.MQTTv311)
    print("send success")
Example #30
ax = fig.add_subplot(132)
hist = cv2.calcHist([chans[1], chans[2]], [0, 1], None, [32, 32],
                    [0, 256, 0, 256])
p = ax.imshow(hist, interpolation='nearest')
ax.set_title('2D Color Histogram for G and R')
plt.colorbar(p)

# plot 2D color histogram for blue and red
ax = fig.add_subplot(133)
hist = cv2.calcHist([chans[0], chans[2]], [0, 1], None, [32, 32],
                    [0, 256, 0, 256])
p = ax.imshow(hist, interpolation='nearest')
ax.set_title('2D Color Histogram for B and R')
plt.colorbar(p)

plt.show()
# examine the dimension of the 2D histogram
print('2D histogram shape: {}, with {} values'.format(hist.shape,
                                                      hist.flatten().shape[0]))

# 3D histogram
hist = cv2.calcHist([image], [0, 1, 2], None, [8, 8, 8],
                    [0, 256, 0, 256, 0, 256])

# display the original image with matplotlib
plt.figure()
plt.axis('off')

plt.imshow(imutils.opencv2matplotlib(image))

plt.show()
Example #31
    def process(self):
        # Face shape related
        face_alignment.extract_face_triangles(self)
        face_alignment.warp_example(self)
        face_alignment.make_masks(self)

        # Makeup
        makeup_utils.layer_decomposition(self)
        makeup_utils.color_transfer(self)
        makeup_utils.skin_detail_transfer(self)
        makeup_utils.highlight_shading_transfer(self)
        makeup_utils.lip_makeup(self)

        self.subject_makeup_mask_lab = np.zeros_like(self.subject_image)

        self.makeup_mask = self.entire_face_mask - self.eyes_mask - self.inner_mouth_mask

        self.subject_makeup_mask_lab[:, :, 0] = (
            self.face_structure_resultant + self.skin_detail_resultant)
        self.subject_makeup_mask_lab[:, :, 1:] = np.where(
            cv.merge([self.lip_mask, self.lip_mask]) == 255,
            self.subject_lip_makeup[:, :, 1:],
            cv.merge([self.rc_a, self.rc_b]))
        self.subject_makeup_mask_lab = cv.bitwise_and(
            self.subject_makeup_mask_lab,
            self.subject_makeup_mask_lab,
            mask=self.makeup_mask)

        self.subject_makeup_mask_bgr = cv.cvtColor(
            self.subject_makeup_mask_lab, cv.COLOR_LAB2BGR)

        self.subject_makeup = np.where(
            cv.merge([self.makeup_mask, self.makeup_mask,
                      self.makeup_mask]) == 255, self.subject_makeup_mask_bgr,
            self.subject_image)

        # Blur boundaries
        eroded_mask = cv.erode(self.entire_face_mask,
                               np.ones((1, 1), np.uint8),
                               iterations=2)
        contours, _ = cv.findContours(eroded_mask, cv.RETR_TREE,
                                      cv.CHAIN_APPROX_SIMPLE)
        self.blurred_face_contour = np.zeros_like(eroded_mask)
        cv.drawContours(self.blurred_face_contour, contours, -1, 255, 4)

        subject_makeup_blurred = cv.GaussianBlur(self.subject_image, (9, 9), 0)
        blurred_face_contour_boolean = (self.blurred_face_contour == 255)

        self.subject_makeup[blurred_face_contour_boolean] = (
            subject_makeup_blurred[blurred_face_contour_boolean])

        # self.subject_makeup = cv.seamlessClone(self.subject_makeup_mask_bgr, self.subject_image, self.makeup_mask, (self.subject_image.shape[1]//2, self.subject_image.shape[0]//2), cv.MIXED_CLONE)

        self.xdog_makeup_lab = cv.cvtColor(
            cv.cvtColor(self.xdog, cv.COLOR_GRAY2BGR), cv.COLOR_BGR2LAB)
        self.xdog_makeup_lab[:, :, 1:] = np.where(
            cv.merge([self.makeup_mask, self.makeup_mask]) == 255,
            self.subject_makeup_mask_lab[:, :, 1:],
            self.xdog_makeup_lab[:, :, 1:])
        self.xdog_makeup_lab[:, :, 0] = np.where(
            self.skin_mask == 255,
            0.25 * self.face_structure_resultant +
            0.75 * self.xdog_makeup_lab[:, :, 0],
            self.xdog_makeup_lab[:, :, 0])
        self.xdog_makeup_lab[:, :, 0] = np.where(
            self.lip_mask == 255,
            0.75 * self.face_structure_resultant +
            0.25 * self.xdog_makeup_lab[:, :, 0],
            self.xdog_makeup_lab[:, :, 0])
        # self.xdog_makeup_lab[:,:,1:] = self.gamma*self.xdog_makeup_lab[:,:,1:]
        self.xdog_makeup = cv.cvtColor(self.xdog_makeup_lab, cv.COLOR_LAB2BGR)

        # Showcase
        plt.subplot(2, 3, 1)
        plt.imshow(opencv2matplotlib(self.subject_image))
        plt.subplot(2, 3, 2)
        plt.imshow(opencv2matplotlib(self.xdog_makeup))
        plt.subplot(2, 3, 3)
        plt.imshow(opencv2matplotlib(self.subject_makeup))
        plt.subplot(2, 3, 5)
        plt.imshow(opencv2matplotlib(self.example_image))
        plt.show()