Example #1
0
    def classify_image(self, filename):
        """Run SSD detection on the image at *filename*, draw the boxes,
        save an annotated copy, and return results plus the image as base64.

        Returns:
            On success, a 2-tuple:
                ((True, (rclasses, rscores, rbboxes), bet_result, elapsed_str),
                 base64_image_html)
            On failure, a single tuple (False, error_message).
            NOTE(review): the failure shape differs from the success shape —
            callers unpacking two values will raise on the error path; confirm intent.
        """
        try:
            # Bundle the session and graph tensors that process_image() needs.
            params = {
                'ssd_anchors': self.ssd_anchors,
                'img_input': self.img_input,
                'isess': self.isess,
                'image_4d': self.image_4d,
                'predictions': self.predictions,
                'localisations': self.localisations,
                'bbox_img': self.bbox_img,
            }

            # read image
            img = mpimg.imread(filename)

            starttime = time.time()
            rclasses, rscores, rbboxes = process_image(img, params)
            visualization.bboxes_draw_on_img(img, rclasses, rscores, rbboxes, visualization.colors_plasma)
            # Swap channel order before cv2.imwrite (mpimg reads RGB, cv2 writes BGR).
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            endtime = time.time()

            # Human-readable ("idx : label", "score") pairs, one per detection.
            bet_result = [(str(idx + 1) + ' : ' + self.l_VOC_CLASS[v], '%.5f' % rscores[idx])
                          for idx, v in enumerate(rclasses)]

            # save image after draw box
            fileout = str(datetime.datetime.now()).replace(' ', '_') + 'processed_img' + '.jpg'
            fileps = os.path.join(DETECTED_FOLDER, fileout)
            cv2.imwrite(fileps, img)

            new_img_base64 = embed_image_html(fileps)

            rtn = (True, (rclasses, rscores, rbboxes), bet_result, '%.3f' % (endtime - starttime))
            return rtn, new_img_base64

        except Exception as err:
            # Log at ERROR level with the traceback instead of burying the
            # failure at INFO (was logging.info).
            logging.exception('Classification error: %s', err)
            return (False, 'Something went wrong when classifying the '
                           'image. Maybe try another one?')
Example #2
0
    def VideoProcessFromC(self, request, context):
        """gRPC handler: decode a raw 480x640x3 uint8 frame from the request,
        run SSD detection (score threshold 0.3), draw the boxes, and return
        the annotated frame as raw bytes.
        """
        # np.fromstring is deprecated (removed in NumPy 2.x) for binary
        # input; np.frombuffer is the supported equivalent. The view is
        # read-only, but cv2.flip below produces a fresh writable array,
        # so the subsequent in-place drawing is unaffected.
        nparr = np.frombuffer(request.frame, dtype=np.uint8)
        nparr = np.reshape(nparr, (480, 640, 3), order='C')

        # Mirror horizontally (webcam-style view).
        img = cv2.flip(nparr, 1)
        rclasses, rscores, rbboxes = process_image(img, 0.3)

        visualization.bboxes_draw_on_img(img,
                                         rclasses,
                                         rscores,
                                         rbboxes,
                                         visualization.colors_tableau,
                                         class2label,
                                         thickness=2)

        return exchange_frame_pb2.OutputVideo(frame=img.tobytes())
Example #3
0
    def VideoProcess(self, request, context):
        """gRPC handler: unpickle a frame, run SSD detection (score
        threshold 0.3), draw the detections, and return the pickled result.
        """
        # SECURITY: pickle.loads on bytes received over the wire executes
        # arbitrary code if the peer is untrusted — confirm the caller is trusted.
        frame = pickle.loads(request.frame)

        # Mirror horizontally (webcam-style view).
        mirrored = cv2.flip(frame, 1)
        rclasses, rscores, rbboxes = process_image(mirrored, 0.3)

        visualization.bboxes_draw_on_img(
            mirrored, rclasses, rscores, rbboxes,
            visualization.colors_tableau, class2label, thickness=2)

        return exchange_frame_pb2.OutputVideo(frame=pickle.dumps(mirrored))
Example #4
0
def detect(request):
    """Django view: run face detection on an uploaded (POST) or URL-fetched
    (GET ?url=...) image and return detections as JSON.

    The JSON carries a status code from `config` and, on success, the
    "x,y,w,h" coordinate strings, their count, and the annotated image URL.
    """
    # Random temp name so concurrent requests don't clobber each other.
    tmp_filename = 'media/' + ''.join(random.choice(string.ascii_uppercase
                        + string.digits) for _ in range(64)) + '.jpg'
    if request.method == 'POST':
        handle_uploaded_file(request.FILES['image'], tmp_filename)
    elif request.method == 'GET':
        url = request.GET['url']
        try:
            # NOTE(review): urllib.urlretrieve is the Python 2 spelling; under
            # Python 3 this would be urllib.request.urlretrieve — confirm runtime.
            urllib.urlretrieve(url, tmp_filename)
        except Exception:
            # Narrowed from a bare except: so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            return JsonResponse({'code':config.CODE_INV_URL})
    else:
        return JsonResponse({'code':config.CODE_MTHD_NOT_SPRT})

    try:
        img = skimage.io.imread(tmp_filename)
        global gb_detector
        cc, scores, bboxes = gb_detector.dectectFace(img)
        res = {}

        if len(scores) > 0:
            # Draw boxes, overwrite the temp file with the annotated image,
            # and convert the boxes to pixel coordinates.
            visualization.bboxes_draw_on_img(img, cc, scores, bboxes,
                visualization.colors, class_names=['none-face', 'face'])
            skimage.io.imsave(tmp_filename, img)
            bboxes = normalizeBBoxes(bboxes, img.shape[1], img.shape[0])

        res['code'] = config.CODE_SUCCESS
        res['coordinates'] = []
        for ii in range(len(scores)):
            if scores[ii] >= config.SCORE_THRES:
                # Boxes come back as (y1, x1, y2, x2); emit "x,y,width,height".
                y1, x1, y2, x2 = bboxes[ii]
                res['coordinates'].append("%d,%d,%d,%d"%(x1, y1, x2 - x1, y2 - y1))
        res['num'] = len(res['coordinates'])
        res["url"] = '/' + tmp_filename
    except Exception:
        # Narrowed from a bare except: any detection/IO failure becomes a
        # generic system-error response rather than hiding interpreter exits.
        res = {'code':config.CODE_SYS_ERR}
    return JsonResponse(res)
                        # NOTE(review): fragment — the enclosing def/loop/with
                        # blocks are outside this view; sess, detection_graph,
                        # rclasses, frameCnt, etc. are bound elsewhere.
                        num_detections = detection_graph.get_tensor_by_name('num_detections:0')
                        # Actual detection.
                        # Runs the TF object-detection graph on the expanded frame.
                        # Note: boxes/scores/classes are rebound here from tensor
                        # handles to their evaluated numpy values.
                        (boxes, scores, classes, num) = sess.run(
                            [boxes, scores, classes, num_detections],
                            feed_dict={image_tensor: image_np_expanded})

                    # Per-frame timing log when anything was detected;
                    # astype('|S3') truncates each class id to 3 bytes for display.
                    if len(rclasses)>0:
                        nowMicro = getCurrentClock()
                        if modelType=="ssd":
                            print("# %s - %s - %0.4f seconds ---" % (frameCnt,rclasses.astype('|S3'), (nowMicro - start_time).total_seconds()))
                        elif modelType=="tensorflow":
                            print("# %s - %s - %0.4f seconds ---" % (frameCnt, classes.astype('|S3'), (nowMicro - start_time).total_seconds()))
                        start_time = nowMicro
                    if showImage:
                        if modelType=="ssd":
                            visualization.bboxes_draw_on_img(img, rclasses, rscores, rbboxes, visualization.colors_plasma)
                        elif modelType=="tensorflow":
                            # Visualization of the results of a detection.
                            vis_util.visualize_boxes_and_labels_on_image_array(
                                img,
                                np.squeeze(boxes),
                                np.squeeze(classes).astype(np.int32),
                                np.squeeze(scores),
                                category_index,
                                use_normalized_coordinates=True,
                                line_thickness=8)

                            #img = image_np
                        # presumably img is RGB here and needs BGR for
                        # cv2.imshow — TODO confirm against the frame source.
                        if cropping:
                            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                            cv2.imshow("crop",img)
Example #6
0
    # NOTE(review): tail of a function whose def is outside this view
    # (presumably process_image): returns the final detections.
    return rclasses, rscores, rbboxes





# Toggle between a live-camera smoke test and the demo-image path below.
cam = False
if cam:
    # test using an actual camera
    cap = cv2.VideoCapture(0)
    while(cap.isOpened()):
        ret, img = cap.read()
        # Guard against a failed grab (camera unplugged / end of stream)
        # instead of passing None into the detector.
        if not ret:
            break
        rclasses, rscores, rbboxes = process_image(img)

        visualization.bboxes_draw_on_img(img, rclasses, rscores, rbboxes, visualization.colors_plasma)
        cv2.imshow('img', img)
        # A single waitKey both paces the loop (~20 ms) and polls the
        # keyboard; the original called it twice per frame, doubling the
        # delay and discarding the first call's key events.
        if cv2.waitKey(20) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()

else:
    # Test on some demo image and visualize output.
    path = '../checkin/'
    image_names = sorted(os.listdir(path))

   
                # NOTE(review): fragment — the enclosing def and loops are
                # outside this view; img, a/b/c/d, sc, tl, h, w, and the
                # accumulators are bound elsewhere.
                # Crop the current window from the image (rows a:c, cols b:d).
                p = img[a:c, b:d]

                cs, ss, bs = self._predict(p)
                # Map crop-relative box corners back to full-image
                # coordinates: view as (N, 2) points, scale by sc, offset by
                # the crop's top-left tl, then flatten again.
                bs = (bs.reshape(-1).reshape(-1, 2) * sc + tl).reshape(-1)
                classes = np.concatenate((classes, cs))
                scores = np.concatenate((scores, ss))
                bboxes = np.concatenate((bboxes, bs))
                cnt += 1

        # Reassemble flat coordinates into (N, 4) boxes, then filter the
        # accumulated detections — presumably NMS/score filtering with the
        # image aspect ratio h/w and a 0.6 threshold; TODO confirm against
        # _post_process_bbox.
        bboxes = bboxes.reshape(-1, 4)
        classes, scores, bboxes = self._post_process_bbox(
            classes, scores, bboxes,
            float(h) / w, 0.6)
        # print cnt
        return classes, scores, bboxes


if __name__ == '__main__':
    # Smoke test: detect faces in a demo image and write an annotated copy.
    detector = FaceDetection()
    # detector.load()
    image = cv2.imread('demo2.jpg')
    face_classes, face_scores, face_boxes = detector.dectectFace(image)
    visualization.bboxes_draw_on_img(
        image, face_classes, face_scores, face_boxes,
        visualization.colors, class_names=['none-face', 'face'])
    cv2.imwrite('demo2-out.jpg', image)
Example #8
0
 # NOTE(review): fragment — the enclosing scope is outside this view; path,
 # video, outputpath, image_names, network_time, and process_time are bound
 # elsewhere. Times full-frame detection per image, saves an annotated plot,
 # then crops an expanded region around the last detection.
 process_medium_time = np.zeros((len(image_names), 1))
 process_fovea_time = np.zeros((len(image_names), 1))
 for lvar in range(len(image_names)):
     img = mpimg.imread(path + video + image_names[lvar])
     start_time = time.time()
     rclasses, rscores, rbboxes, network_time[lvar] = process_image(
         img, 0.5, 0.45, (300, 300), True, False)
     #print(rbboxes)
     #plt.imshow(img)
     #plt.show()
     end_time = time.time()
     # Wall-clock time for the full-resolution pass.
     process_time[lvar] = end_time - start_time
     #visualization.bboxes_draw_on_img(img, rclasses, rscores, rbboxes, visualization.colors_plasma)
     #visualization.plt_bboxes(img, rclasses, rscores, rbboxes)
     # Draw on a copy so the original img stays clean for the crop below.
     rimg = np.array(img)
     visualization.bboxes_draw_on_img(rimg, rclasses, rscores, rbboxes,
                                      visualization.colors_tableau)
     visualization.plt_bboxes(rimg, rclasses, rscores, rbboxes,
                              outputpath + "complete_" + image_names[lvar])
     #print(rbboxes)
     #print(rclasses)
     #print(rscores)
     #rbboxes =np.array([[0,1,0,1],[0.46,0.14,0.74,0.56]])
     if rbboxes.shape[0] >= 1:
         start_time = time.time()
         # Expand the last detection's normalized (y1,x1,y2,x2) box by
         # 20% on each side, convert to pixels, and clamp the far edges
         # to the image bounds (near edges shrink toward 0 via the 0.8 factor).
         y1 = int(rbboxes[-1, 0] * 0.8 * img.shape[0])
         x1 = int(rbboxes[-1, 1] * 0.8 * img.shape[1])
         y2 = min(int(rbboxes[-1, 2] * 1.2 * img.shape[0]), img.shape[0])
         x2 = min(int(rbboxes[-1, 3] * 1.2 * img.shape[1]), img.shape[1])
         #print(y2-y1,x2-x1)
         medium_img = np.array(img[y1:y2, x1:x2, :])
         #medium_img = np.array(img)
Example #9
0
File: SSD_demo.py  Project: daxiapazi/SSD
import matplotlib.image as mpimg
import random
from ssd_300_vgg import SSD
from utils import preprocess_image, process_bboxes
from visualization import bboxes_draw_on_img

# Build the SSD-300/VGG detection graph on a fresh default graph.
tf.reset_default_graph()  # reset the default graph
ssd_net = SSD()
classes, scores, bboxes = ssd_net.detections()
images = ssd_net.images()

sess = tf.Session()
# Restore SSD model.
ckpt_filename = './ssd_checkpoints/ssd_vgg_300_weights.ckpt'
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(sess, ckpt_filename)

# cv2 reads BGR; the network preprocessing expects RGB.
img_old = cv2.imread('./demo/dog.jpg')
img = cv2.cvtColor(img_old, cv2.COLOR_BGR2RGB)
# Mean-subtraction / normalization.
img_prepocessed = preprocess_image(img)
rclasses, rscores, rbboxes = sess.run(
    [classes, scores, bboxes], feed_dict={images: img_prepocessed})
rclasses, rscores, rbboxes = process_bboxes(rclasses, rscores, rbboxes)
# One random RGB color per class for drawing.
colors = [(random.random(), random.random(), random.random()) for _ in range(20)]
#plt_bboxes(img, rclasses, rscores, rbboxes)
bboxes_draw_on_img(img_old, rclasses, rscores, rbboxes, colors, thickness=2)