Code example #1
def _init_category_index(label_map_path):
  """Creates category index from class indexes to name of the classes.

  Args:
    label_map_path: path to the mapping.
  Returns:
    A map for mapping int keys to string categories.
  """

  label_map = label_map_util.load_labelmap(label_map_path)
  num_classes = max(x.id for x in label_map.item)
  categories = label_map_util.convert_label_map_to_categories(
      label_map, max_num_classes=num_classes, use_display_name=True)
  category_index = label_map_util.create_category_index(categories)
  return category_index
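For reference, a minimal usage sketch; the label-map path below is hypothetical, and the lookup follows the TF Object Detection API's category-index structure (a dict keyed by class id):

# hypothetical path to a COCO label map
category_index = _init_category_index('data/mscoco_label_map.pbtxt')

# each entry is a dict like {'id': 1, 'name': 'person'}
print(category_index[1]['name'])  # -> 'person' for the COCO label map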
Code example #2
    # nms
    nms_begin = time.time()
    if len(sys.argv) > 1 and sys.argv[1] == 'only_person':
        boxes, classes, scores = dsnms(res, only_person=True)
    else:
        boxes, classes, scores = dsnms(res)
    nms_end = time.time()
    print('total nms: {:.4f}s'.format(nms_end - nms_begin))

    # save & visualization
    save_begin = time.time()
    PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
    NUM_CLASSES = 80
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)
    if not os.path.exists('video/output'):
        os.makedirs('video/output')
    else:
        for root, dirs, files in os.walk('video/output'):
            for f in files:
                os.unlink(os.path.join(root, f))
    for i, image_path in enumerate(pkllist):
        start = time.time()
        image_process = get_labeled_image(category_index, image_path,
                                          np.array(boxes[i]),
                                          np.array(classes[i]),
                                          np.array(scores[i]))
        scipy.misc.imsave('video/output/frame{}.jpg'.format(i), image_process)
        print('(%d/%d) writing image time: %5.3fs \r' %
              (i + 1, len(pkllist), time.time() - start), end='')
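Note that scipy.misc.imsave was removed in SciPy 1.2, so the save call above fails on current installs; a drop-in sketch using imageio (same frame path and loop variables as above):

import imageio

# equivalent of the scipy.misc.imsave call, for SciPy >= 1.2
imageio.imwrite('video/output/frame{}.jpg'.format(i), image_process)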
Code example #3
def initLabels(path):
    global category_index
    label_map = label_map_util.load_labelmap(path)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)
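The global makes the result implicit, and NUM_CLASSES must already exist at module level; an equivalent sketch that returns the index to the caller instead (same label_map_util calls):

def init_labels(path, num_classes):
    # same pipeline, but the caller owns the result instead of a module global
    label_map = label_map_util.load_labelmap(path)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=num_classes, use_display_name=True)
    return label_map_util.create_category_index(categories)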
Code example #4
    def __init__(self):
        # TODO: load classifier

        # light state
        self.cur_ls = TrafficLight.UNKNOWN

        # This is needed since the file is stored in a subdirectory
        sys.path.append("..")

        # Name of the directory containing the object detection module we're using
        MODEL_NAME = 'light_classification/inference_graph'

        # Grab path to current working directory
        CWD_PATH = os.getcwd()

        # Path to frozen detection graph .pb file, which contains the model that is
        # used for object detection.
        PATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_NAME, 'frozen_inference_graph.pb')

        # Path to label map file
        PATH_TO_LABELS = os.path.join(CWD_PATH, MODEL_NAME, 'labelmap.pbtxt')

        # Number of classes the object detector can identify
        NUM_CLASSES = 3

        # Load the label map.
        # Label maps map indices to category names, so that when our convolutional
        # network predicts `1`, we know that this corresponds to `Red`.
        # Here we use internal utility functions, but anything that returns a
        # dictionary mapping integers to appropriate string labels would be fine.
        label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
        categories = label_map_util.convert_label_map_to_categories(
            label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
        category_index = label_map_util.create_category_index(categories)

        # Load the Tensorflow model into memory.
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

            self.sess = tf.Session(graph=detection_graph)

        # Define input and output tensors (i.e. data) for the object detection classifier

        # Input tensor is the image
        self.image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

        # Output tensors are the detection boxes, scores, and classes.
        # Each box represents a part of the image where a particular object was detected.
        self.detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

        # Each score represents the level of confidence for each of the objects.
        # The score is shown on the result image, together with the class label.
        self.detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')

        # Number of objects detected
        self.num_detections = detection_graph.get_tensor_by_name('num_detections:0')
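The comment above points out that any dictionary mapping integers to labels would work; a hand-rolled equivalent for this 3-class detector might look like the sketch below (only `Red` for class 1 is stated above; the other names are assumptions):

# hypothetical hand-rolled category index; names for classes 2 and 3 are assumed
category_index = {
    1: {'id': 1, 'name': 'Red'},
    2: {'id': 2, 'name': 'Yellow'},  # assumed
    3: {'id': 3, 'name': 'Green'},   # assumed
}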
Code example #5
def detection(video_input=0, show=False, write=False, threshold=0.5):
    def angle(coord):
        def unit_vector(vector):
            return vector / np.linalg.norm(vector)

        v1_u = unit_vector((1, 0))
        v2_u = unit_vector(coord)
        theta = np.rad2deg(np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)))
        if (coord[1] > 0):
            theta = 360 - theta
        return theta

    def direction(theta):
        if theta <= 9 or theta >= 351:
            direction = 'E'
        elif theta >= 81 and theta <= 99:
            direction = 'N'
        elif theta >= 171 and theta <= 189:
            direction = 'W'
        elif theta >= 261 and theta <= 279:
            direction = 'S'
        elif theta > 9 and theta < 81:
            direction = 'NE'
        elif theta > 99 and theta < 171:
            direction = 'NW'
        elif theta > 189 and theta < 261:
            direction = 'SW'
        else:
            direction = 'SE'
        return direction

    if (video_input == '0'):
        fvs = FileVideoStream(0).start()
    else:
        fvs = FileVideoStream(video_input).start()

    cap = fvs.get_stream()
    global cap_width
    cap_width = int(cap.get(3))   # cv2.CAP_PROP_FRAME_WIDTH
    cap_height = int(cap.get(4))  # cv2.CAP_PROP_FRAME_HEIGHT

    if write:
        out = cv2.VideoWriter('output/output_{:%y%m%d_%H%M%S}.avi'.format(
            datetime.datetime.now()), cv2.VideoWriter_fourcc(*'XVID'),
            cap.get(5),  # cv2.CAP_PROP_FPS
            (cap_width // 3, cap_height // 3))

    MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
    MODEL_FILE = MODEL_NAME + '.tar.gz'
    DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
    PATH_TO_CKPT =  os.path.join('data', 'model', 'frozen_inference_graph.pb')
    PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
    NUM_CLASSES = 2

    if not os.path.isfile(PATH_TO_CKPT):
        # urllib.request.URLopener is deprecated; urlretrieve does the same job here
        urllib.request.urlretrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
        tar_file = tarfile.open(MODEL_FILE)
        for file in tar_file.getmembers():
            file_name = os.path.basename(file.name)
            if not file.isdir() and not 'saved_model.pb' in file_name:
                file.name = file_name
                tar_file.extract(file, os.path.join('data', 'model'))

    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(label_map,
        max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)

    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')

            try:
                init = False
                count = 0
                counter = 0  # frame counter (missing in the original snippet); the detector runs on every 20th frame
                theta = 0
                center_curr = np.array([0, 0])
                center_delta_curr = np.array([0, 0])
                center_ref = np.array([0.5, 0.5])
                center_prev = np.array([0.5, 0.5])
                ref = (0, 0, 0, 0)

                while True:
                    image_np = fvs.read()

                    if not fvs.more():
                        fvs.stop()
                        print("Released video source.")
                        break

                    if (counter % 20 == 0):
                        image_np_expanded = np.expand_dims(image_np, axis=0)
                        (boxes, scores, classes, num) = sess.run([detection_boxes,
                            detection_scores, detection_classes, num_detections],
                            feed_dict={image_tensor: image_np_expanded})

                        for i in range(min(20, np.squeeze(boxes).shape[0])):
                            class_value = np.squeeze(classes).astype(np.int32)[i]
                            score_value = np.squeeze(scores)[i]
                            if class_value in category_index.keys():
                                ymin, xmin, ymax, xmax = tuple(np.squeeze(boxes)[i].tolist())
                                class_name = category_index[class_value]['name']
                                if score_value > threshold:
                                    center = np.array([(xmin + xmax) / 2, (ymin + ymax) / 2])
                                    center_curr = center
                                    center_delta_curr = center_curr - center_prev
                                    distance = np.linalg.norm(center_ref - center_curr)
                                    if distance > 0.25 and count < 5 and init:
                                        count += 1
                                        scores[0][i] = 0
                                    else:
                                        count = 0
                                        if distance > 0.1 or not init:
                                            init = True
                                            center_prev = center_ref
                                            center_ref = center_curr
                                            ref = (xmin, xmax, ymin, ymax)
                                            theta = angle(center_ref - center_prev)
                                        print('{0} @ {1:.2f}% | angle: {2:.2f} | '
                                              'direction: {3:2} | '
                                              'center: ({4[0]:.2f}, {4[1]:.2f}) | '
                                              'distance from ref: {5:.4f}'.format(
                                                  class_name, score_value * 100,
                                                  theta, direction(theta), center,
                                                  distance))

                    counter += 1
                    coord_prev = (int(center_prev[0] * cap_width // 3), int(center_prev[1] * cap_height // 3))
                    coord_ref = (int(center_ref[0] * cap_width // 3), int(center_ref[1] * cap_height // 3))
                    min_vertex = (int(ref[0] * cap_width // 3), int(ref[2] * cap_height // 3))
                    max_vertex = (int(ref[1] * cap_width // 3), int(ref[3] * cap_height // 3))
                    cv2.rectangle(image_np, min_vertex, max_vertex, (255, 0, 0), 2)
                    cv2.arrowedLine(image_np, coord_prev, coord_ref, (255, 0, 0), 3, tipLength=0.25)
                    display_str = 'angle: {0:.2f} ({1})'.format(theta, direction(theta))
                    cv2.putText(image_np, display_str,
                                (int(ref[0] * cap_width // 3),
                                 int((ref[2] - 0.0275) * cap_height // 3)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 0, 0), 2,
                                cv2.LINE_AA)

                    if write:
                        out.write(image_np)

                    if show:
                        cv2.imshow('Object Detection', image_np)

                    if cv2.waitKey(50) & 0xFF == ord('q'):
                        cv2.destroyAllWindows()
                        break
            except KeyboardInterrupt:
                fvs.stop()
                if write:
                    out.release()
                print("Released video source.")
Code example #6
with model.as_default():
	# initialize the graph definition
	# graphDef = tf.GraphDef()
	graphDef = tf.compat.v1.GraphDef()

	# load the graph from disk
	# with tf.gfile.GFile(args["model"], "rb") as f:
	with tf.io.gfile.GFile(args["model"], "rb") as f:
		serializedGraph = f.read()
		graphDef.ParseFromString(serializedGraph)
		tf.import_graph_def(graphDef, name="")

# load the class labels from disk
labelMap = label_map_util.load_labelmap(args["labels"])
categories = label_map_util.convert_label_map_to_categories(
	labelMap, max_num_classes=args["num_classes"],
	use_display_name=True)
categoryIdx = label_map_util.create_category_index(categories)

# create a session to perform inference
with model.as_default():
	with tf.compat.v1.Session(graph=model) as sess:
		# grab a reference to the input image tensor and the boxes
		# tensor
		imageTensor = model.get_tensor_by_name("image_tensor:0")
		boxesTensor = model.get_tensor_by_name("detection_boxes:0")

		# for each bounding box we would like to know the score
		# (i.e., probability) and class label
		scoresTensor = model.get_tensor_by_name("detection_scores:0")
		classesTensor = model.get_tensor_by_name("detection_classes:0")
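This snippet mixes migrated calls (tf.compat.v1.GraphDef, tf.io.gfile.GFile) with v1-style session code, so under TensorFlow 2.x the remaining calls also have to go through the compat layer; one common way to run such graph/session code wholesale is:

# run TF1-style graph/session code under TensorFlow 2.x
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()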
Code example #7
    def main(self, image_filepath):

        TEST_IMAGES = self.get_test_images(image_filepath)

        box_coord = []
        for IMAGE_NAME in tqdm(TEST_IMAGES):

            print('********************************')
            print('Processing image:', IMAGE_NAME)
            print('********************************')

            # Path to image
            PATH_TO_IMAGE = os.path.join(self.TEST_IMAGE_PATH, IMAGE_NAME)

            img = cv2.imread(PATH_TO_IMAGE)
            width = img.shape[1]
            height = img.shape[0]

            # Load the label map.
            label_map = label_map_util.load_labelmap(self.PATH_TO_LABELS)
            categories = label_map_util.convert_label_map_to_categories(
                label_map,
                max_num_classes=self.NUM_CLASSES,
                use_display_name=True)
            category_index = label_map_util.create_category_index(categories)

            # Load Tensorflow model into memory.
            detection_graph = tf.Graph()
            with detection_graph.as_default():
                od_graph_def = tf.GraphDef()
                with tf.gfile.GFile(self.PATH_TO_FROZEN_GRAPH, 'rb') as f:
                    serialized_graph = f.read()
                    od_graph_def.ParseFromString(serialized_graph)
                    tf.import_graph_def(od_graph_def, name='')

                sess = tf.Session(graph=detection_graph)

            # Input tensor is the image
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

            # Output tensors are the detection boxes, scores, and classes
            detection_boxes = detection_graph.get_tensor_by_name(
                'detection_boxes:0')

            # Each score represents the level of confidence for each of the objects.
            detection_scores = detection_graph.get_tensor_by_name(
                'detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name(
                'detection_classes:0')

            # Number of objects detected
            num_detections = detection_graph.get_tensor_by_name(
                'num_detections:0')

            # Load image using OpenCV and
            # expand image dimensions to have shape: [1, None, None, 3]
            image = cv2.imread(PATH_TO_IMAGE)
            image_expanded = np.expand_dims(image, axis=0)

            # Perform the actual detection by running the model with the image as input
            (boxes, scores, classes,
             num) = sess.run([
                 detection_boxes, detection_scores, detection_classes,
                 num_detections
             ],
                             feed_dict={image_tensor: image_expanded})

            if self.visualize:
                # Draw the results of the detection
                vis_util.visualize_boxes_and_labels_on_image_array(
                    image,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    category_index,
                    use_normalized_coordinates=True,
                    line_thickness=6,
                    min_score_thresh=0.2)

                # All the results have been drawn on image
                cv2.imwrite(
                    os.path.join(self.BASE_PATH,
                                 'results/{}'.format(IMAGE_NAME)), image)
                #plt.imshow(image,cmap='gnuplot')
                #plt.show()

            # Get scores and bboxes
            for i, score in enumerate(np.squeeze(scores)):

                # index of the bbox (computed but not used below)
                bbox = 'bbox_{}'.format(i)

                if score >= 0.2:

                    #get bbox coordinates
                    ymin, xmin, ymax, xmax = np.squeeze(boxes)[i]

                    ymin = int(ymin * height)
                    xmin = int(xmin * width)
                    ymax = int(ymax * height)
                    xmax = int(xmax * width)

                    coordinates = (ymin, xmin, ymax, xmax)
                    box_coord.append(coordinates)

                else:
                    continue

            print('*****************************************************')
            print('Image Name:', IMAGE_NAME)
            print('Box Coordinates:', box_coord)
            print('Cumulative Detected Boxes:', np.array(box_coord).shape[0])
            print('*****************************************************')

        print('Total Detected Objects:', np.array(box_coord).shape[0])
        print('Shape of Box Array:', np.array(box_coord).shape)

        return np.array(box_coord)
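Note that the label map, the frozen graph, and the session are rebuilt inside the per-image loop even though none of them depend on the image; a sketch of the hoisted structure (same attribute names as above):

def main(self, image_filepath):
    TEST_IMAGES = self.get_test_images(image_filepath)

    # one-time setup, hoisted out of the per-image loop
    label_map = label_map_util.load_labelmap(self.PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=self.NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)

    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(self.PATH_TO_FROZEN_GRAPH, 'rb') as f:
            od_graph_def.ParseFromString(f.read())
            tf.import_graph_def(od_graph_def, name='')
        sess = tf.Session(graph=detection_graph)

    for IMAGE_NAME in tqdm(TEST_IMAGES):
        ...  # per-image read, sess.run, and box collection, as in the loop above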
Code example #8
def people_knives(temp):
    MODEL_NAME = './outputs/faster_rcnn/faster_rcnn_inception_v2_coco_2018_01_28'
    PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
    PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
    NUM_CLASSES = 90
    
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.compat.v1.GraphDef()
        with tf.compat.v1.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)
            
    TEST_IMAGE_PATHS = [ os.path.join(temp)]
    # Size, in inches, of the output images.
    IMAGE_SIZE = (12, 8)
    with detection_graph.as_default():
        with tf.compat.v1.Session(graph=detection_graph) as sess:
            # Define input and output tensors for detection_graph
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Each box represents a part of the image where a particular object was detected.
            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            # Each score represents the level of confidence for each of the objects.
            # The score is shown on the result image, together with the class label.
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            i='0'
            j=0
            for image_path in TEST_IMAGE_PATHS:
                #image = Image.open(temp)
                response = requests.get(temp)
                image= Image.open(BytesIO(response.content))
                # the array based representation of the image will be used later in order to prepare the
                # result image with boxes and labels on it.
                image_np = load_image_into_numpy_array(image)
                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(image_np, axis=0)
                # Actual detection.
                (boxes, scores, classes, num) = sess.run(
                [detection_boxes, detection_scores, detection_classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})
                # Visualization of the results of a detection.
                p = vis_util.visualize_boxes_and_labels_on_image_array(
                    image_np,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    category_index,
                    use_normalized_coordinates=True,
                    line_thickness=1)
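                # note: the stock TF Object Detection API version of
                # visualize_boxes_and_labels_on_image_array returns the annotated
                # image array; the parsing below assumes a locally modified vis_util
                # that returns a "name: NN%"-style detection string instead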
                if(len(p)==0):
                    return ("")
                else:

                    j=0
                    person=0
                    flag=0
                    flag1=0
                    flag2=0
                    n=len(p)
                    ans=""
                    temp1=""
                    while(j<n):
                        temp1 = ""
                        
                        while(p[j]!=':'):
                            temp1 = temp1 + p[j]
                            j=j+1
                                                
                        if(temp1=="person"):
                            if(p[j]==':'):
                                j=j+2
                                temp=0
                                while(p[j]!='.' and p[j]!='%'):
                                    temp = temp*10 + ord(p[j]) - 48
                                    j=j+1
                                if(temp>95):
                                    person = person + 1
                        
                        elif(temp1=="knife"):
                            if(p[j]==':'):
                                j=j+2
                                temp=0
                                while(p[j]!='.' and p[j]!='%'):
                                    temp = temp*10 + ord(p[j]) - 48
                                    j=j+1
                                if(temp>=59 and flag==0):
                                    ans+=temp1+" ,"
                                    flag=1
                                    
                        elif(temp1=="baseball bat"):
                            if(p[j]==':'):
                                j=j+2
                                temp=0
                                while(p[j]!='.' and p[j]!='%' ):
                                    temp = temp*10 + ord(p[j]) - 48
                                    j=j+1
                                if(temp>=59 and flag1==0):
                                    ans+=temp1+" ,"
                                    flag1=1
                                    
                        elif(temp1=="scissors"):
                            if(p[j]==':'):
                                j=j+2
                                temp=0
                                while(p[j]!='.' and p[j]!='%'):
                                    temp = temp*10 + ord(p[j]) - 48
                                    j=j+1
                                if(temp>=59 and flag2==0):
                                    ans+=temp1+" ,"
                                    flag2=1
                                    
                        while(p[j]!='%'):
                            j=j+1
                        j=j+1
                        if(p[j]=='$'):
                            break
                    
                    if(person>0):
                        if(person==1 and flag == 0 and flag1 == 0 and flag2 == 0):
                            ans = ans + "1 person"
                        elif(person==1):
                            ans = ans + "and 1 person"
                        elif(person>1 and flag == 0 and flag1 == 0 and flag2 == 0):
                            ans = ans + str(person) + " people"
                        else:
                            ans = ans + "and " + str(person) + " people"
            return ans
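The character-by-character scan above can be written more directly with a regular expression, assuming p really is a string of "name: NN%" segments ending in '$' (that format is inferred from the parser, not confirmed); the per-class thresholds are simplified to a single one here:

import re

# hypothetical equivalent of the manual scan, under the assumed "name: NN%" format
def count_detections(p, threshold):
    counts = {}
    for name, score in re.findall(r'([^:%$]+):\s*(\d+)', p):
        if int(score) >= threshold:
            name = name.strip()
            counts[name] = counts.get(name, 0) + 1
    return counts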
Code example #9
    def loadLabels(self):
        label_map = label_map_util.load_labelmap(self.PATH_TO_LABELS)
        categories = label_map_util.convert_label_map_to_categories(
            label_map, max_num_classes=self.NUM_CLASSES, use_display_name=True)
        self.category_index = label_map_util.create_category_index(categories)
Code example #10
def models_yuce(con):
    detection_graph = tf.Graph()  # load the object detection model
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)  # load the detection class label map
    categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)

    # model = load_model(PATH_TO_Classification)

    c1, c2 = con
    c1.close()  # the parent sends on conn1 and this child receives on conn2, so close conn1 here; leaving it open would block
    with detection_graph.as_default():
        with tf.Session() as sess:
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            for key in ['num_detections', 'detection_boxes', 'detection_scores', 'detection_classes', 'detection_masks']:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
            image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
            while True:
                try:  # break out of the loop when the pipe raises
                    tf_list = c2.recv()
                    value = 0  # 0 = left, 1 = right
                    read_result = []
                    plate_dict.clear()
                    for image_np_expanded in tf_list[0]:  # run each image of the violation group one by one
                        # print(time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime()))
                        output_dict = sess.run(tensor_dict, feed_dict={image_tensor: image_np_expanded})
                        # print(time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime()))
                        output_dict['num_detections'] = int(output_dict['num_detections'][0])
                        output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
                        output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
                        output_dict['detection_scores'] = output_dict['detection_scores'][0]

                        image2 = Image.fromarray(tf_list[1][value])
                        car_list, plate_list = [], []
                        for i in range(output_dict['num_detections']):  # collect the first car(s) and plate in each image, if any
                            if output_dict['detection_scores'][i] < 0.5 or (len(car_list) == 2 and plate_list):
                                break
                            if output_dict['detection_classes'][i] == 2 and not plate_list:
                                plate_list.append(output_dict['detection_boxes'][i])
                            if output_dict['detection_classes'][i] == 1 and len(car_list) < 2:
                                car_list.append(output_dict['detection_boxes'][i])
                        if len(car_list) == 2 and int(image2.size[1] * car_list[1][2]) > int(image2.size[1] * car_list[0][0]):
                            if not int(tf_list[5]):
                                with open('result.csv', 'a') as f:
                                    f.write(tf_list[2] + ' 0 1 {}2\n'.format(value))  # cars running in parallel; code 2
                                plate_dict.clear()
                                save_img(tf_list[3], tf_list[2], tf_list[4], 1)
                            break
                        if plate_list:  # if there is a plate, there must be a car
                            img = image2.crop((int(image2.size[0] * plate_list[0][1]),
                                               int(image2.size[1] * plate_list[0][0]),
                                               int(image2.size[0] * plate_list[0][3]),
                                               int(image2.size[1] * plate_list[0][2])))
                            plate_dict[value] = img
                        elif car_list:  # only a car was found: check the car's bottom edge against the image bottom
                            if image2.size[1] - int(image2.size[1] * car_list[0][2]) < 90:
                                if not int(tf_list[5]):
                                    with open('result.csv', 'a') as f:
                                        f.write(tf_list[2] + ' 0 1 {}1\n'.format(value))  # car body incomplete; code 1
                                    plate_dict.clear()
                                    save_img(tf_list[3], tf_list[2], tf_list[4], 1)
                                break
                            elif value == 1 and not plate_dict:  # no plate found on either car, but the cars are clearly complete
                                if not int(tf_list[5]):
                                    with open('result.csv', 'a') as f:
                                        f.write(tf_list[2] + ' 1 0\n')  # assume the plates are correct for now
                                    save_img(tf_list[3], tf_list[2], tf_list[4], 0)
                                else:
                                    with open('result.csv', 'r+') as f:
                                        for a in f:
                                            read_result.append(a.split(' '))
                                        f.seek(0)
                                        f.truncate()
                                        for i in read_result:
                                            if i[0] == tf_list[2]:
                                                i[1] = '1'
                                                i[2] = '0'
                                                i[3] = '\n'
                                                save_img(TEST_IMAGE_PATHS[1], tf_list[2], tf_list[4], 0)
                                            f.write(' '.join(i))
                        else:  # nothing was detected in this image, so there is no car
                            if not int(tf_list[5]):
                                with open('result.csv', 'a') as f:
                                    f.write(tf_list[2] + ' 0 1 {}0\n'.format(value))  # no car found; code 0
                                plate_dict.clear()
                                save_img(tf_list[3], tf_list[2], tf_list[4], 1)
                            break
                        value += 1

                    if plate_dict:
                        if not int(tf_list[5]):
                            with open('result.csv', 'a') as f:
                                f.write(tf_list[2] + ' 1 0\n')  # assume the plates are correct for now
                            save_img(tf_list[3], tf_list[2], tf_list[4], 0)
                        else:
                            with open('result.csv', 'r+') as f:
                                for a in f:
                                    read_result.append(a.split(' '))
                                f.seek(0)
                                f.truncate()
                                for i in read_result:
                                    if i[0] == tf_list[2]:
                                        i[1] = '1'
                                        i[2] = '0'
                                        i[3] = '\n'
                                        save_img(TEST_IMAGE_PATHS[1], tf_list[2], tf_list[4], 0)
                                    f.write(' '.join(i))
            #     print(tf_list[2].split('_')[3])
            #     for plate_key, plate_value in plate_dict.items():
            #         img_tensor = image.img_to_array(plate_value.resize((152, 52)))
            #         # print(plate_value.size)
            #         # print(img_tensor.shape)
            #         plate_value.resize((152, 52)).save('{}.jpg'.format(plate_key))
            #         img_tensor = np.expand_dims(img_tensor, axis=0)
            #         i = model.predict(img_tensor)
            #         for j in range(7):
            #             print(chars[int(np.where(i[j] == np.max(i[j]))[1])], end='')
            #         print()
                except EOFError:  # all the data has been received; the pipe raises EOFError
                    break
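The read-modify-write of result.csv appears twice above, once in the per-image loop and once after it; a sketch of factoring it into a helper (the name is hypothetical, and the accompanying save_img call is left with the caller):

def mark_plate_ok(csv_path, record_id):
    # hypothetical helper: rewrite the CSV, flipping the matching record to '1 0'
    with open(csv_path, 'r+') as f:
        rows = [line.split(' ') for line in f]
        f.seek(0)
        f.truncate()
        for row in rows:
            if row[0] == record_id:
                row[1], row[2], row[3] = '1', '0', '\n'
            f.write(' '.join(row))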
Code example #11
def models_yuce(con):
    num_classes = 90
    path_to_labels = 'D:/1/exe/mask_rcnn/mask_rcnn_test-master/training/mscoco_label_map.pbtxt'
    detection_graph = tf.Graph()  # load the object detection model
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(
                'D:/1/exe/mask_rcnn/mask_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb',
                'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

    label_map = label_map_util.load_labelmap(path_to_labels)  # load the detection class label map
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=num_classes, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)

    c1, c2 = con
    c1.close()  # the parent sends on conn1 and this child receives on conn2, so close conn1 here; leaving it open would block
    with detection_graph.as_default():
        with tf.Session() as sess:
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {
                output.name
                for op in ops for output in op.outputs
            }
            tensor_dict = {}
            for key in [
                    'num_detections', 'detection_boxes', 'detection_scores',
                    'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph(
                    ).get_tensor_by_name(tensor_name)
            detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
            detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
            real_num_detection = tf.cast(tensor_dict['num_detections'][0],
                                         tf.int32)
            detection_boxes = tf.slice(detection_boxes, [0, 0],
                                       [real_num_detection, -1])
            detection_masks = tf.slice(detection_masks, [0, 0, 0],
                                       [real_num_detection, -1, -1])

            image_tensor = tf.get_default_graph().get_tensor_by_name(
                'image_tensor:0')
            while True:
                try:  # break out of the loop when the pipe raises
                    tf_list = c2.recv()
                    value = 0  # 0 = left, 1 = right
                    for image_np_expanded in tf_list[0]:  # run each image of the violation group one by one
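                        # note: the reframe/cast/expand_dims calls below add new ops to
                        # the default graph on every iteration, so the graph grows over
                        # time; hoisting this construction out of the loop avoids that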
                        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                            detection_masks, detection_boxes,
                            image_np_expanded.shape[1],
                            image_np_expanded.shape[2])
                        detection_masks_reframed = tf.cast(
                            tf.greater(detection_masks_reframed, 0.5),
                            tf.uint8)
                        tensor_dict['detection_masks'] = tf.expand_dims(
                            detection_masks_reframed, 0)

                        print(
                            time.strftime("%Y-%m-%d %H:%M:%S ",
                                          time.localtime()), 'start')
                        output_dict = sess.run(
                            tensor_dict,
                            feed_dict={image_tensor: image_np_expanded})
                        print(
                            time.strftime("%Y-%m-%d %H:%M:%S ",
                                          time.localtime()), 'end')
                        output_dict['num_detections'] = int(
                            output_dict['num_detections'][0])
                        output_dict['detection_classes'] = output_dict[
                            'detection_classes'][0].astype(np.uint8)
                        output_dict['detection_boxes'] = output_dict[
                            'detection_boxes'][0]
                        output_dict['detection_scores'] = output_dict[
                            'detection_scores'][0]
                        output_dict['detection_masks'] = output_dict[
                            'detection_masks'][0]

                        vis_util.visualize_boxes_and_labels_on_image_array(
                            tf_list[1][value],
                            output_dict['detection_boxes'],
                            output_dict['detection_classes'],
                            output_dict['detection_scores'],
                            category_index,
                            instance_masks=output_dict.get('detection_masks'),
                            use_normalized_coordinates=True,
                            line_thickness=8)
                        image2 = Image.fromarray(tf_list[1][value])
                        image2.save('result/{}_{}{}'.format(
                            os.path.splitext(tf_list[2])[0], value,
                            os.path.splitext(tf_list[2])[1]))
                        value += 1
                except EOFError:  # all the data has been received; the pipe raises EOFError
                    break