Example #1
def draw(model_body, class_names, anchors, image_data, image_set='val',
            weights_name='trained_stage_3_best.h5', out_path="output_images", save_all=True):
    '''
    Draw predicted bounding boxes on image data and save the rendered
    images under out_path.
    '''
    if image_set == 'train':
        image_data = np.array([np.expand_dims(image, axis=0)
            for image in image_data[:int(len(image_data)*.9)]])
    elif image_set == 'val':
        image_data = np.array([np.expand_dims(image, axis=0)
            for image in image_data[int(len(image_data)*.9):]])
    elif image_set == 'all':
        image_data = np.array([np.expand_dims(image, axis=0)
            for image in image_data])
    else:
        raise ValueError("draw argument image_set must be 'train', 'val', or 'all'")
    # model.load_weights(weights_name)
    print(image_data.shape)
    model_body.load_weights(weights_name)

    # Create output variables for prediction.
    yolo_outputs = yolo_head(model_body.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(
        yolo_outputs, input_image_shape, score_threshold=0.07, iou_threshold=0)
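    # Note: score_threshold=0.07 keeps almost every candidate box, and
    # iou_threshold=0 makes non-max suppression drop any box that overlaps a
    # higher-scoring one at all; these look like debugging settings, and more
    # typical values (used in the later examples) are around 0.3 and 0.5.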

    # Run prediction on overfit image.
    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.

    if not os.path.exists(out_path):
        os.makedirs(out_path)
    for i in range(len(image_data)):
        out_boxes, out_scores, out_classes = sess.run(
            [boxes, scores, classes],
            feed_dict={
                model_body.input: image_data[i],
                input_image_shape: [image_data.shape[2], image_data.shape[3]],
                K.learning_phase(): 0
            })
        print('Found {} boxes for image.'.format(len(out_boxes)))
        print(out_boxes)

        # Plot image with predicted boxes.
        image_with_boxes = draw_boxes(image_data[i][0], out_boxes, out_classes,
                                    class_names, out_scores)
        # Save the image:
        if save_all or (len(out_boxes) > 0):
            image = PIL.Image.fromarray(image_with_boxes)
            image.save(os.path.join(out_path,str(i)+'.png'))
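
# A minimal invocation sketch for draw (hedged: create_model comes from the
# surrounding YAD2K training script, and the .npz archive and its 'images'
# field are hypothetical stand-ins for however the caller stores its data):
#
# data = np.load('processed_images.npz')
# model_body, model = create_model(anchors, class_names)
# draw(model_body, class_names, anchors, data['images'],
#      image_set='val', weights_name='trained_stage_3_best.h5',
#      out_path='output_images', save_all=False)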
Example #2
def _main(args):
    model_path = os.path.expanduser(args.model_path)
    assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
    anchors_path = os.path.expanduser(args.anchors_path)
    classes_path = os.path.expanduser(args.classes_path)
    test_path = os.path.expanduser(args.test_path)
    output_path = os.path.expanduser(args.output_path)
    label_file = args.label_file

    if not os.path.exists(output_path):
        print('Creating output path {}'.format(output_path))
        os.mkdir(output_path)

    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.

    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]

    # TODO: USE THIS INSTEAD
    # with open(anchors_path) as f:
    #     anchors = f.readline()
    #     anchors = [float(x) for x in anchors.split(',')]
    #     anchors = np.array(anchors).reshape(-1, 2)

    # yolo_model = load_model(model_path)
    # ALEXANDER HACK
    anchors = np.array(
        ((0.57273, 0.677385), (1.87446, 2.06253), (3.33843, 5.47434),
         (7.88282, 3.52778), (9.77052, 9.16828)))
    yolo_model, model = create_model(anchors,
                                     class_names,
                                     load_pretrained=False,
                                     freeze_body=True)
    model.load_weights(model_path)

    # Verify model, anchors, and classes are compatible
    num_classes = len(class_names)
    num_anchors = len(anchors)
    # TODO: Assumes dim ordering is channel last
    model_output_channels = yolo_model.layers[-1].output_shape[-1]
    assert model_output_channels == num_anchors * (num_classes + 5), \
        'Mismatch between model and given anchor and class sizes. ' \
        'Specify matching anchors and classes with --anchors_path and ' \
        '--classes_path flags.'
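    # Sanity check of the formula above: each anchor predicts 4 box
    # coordinates, 1 objectness score, and num_classes class probabilities,
    # so e.g. with 5 anchors and 80 COCO classes the final layer must emit
    # 5 * (80 + 5) = 425 channels.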
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Check if model is fully convolutional, assuming channel last order.
    model_image_size = yolo_model.layers[0].input_shape[1:3]
    is_fixed_size = model_image_size != (None, None)

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(class_names), 1., 1.)
                  for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    # TODO: Wrap these backend operations with Keras layers.
    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(yolo_outputs,
                                       input_image_shape,
                                       score_threshold=args.score_threshold,
                                       iou_threshold=args.iou_threshold)

    lf = open(label_file)
    num_tests = 10
    count = 0
    while count < num_tests:
        count += 1
        line = lf.readline()
        line = line.split(' ')
        image_file = os.path.join(utils.home(), 'data', line[0])
        image = Image.open(image_file)
        image_file = line[0]
        pts = line[1:]
        pts = np.asarray(pts, dtype=float)
        image = np.array(image)
        image = utils.imwarp(image, pts, sz=(416, 416))
        image_data = image
        image_data = np.expand_dims(image_data, 0)
        image = Image.fromarray(image)
        out_boxes, out_scores, out_classes = sess.run(
            [boxes, scores, classes],
            feed_dict={
                yolo_model.input: image_data,
                input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })
        print('Found {} boxes for {}'.format(len(out_boxes), image_file))

        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                                  size=np.floor(3e-2 * image.size[1] +
                                                0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)

            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)

            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            print(label, (left, top), (right, bottom))

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])
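            # Place the label above the box unless it would run off the top of
            # the image, in which case draw it just inside the box instead.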

            # My kingdom for a good redistributable image drawing library.
            for i in range(thickness):
                draw.rectangle([left + i, top + i, right - i, bottom - i],
                               outline=colors[c])
            draw.rectangle(
                [tuple(text_origin),
                 tuple(text_origin + label_size)],
                fill=colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw

        image.save(os.path.join(output_path, image_file), quality=90)
    lf.close()
    sess.close()
Example #3
def _main(args):
    model_path = os.path.expanduser(args.model_path)
    assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
    anchors_path = os.path.expanduser(args.anchors_path)
    classes_path = os.path.expanduser(args.classes_path)

    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.

    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]

    with open(anchors_path) as f:
        anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        anchors = np.array(anchors).reshape(-1, 2)
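    # For reference, a YAD2K anchors file is a single comma-separated line of
    # width,height pairs in grid-cell units, e.g. the YOLOv2 defaults:
    # 0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828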

    yolo_model = load_model(model_path)

    # Verify model, anchors, and classes are compatible
    num_classes = len(class_names)
    num_anchors = len(anchors)
    # TODO: Assumes dim ordering is channel last
    model_output_channels = yolo_model.layers[-1].output_shape[-1]
    assert model_output_channels == num_anchors * (num_classes + 5), \
        'Mismatch between model and given anchor and class sizes. ' \
        'Specify matching anchors and classes with --anchors_path and ' \
        '--classes_path flags.'
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Check if model is fully convolutional, assuming channel last order.
    model_image_size = yolo_model.layers[0].input_shape[1:3]
    is_fixed_size = model_image_size != (None, None)

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(class_names), 1., 1.)
                  for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    # TODO: Wrap these backend operations with Keras layers.
    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(yolo_outputs,
                                       input_image_shape,
                                       score_threshold=args.score_threshold,
                                       iou_threshold=args.iou_threshold)

    ###### initialize variables for FPS calculation
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()

    vid = cv2.VideoCapture(0)  ### TODO: will video path other than 0 be used?
    if not vid.isOpened():
        raise IOError("Couldn't open webcam")

    while True:
        ### read captured video with opencv2
        return_value, frame = vid.read()
        if not return_value:
            break
        ### convert opencv image to PIL image
        image = Image.fromarray(frame)

        if is_fixed_size:  # TODO: When resizing we can use minibatch input.
            resized_image = image.resize(tuple(reversed(model_image_size)),
                                         Image.BICUBIC)
            image_data = np.array(resized_image, dtype='float32')
        else:
            # Due to skip connection + max pooling in YOLO_v2, inputs must have
            # width and height as multiples of 32.
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            resized_image = image.resize(new_image_size, Image.BICUBIC)
            image_data = np.array(resized_image, dtype='float32')
            print(image_data.shape)

        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

        out_boxes, out_scores, out_classes = sess.run(
            [boxes, scores, classes],
            feed_dict={
                yolo_model.input: image_data,
                input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })
        ###print('Found {} boxes for {}'.format(len(out_boxes), image_file))

        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                                  size=np.floor(3e-2 * image.size[1] +
                                                0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)

            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)

            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            ###print(label, (left, top), (right, bottom))

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # My kingdom for a good redistributable image drawing library.
            for i in range(thickness):
                draw.rectangle([left + i, top + i, right - i, bottom - i],
                               outline=colors[c])
            draw.rectangle(
                [tuple(text_origin),
                 tuple(text_origin + label_size)],
                fill=colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw

        if args.show_fps:
            curr_time = timer()
            exec_time = curr_time - prev_time
            prev_time = curr_time
            accum_time = accum_time + exec_time
            curr_fps = curr_fps + 1
            if accum_time > 1:
                accum_time = accum_time - 1
                fps = "FPS: " + str(curr_fps)
                curr_fps = 0

        OpenCV_data = np.asarray(image)
        if args.show_fps:
            ### Draw FPS in top left corner
            cv2.putText(OpenCV_data,
                        text=fps,
                        org=(3, 15),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=0.50,
                        color=(0, 0, 0),
                        thickness=2)
        cv2.imshow("Results", OpenCV_data)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    vid.release()
    cv2.destroyAllWindows()
    sess.close()
Example #4
def yolo_eval(yolo_outputs, image_shape=(720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5):
    """
    Converts the output of YOLO encoding (a lot of boxes) to your predicted boxes along with their scores, box coordinates and classes.

    Arguments:
    yolo_outputs -- output of the encoding model (for image_shape of (608, 608, 3)), contains 4 tensors:
                    box_confidence: tensor of shape (None, 19, 19, 5, 1)
                    box_xy: tensor of shape (None, 19, 19, 5, 2)
                    box_wh: tensor of shape (None, 19, 19, 5, 2)
                    box_class_probs: tensor of shape (None, 19, 19, 5, 80)
    image_shape -- tensor of shape (2,) containing the input shape; here we use (720., 1280.) (has to be float32 dtype)
    max_boxes -- integer, maximum number of predicted boxes you'd like
    score_threshold -- real value; if [highest class probability score < threshold], get rid of the corresponding box
    iou_threshold -- real value, "intersection over union" threshold used for NMS filtering

    Returns:
    scores -- tensor of shape (None,), predicted score for each box
    boxes -- tensor of shape (None, 4), predicted box coordinates
    classes -- tensor of shape (None,), predicted class for each box
    """
    # Retrieve outputs of the YOLO model.
    box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs

    # Convert boxes to corner coordinates, ready for the filtering functions.
    boxes = yolo_boxes_to_corners(box_xy, box_wh)

    # Perform score filtering with a threshold of score_threshold.
    scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=score_threshold)

    # Scale boxes back to the original image shape.
    boxes = scale_boxes(boxes, image_shape)

    # Perform non-max suppression with a threshold of iou_threshold.
    scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, max_boxes=max_boxes, iou_threshold=iou_threshold)

    return scores, boxes, classes


sess = K.get_session()
class_names = read_classes("model_data/coco_classes.txt")
anchors = read_anchors("model_data/yolo_anchors.txt")
image_shape = (720., 1280.)

yolo_model = load_model("model_data/yolo.h5")

yolo_model.summary()

yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))

scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)


def predict(sess, image_file):
    """
    Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.

    Arguments:
    sess -- your TensorFlow/Keras session containing the YOLO graph
    image_file -- name of an image stored in the "images" folder

    Returns:
    out_scores -- tensor of shape (None,), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None,), class index of the predicted boxes
    """
    # Preprocess the image.
    image, image_data = preprocess_image("images/" + image_file, model_image_size=(608, 608))

    # Run the session, feeding the preprocessed image into the model input.
    out_scores, out_boxes, out_classes = sess.run(
        [scores, boxes, classes],
        feed_dict={yolo_model.input: image_data, K.learning_phase(): 0})

    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    # Save the predicted bounding box on the image
    image.save(os.path.join("out", image_file), quality=90)
    # Display the results
    output_image = scipy.misc.imread(os.path.join("out", image_file))
    imshow(output_image)

    return out_scores, out_boxes, out_classes


out_scores, out_boxes, out_classes = predict(sess, "test.jpg")
Example #5
def yolo_find():
    global open_cv_image, model_image_size, net_res, temp, install_net, sess, is_fixed_size, anchors, class_names, boxes, scores, classes, yolo_model
    global num_classes, num_anchors, input_image_shape, colors

    if not install_net:
        install_net = True
        sess = K.get_session()

        model_path = '/home/pi/robot/model_data/tiny-yolo-voc.h5'
        anchors_path = '/home/pi/robot/model_data/tiny-yolo-voc_anchors.txt'
        classes_path = '/home/pi/robot/model_data/pascal_classes.txt'
        with open(classes_path) as f:
            class_names = f.readlines()
        class_names = [c.strip() for c in class_names]

        with open(anchors_path) as f:
            anchors = f.readline()
            anchors = [float(x) for x in anchors.split(',')]
            anchors = np.array(anchors).reshape(-1, 2)

        yolo_model = load_model(model_path)
        num_classes = len(class_names)
        num_anchors = len(anchors)

        model_output_channels = yolo_model.layers[-1].output_shape[-1]
        assert model_output_channels == num_anchors * (num_classes + 5), \
            'Mismatch between model and given anchor and class sizes. ' \
            'Specify matching anchors and classes with --anchors_path and ' \
            '--classes_path flags.'
        print('{} model, anchors, and classes loaded.'.format(model_path))

        model_image_size = yolo_model.layers[0].input_shape[1:3]
        is_fixed_size = model_image_size != (None, None)

        yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
        input_image_shape = K.placeholder(shape=(2, ))
        boxes, scores, classes = yolo_eval(yolo_outputs,
                                           input_image_shape,
                                           score_threshold=.3,
                                           iou_threshold=.5)

        print("Load net done")

    # Protect the computer from overheating.
    with open("/sys/class/thermal/thermal_zone0/temp", "r") as f:
        temp = int(f.readline()) / 1000
    if temp > 55:
        robot.wait(100)
        return

    image = robot.get_frame()
    cv2_im = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = Image.fromarray(cv2_im)
    if is_fixed_size:
        resized_image = image.resize(tuple(reversed(model_image_size)),
                                     Image.BICUBIC)
        image_data = np.array(resized_image, dtype='float32')
    else:
        new_image_size = (image.width - (image.width % 32),
                          image.height - (image.height % 32))
        resized_image = image.resize(new_image_size, Image.BICUBIC)
        image_data = np.array(resized_image, dtype='float32')

    image_data /= 255.
    image_data = np.expand_dims(image_data, 0)
    out_boxes, out_scores, out_classes = sess.run(
        [boxes, scores, classes],
        feed_dict={
            yolo_model.input: image_data,
            input_image_shape: [image.size[1], image.size[0]],
            K.learning_phase(): 0
        })
    net_res = []

    for i, c in reversed(list(enumerate(out_classes))):
        predicted_class = class_names[c]
        box = out_boxes[i]
        score = out_scores[i]

        label = '{} {:.2f}'.format(predicted_class, score)
        top, left, bottom, right = box
        top = max(0, np.floor(top + 0.5).astype('int32'))
        left = max(0, np.floor(left + 0.5).astype('int32'))
        bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
        right = min(image.size[0], np.floor(right + 0.5).astype('int32'))

        net_res.append([predicted_class, (left, top), (right, bottom), score])
        print(label, (left, top), (right, bottom))
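
# net_res entries produced above have the form
# [class_name, (left, top), (right, bottom), score]. A consumer sketch
# (hedged: none of this is in the original snippet) that picks the largest
# detected person:
#
# people = [r for r in net_res if r[0] == 'person']
# if people:
#     target = max(people,
#                  key=lambda r: (r[2][0] - r[1][0]) * (r[2][1] - r[1][1]))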
Example #6
def _main(args, lf):

	### Video
	video_path = '/home/crke/Work/YAD2K/input_videos/tw_test_short.mp4'
	# Alternative inputs:
	# '/home/crke/Work/YAD2K/input_videos/harder_challenge_video.mp4'
	# '/home/crke/Work/YAD2K/input_videos/project_video.mp4'
	# '/home/crke/Work/YAD2K/input_videos/challenge_video.mp4'
	output_path = '/home/crke/Work/YAD2K/output_videos/'
	output_Video = os.path.basename(video_path)
	output_Video = os.path.join(output_path, output_Video)

	cap = cv2.VideoCapture(video_path)
	FrameCnt = 0
	fps = cap.get(cv2.CAP_PROP_FPS)
	FrameNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
	Width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
	Height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

	isModelSize = False
	if Width == 608 and Height == 608:
		isModelSize = True

	print("Video Info:")
	print("Input: ", video_path)
	print("FPS: ", fps)
	print("FrameNum: ", FrameNum)
	print("Width: ", Width)
	print("Height: ", Height)
	print("Output: ", output_Video)

	fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # DIVX, XVID, MJPG, X264, WMV1, WMV2
	outVideo = cv2.VideoWriter(output_Video, fourcc, fps, (VIDEO_SIZE[0], VIDEO_SIZE[1]))
	###

	model_path = os.path.expanduser(args.model_path)
	assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
	anchors_path = os.path.expanduser(args.anchors_path)
	classes_path = os.path.expanduser(args.classes_path)
	test_path = os.path.expanduser(args.test_path)
	output_path = os.path.expanduser(args.output_path)

	if not os.path.exists(output_path):
		print('Creating output path {}'.format(output_path))
		os.mkdir(output_path)

	sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.

	with open(classes_path) as f:
		class_names = f.readlines()
	class_names = [c.strip() for c in class_names]

	with open(anchors_path) as f:
		anchors = f.readline()
		anchors = [float(x) for x in anchors.split(',')]
		anchors = np.array(anchors).reshape(-1, 2)

	with open(CALIB_FILE_NAME, 'rb') as f:
		calib_data = pickle.load(f)
		cam_matrix = calib_data["cam_matrix"]
		dist_coeffs = calib_data["dist_coeffs"]
		img_size = calib_data["img_size"]

	with open(PERSPECTIVE_FILE_NAME, 'rb') as f:
		perspective_data = pickle.load(f)

	perspective_transform = perspective_data["perspective_transform"]
	pixels_per_meter = perspective_data['pixels_per_meter']
	orig_points = perspective_data["orig_points"]

	yolo_model = load_model(model_path)
	yolo_model.summary()
	
	# Verify model, anchors, and classes are compatible
	num_classes = len(class_names)
	num_anchors = len(anchors)
	# TODO: Assumes dim ordering is channel last
	model_output_channels = yolo_model.layers[-1].output_shape[-1]

	# This check deviates from the standard YAD2K assert (kept commented out below).
	assert model_output_channels == num_classes, \
		'Mismatch between model and given anchor and class sizes. ' \
		'Specify matching anchors and classes with --anchors_path and ' \
		'--classes_path flags.'

	# assert model_output_channels == num_anchors * (num_classes + 5), \
	# 	'Mismatch between model and given anchor and class sizes. ' \
	# 	'Specify matching anchors and classes with --anchors_path and ' \
	# 	'--classes_path flags.'

	print('{} model, anchors, and classes loaded.'.format(model_path))

	# Check if model is fully convolutional, assuming channel last order.
	model_image_size = yolo_model.layers[0].input_shape[1:3]
	is_fixed_size = model_image_size != (None, None)

	# Generate colors for drawing bounding boxes.
	hsv_tuples = [(x / len(class_names), 1., 1.)
				  for x in range(len(class_names))]
	colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
	colors = list(
		map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
			colors))
	random.seed(10101)  # Fixed seed for consistent colors across runs.
	random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
	random.seed(None)  # Reset seed to default.

	# Generate output tensor targets for filtered bounding boxes.
	# TODO: Wrap these backend operations with Keras layers.
	yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
	input_image_shape = K.placeholder(shape=(2, ))
	boxes, scores, classes = yolo_eval(
		yolo_outputs,
		input_image_shape,
		score_threshold=args.score_threshold,
		iou_threshold=args.iou_threshold)

########################################################################
	# # Image for debug
	# image_file = 'test4.jpg'
	# image_shape = (720., 1280.)
	#
	# frame, image_data = preprocess_image("images/" + image_file, (int(MODEL_SIZE[0]), int(MODEL_SIZE[1])))
	# # frame       1280x720
	# # image_data  608x608
	#
	# out_boxes, out_scores, out_classes = sess.run(
	# 	[boxes, scores, classes],
	# 	feed_dict={
	# 		yolo_model.input: image_data,
	# 		input_image_shape: [(image_shape[0]), (image_shape[1])],
	# 		K.learning_phase(): 0
	# 	})
	#
	#
	# fframe = np.array(frame)
	# fframe = lf.process_image(fframe, True, show_period=1, blocking=False)
	# frame = Image.fromarray(fframe)
	#
	# l = len(out_boxes)
	# distance = np.zeros(shape=(l ,1))
	# if not len(out_boxes) == 0:
	# 	for i in range(l):
	# 		distance[i] = calculate_position(bbox=out_boxes[i],
	# 								  transform_matrix=perspective_transform,
	# 								  warped_size=UNWARPED_SIZE,
	# 								  pix_per_meter=pixels_per_meter)
	#
	# 	print('RPOS', distance)
	# 	draw_boxes(frame, out_scores, out_boxes, out_classes, class_names, colors, distance)
	#
	# else:
	# 	distance = []
	# 	#print('No Car')
	#
	# frame.save(os.path.join('out', image_file), quality=90)

	### END
########################################################################


	image_shape = (720., 1280.)
	# Read until video is completed
	while cap.isOpened():

		ret, frame = cap.read()

		batch = 1
		if ret:
			index = (FrameCnt + 1) % batch

			frame, image_data = preprocess_frame(frame, (int(MODEL_SIZE[0]), int(MODEL_SIZE[1])))

			t0 = time.time()
			out_boxes, out_scores, out_classes = sess.run(
				[boxes, scores, classes],
				feed_dict={
					yolo_model.input: image_data,
					input_image_shape: [(image_shape[0]), (image_shape[1])],
					K.learning_phase(): 0
				})
			# out_boxes are already rescaled to the original size
			duration = time.time() - t0
			print('duration', duration)
			print('fps', 1 / duration)
			print('out_boxes', out_boxes)


			###

			# fframe = np.array(frame)
			# fframe = lf.process_image(fframe, False, show_period=40, blocking=False)
			# frame = Image.fromarray(fframe)

			###



			l = len(out_boxes)
			distance = np.zeros(shape=(l, 1))
			if not len(out_boxes) == 0:
				for i in range(l):
					distance[i] = calculate_position(bbox=out_boxes[i],
													 transform_matrix=perspective_transform,
													 warped_size=UNWARPED_SIZE,
													 pix_per_meter=pixels_per_meter)

				print('RPOS', distance)
				draw_boxes(frame, out_scores, out_boxes, out_classes, class_names, colors, distance)

			else:
				distance = []
				#print('No Car')

			pix = np.array(frame)
			pix = pix[:,:,::-1]
			outVideo.write(pix)
		# Break the loop
		else:
			break

	cap.release()
	outVideo.release()
	sess.close()
	print("Finish video convert !!!")
Example #7
def main():
    global image_name
    json_file = open('model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    model = model_from_json(loaded_model_json)
    model.load_weights("model.h5")
    print("loaded heta_map model from disk")

    sess = K.get_session()

    class_names = read_classes("model_data/pascal_classes.txt")
    anchors = read_anchors("model_data/yolo_anchors.txt")

    yolo_model = load_model("model_data/yolo.h5")

    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))

    time.sleep(1.0)
    fps = FPS().start()

    #cap = cv2.VideoCapture(1)
    imgResp = urllib.request.urlopen(url)
    imgNp = np.array(bytearray(imgResp.read()), dtype=np.uint8)
    frame = cv2.imdecode(imgNp, -1)
    #ret,frame = cap.read()

    image_shape = (float(frame.shape[0]), float(frame.shape[1]))

    scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)
    count = np.zeros(10, dtype=int)
    cnt = -1
    heat_cnt = 0
    while True:
        if heat_cnt == 1080:
            image_name += 1
            time.sleep(3.0)

            img = cv2.resize(frame, (128, 128), interpolation=cv2.INTER_AREA)
            img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            img = np.expand_dims(img, axis=0)
            img = np.expand_dims(img, axis=3)

            Y_pred = model.predict(img)

            plt.imshow(Y_pred[0, :, :, 0], cmap='hot')
            savefig("out.png")
            ##Below is the heatmap that is to be updated on firebase##
            blob = bucket.blob('main' + str(image_name) + '.png')
            file_to_upload = open('out.png', 'rb')
            blob.upload_from_file(file_to_upload)
            file_to_upload.close()

            oldrange = np.amax(Y_pred[0, :, :, 0]) - np.amin(Y_pred[0, :, :, 0])
            if oldrange == 0:
                oldrange = 1
            newrange = 0.035
            Y_pred[0, :, :, 0] = ((Y_pred[0, :, :, 0] - np.amin(Y_pred[0, :, :, 0]))
                                  * newrange) / oldrange
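            # The statement above is min-max rescaling,
            # y' = (y - min(y)) * newrange / oldrange, which maps the heatmap
            # into [0, 0.035]; summing it below approximates the person count.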
            ##Below count is the count of people in the room##

            person_count = np.sum(Y_pred[0, :, :, 0])
            db.reference('/Heatmap').update(
                {'numberOfPeople': str(person_count)})
            print(person_count)
            heat_cnt = 0

        else:
            if cnt == 9:
                counts = np.bincount(count)
                print('Number of persons are ' + str(np.argmax(counts)))
                if (np.argmax(counts)):
                    r = req.post('http://192.168.1.2:443/lightNumber',
                                 data='{"ac":true}',
                                 verify=False)
                    print(r.text)
                else:
                    r = req.post('http://192.168.1.2:443/lightNumber',
                                 data='{"ac":false}',
                                 verify=False)
                    print(r.text)
                cnt = 0
            else:
                cnt += 1

            #ret, frame = cap.read()
            imgResp = urllib.request.urlopen(url)
            imgNp = np.array(bytearray(imgResp.read()), dtype=np.uint8)
            frame = cv2.imdecode(imgNp, -1)

            image, image_data = preprocess_image(frame,
                                                 model_image_size=(416, 416))

            out_scores, out_boxes, out_classes = sess.run(
                [scores, boxes, classes],
                feed_dict={
                    yolo_model.input: image_data,
                    K.learning_phase(): 0
                })

            colors = generate_colors(class_names)

            count[cnt] = draw_boxes(image, out_scores, out_boxes, out_classes,
                                    class_names, colors)
            cv2.imshow('frame', np.array(image)[:, :, ::-1])

            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                break
            fps.update()
            heat_cnt += 1

    fps.stop()
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    cv2.destroyAllWindows()
Example #8
def _main(args):
    model_path = os.path.expanduser(args.model_path)
    assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
    anchors_path = os.path.expanduser(args.anchors_path)
    classes_path = os.path.expanduser(args.classes_path)

    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.

    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]

    with open(anchors_path) as f:
        anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        anchors = np.array(anchors).reshape(-1, 2)

    yolo_model = load_model(model_path)

    # Verify model, anchors, and classes are compatible
    num_classes = len(class_names)
    num_anchors = len(anchors)
    # TODO: Assumes dim ordering is channel last
    model_output_channels = yolo_model.layers[-1].output_shape[-1]
    assert model_output_channels == num_anchors * (num_classes + 5), \
        'Mismatch between model and given anchor and class sizes. ' \
        'Specify matching anchors and classes with --anchors_path and ' \
        '--classes_path flags.'
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Check if model is fully convolutional, assuming channel last order.
    model_image_size = yolo_model.layers[0].input_shape[1:3]
    is_fixed_size = model_image_size != (None, None)

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(class_names), 1., 1.)
                  for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    # TODO: Wrap these backend operations with Keras layers.
    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(yolo_outputs,
                                       input_image_shape,
                                       score_threshold=args.score_threshold,
                                       iou_threshold=args.iou_threshold)

    # cap = cv.VideoCapture('.\yolo_test.mp4')
    cap = cv.VideoCapture(0)
    # fourcc = cv.VideoWriter_fourcc(*'MJPG')
    # out = cv.VideoWriter('yolo_test_output.avi',fourcc, 20.0, (int(_output_width), int(_output_height)))

    while cap.isOpened():
        _, image = cap.read()
        t1 = common.Clock()

        # Due to skip connection + max pooling in YOLO_v2, inputs must have
        # width and height as multiples of 32.
        resized_image = cv.resize(image, tuple(reversed(model_image_size)))
        resized_image = cv.cvtColor(resized_image, cv.COLOR_BGR2RGB)
        image_data = np.array(resized_image, dtype='float32')

        image_size = image.shape

        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

        out_boxes, out_scores, out_classes = sess.run(
            [boxes, scores, classes],
            feed_dict={
                yolo_model.input: image_data,
                input_image_shape: [int(image_size[0]),
                                    int(image_size[1])],
                K.learning_phase(): 0
            })

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            textLabel = '{} {:.2f}'.format(predicted_class, score)

            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image_size[0], np.floor(bottom + 0.5).astype('int32'))
            right = min(image_size[1], np.floor(right + 0.5).astype('int32'))
            print(textLabel, (left, top), (right, bottom))

            (retval, baseLine) = cv.getTextSize(textLabel,
                                                cv.FONT_HERSHEY_COMPLEX, 1, 1)
            textOrg = (left, top)
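            # cv.getTextSize returns ((text_width, text_height), baseline).
            # The two rectangles below draw a black border and then a white
            # filled background for the label before putText writes it on top.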

            cv.rectangle(image, (left, top), (right, bottom), (255, 255, 0), 2)
            cv.rectangle(
                image, (textOrg[0] - 5, textOrg[1] + baseLine - 5),
                (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5),
                (0, 0, 0), 2)
            cv.rectangle(
                image, (textOrg[0] - 5, textOrg[1] + baseLine - 5),
                (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5),
                (255, 255, 255), -1)
            cv.putText(image, textLabel, textOrg, cv.FONT_HERSHEY_DUPLEX, 1,
                       (0, 0, 0), 1)

        t2 = common.Clock() - t1
        common.draw_str(image, (10, 20), 'FPS: %.1f' % (1000 // (t2 * 1000)))
        cv.imshow("Hargow Classifier", image)
        # out.write(image)
        if cv.waitKey(1) == 27:
            break

    cap.release()
    # out.release()
    cv.destroyAllWindows()
    sess.close()
Example #9
def yoloThread():
    global frames,times
    model_path =   scriptFolder+"tiny.h5"     #Model weights
    sess = K.get_session()                    
    print("[PiCam] Loading anchors file...")
    anchors = [1.08,1.19,3.42,4.41,6.63,11.38,9.42,5.11,16.62,10.52] #Tiny Yolo anchors' values 
    anchors = np.array(anchors).reshape(-1, 2)
    print("[PiCam] Loading yolo model ({})...".format(scriptFolder+"tiny.h5"))
    yolo_model = load_model(model_path)  #Loading Tiny YOLO
    num_anchors = len(anchors)      
    print('[PiCam] YOLO model ({}) loaded!'.format(model_path))

    model_image_size = yolo_model.layers[0].input_shape[1:3] #Get input shape
    yolo_outputs = yolo_head(yolo_model.output, anchors, 20) 
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(yolo_outputs,input_image_shape,score_threshold=0.3,iou_threshold=0.4)
    num = 0 #Starting Photo's name
    old_time = 0.0 #Latest time

    print("[PiCam] YOLO Thread started!")
### Loop:
    M = cv2.getRotationMatrix2D((w/2,h/2),90,1)
    while True:
        if len(frames) != 0:
            try:
                cv2.waitKey(17) 
                mat = frames[0] #Get First frame with movements
                mat = cv2.warpAffine(mat,M,(w,h))
                cv2.imwrite('/tmp/img.jpg', mat)
                mat = cv2.resize(mat, tuple(reversed(model_image_size)))  # cv2.resize expects (width, height)
                in_mat = np.array(mat,dtype='float32')
                in_mat /= 255.  # Scale pixel values to [0, 1]
                in_mat = np.expand_dims(in_mat, 0)
                if (times[0] - old_time) > time_chunck:
                    #Searching for detection:
                    out_boxes, out_scores, out_classes = sess.run(
                        [boxes, scores, classes],
                        feed_dict={yolo_model.input: in_mat,
                                   input_image_shape: [mat.shape[0], mat.shape[1]],  # [height, width]
                                   K.learning_phase(): 0})
                    if len(out_boxes) > 0:
                        #xs,ys = [],[]  #X's and Y's coordinate
                        persons = []
                        for i, c in reversed(list(enumerate(out_classes))):
                            if c == 14: #14 is the label for persons
                                box = out_boxes[i]
                                top, left, bottom, right = box
                                top = max(0, np.floor(top + 0.5).astype('int32'))
                                left = max(0, np.floor(left + 0.5).astype('int32'))
                                bottom = min(mat.shape[0], np.floor(bottom + 0.5).astype('int32'))
                                right = min(mat.shape[1], np.floor(right + 0.5).astype('int32'))
                                persons.append({"top": top, "left": left, "bottom": bottom, "right": right})
                                #xs.append(left+i)
                                #xs.append(right-i)
                                #ys.append(top+i)
                                #ys.append(bottom-i)
                        # Draw persons.
                        for p in persons:
                            cv2.rectangle(mat, (p["left"], p["bottom"]), (p["right"], p["top"]), (0, 225, 0), 2)
                        # Save image.
                        split_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%S.%f").split(' ')
                        img_dir_path = scriptFolder + "imgs/" + split_datetime[0]
                        os.makedirs(img_dir_path, exist_ok=True)
                        img_name = img_dir_path + "/" + split_datetime[1] + ".png"
                        #img_name = scriptFolder+"imgs/{}.png".format(num)
                        # cv2.imwrite(img_name,mat[min(ys):max(ys),min(xs):max(xs)]) #Only saving the rectangle in which persons' got detected
                        ### D.Platon changes. Save whole frame with highlighted person.
                        #min_xs = min(xs)
                        #min_ys = min(ys)
                        #max_xs = max(xs)
                        #max_ys = max(ys)
                        #cv2.rectangle(mat, (min_xs, min_ys), (max_xs, max_ys), (0, 0, 255), 2)
                        cv2.imwrite(img_name, mat)
                        out_s = "[{}] Detected person (taken {}s)!\n".format(time.strftime("%H:%M:%S"),round(time.time()-times[0])) #Log output
                        print(out_s)
                        flog.write(out_s)
                        flog.flush()
                        #try: #Preventig Problems like no connection #I've used subprocess to set a timeout
                        #    subprocess.call("telegram-cli -W -e \'send_photo {} {} \' ".format(telegram_user,img_name),timeout=30,shell=True)
                        #except Exception as exc:
                        #    print("[PiCam] Some error occured in YOLO Thread ({}) :".format(time.strftime("%H:%M:%S")),exc)
                        num += 1
                        old_time = times[0] #Updating detection time
            except Exception as ex:
                print("[PiCam] Some error occured in YOLO Thread ({}) :".format(time.strftime("%H:%M:%S")),ex)
            del times[0]   #Deleting first Detection time
            del frames[0]  #Deleting first Frame
        cv2.waitKey(50)
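
# yoloThread consumes the global frames/times lists that a capture thread is
# expected to fill. A minimal producer sketch (hedged: the camera setup and
# the motion_detected trigger are assumptions, not part of this snippet):
#
# def cameraThread():
#     cap = cv2.VideoCapture(0)
#     while True:
#         ok, frame = cap.read()
#         if ok and motion_detected(frame):  # hypothetical motion check
#             frames.append(frame)
#             times.append(time.time())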
Example #10
def pipeline(im):

    image = PIL.Image.fromarray(im)
    # Check if model is fully convolutional, assuming channel last order.
    model_image_size = yolo_model.layers[0].input_shape[1:3]
    is_fixed_size = model_image_size != (None, None)

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(class_names), 1., 1.)
                  for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    # TODO: Wrap these backend operations with Keras layers.
    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(
        yolo_outputs,
        input_image_shape,
        score_threshold=.3,
        iou_threshold=.5)

    if is_fixed_size:  # TODO: When resizing we can use minibatch input.
        resized_image = image.resize(tuple(reversed(model_image_size)), Image.BICUBIC)
        image_data = np.array(resized_image, dtype='float32')
    else:
        # Due to skip connection + max pooling in YOLO_v2, inputs must have
        # width and height as multiples of 32.
        new_image_size = (image.width - (image.width % 32),
                            image.height - (image.height % 32))
        resized_image = image.resize(new_image_size, Image.BICUBIC)
        image_data = np.array(resized_image, dtype='float32')

    image_data /= 255.
    image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

    out_boxes, out_scores, out_classes = sess.run(
        [boxes, scores, classes],
        feed_dict={
            yolo_model.input: image_data,
            input_image_shape: [image.size[1], image.size[0]],
            K.learning_phase(): 0
        })
    print('Found {} boxes '.format(len(out_boxes)))

    font = ImageFont.truetype(
        font='font/FiraMono-Medium.otf',
        size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
    thickness = (image.size[0] + image.size[1]) // 300

    for i, c in reversed(list(enumerate(out_classes))):
        predicted_class = class_names[c]
        box = out_boxes[i]
        score = out_scores[i]

        label = '{} {:.2f}'.format(predicted_class, score)
        draw = ImageDraw.Draw(image)
        label_size = draw.textsize(label, font)

        top, left, bottom, right = box
        top = max(0, np.floor(top + 0.5).astype('int32'))
        left = max(0, np.floor(left + 0.5).astype('int32'))
        bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
        right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
        print(label, (left, top), (right, bottom))
        

        if top - label_size[1] >= 0:
            text_origin = np.array([left, top - label_size[1]])
        else:
            text_origin = np.array([left, top + 1])


        for i in range(thickness):
            draw.rectangle(
                [left + i, top + i, right - i, bottom - i],
                outline=colors[c])
        draw.rectangle(
            [tuple(text_origin), tuple(text_origin + label_size)],
            fill=colors[c])
        draw.text(text_origin, label, fill=(0, 0, 0), font=font)
        del draw

    return np.array(image)
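
# pipeline() takes and returns an RGB ndarray, which matches moviepy's
# fl_image contract. A usage sketch (the file names are illustrative; note
# that pipeline rebuilds the colors and the yolo_eval graph on every frame,
# so hoisting that setup out of the function would speed it up considerably):
#
# from moviepy.editor import VideoFileClip
# clip = VideoFileClip("project_video.mp4")
# out_clip = clip.fl_image(pipeline)
# out_clip.write_videofile("project_video_out.mp4", audio=False)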
Example #11
def getFrame(sec):
    vidcap.set(cv2.CAP_PROP_POS_MSEC, sec * 1000)
    hasFrames, image = vidcap.read()
    if hasFrames:
        path = "/Users/prachis/pet_projects/YOLOv2_keras/images/"
        cv2.imwrite(os.path.join(path, "image" + str(count) + ".jpg"), image)
        # cv2.imwrite("image"+str(count)+".jpg", image)     # save frame as JPG file

        input_image_name = "image" + str(count) + ".jpg"

        # Obtaining the dimensions of the input image
        input_image = Image.open(
            "/Users/prachis/pet_projects/YOLOv2_keras/images/" +
            input_image_name)
        width, height = input_image.size
        width = np.array(width, dtype=float)
        height = np.array(height, dtype=float)

        # Assign the shape of the input image to the image_shape variable
        image_shape = (height, width)

        # Loading the classes and the anchor boxes that are provided in the model_data folder
        class_names = read_classes("model_data/coco_classes.txt")
        anchors = read_anchors("model_data/yolo_anchors.txt")

        # Load the pretrained model. Please refer to the README file for info on how to obtain the yolo.h5 file
        yolo_model = load_model("model_data/yolo.h5")

        # Print the summary of the model
        # yolo_model.summary()

        # Convert final layer features to bounding box parameters
        yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))

        # Now the yolo_eval function selects the best boxes using filtering and non-max suppression techniques.
        # To see how this works in detail, refer to the keras_yolo.py file in yad2k/models
        boxes, scores, classes = yolo_eval(yolo_outputs, image_shape)

        # Initiate a session
        sess = K.get_session()

        # Preprocess the input image before feeding into the convolutional network
        image, image_data = preprocess_image(
            "/Users/prachis/pet_projects/YOLOv2_keras/images/" +
            input_image_name,
            model_image_size=(608, 608))

        # Run the session
        out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                      feed_dict={
                                                          yolo_model.input:
                                                          image_data,
                                                          K.learning_phase(): 0
                                                      })

        # Print the results
        print('Found {} boxes for {}'.format(len(out_boxes), input_image_name))
        # Produce the colors for the bounding boxes
        colors = generate_colors(class_names)
        # Draw the bounding boxes
        draw_boxes(image, out_scores, out_boxes, out_classes, class_names,
                   colors)
        # Apply the predicted bounding boxes to the image and save it
        image.save(os.path.join(
            "/Users/prachis/pet_projects/YOLOv2_keras/out/", input_image_name),
                   quality=90)
        output_image = imageio.imread(
            os.path.join("/Users/prachis/pet_projects/YOLOv2_keras/out/",
                         input_image_name))

    return hasFrames
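
# A typical driver loop for getFrame (a sketch; vidcap and count are the
# module-level globals the function relies on, and the file name is
# illustrative). Note that getFrame reloads yolo.h5 on every call, so a
# faster version would hoist the model loading out of the function:
#
# vidcap = cv2.VideoCapture('input.mp4')
# count = 1
# sec = 0
# frameRate = 0.5  # sample one frame every half second
# success = getFrame(sec)
# while success:
#     count += 1
#     sec = round(sec + frameRate, 2)
#     success = getFrame(sec)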
Example #12
    def predict(self, test_path, output_path=None, draw_box=True, targets=None, targets_threshold=0.8):
        """
        Detect objects in a folder of images.
        :param test_path: path of the folder of images to detect
        :param output_path: output folder path (annotated images are saved here if given)
        :param draw_box: whether to draw the detected boxes on the saved images
        :param targets: optional list of class ids to keep
        :param targets_threshold: minimum score for a targeted class to be kept
        :return: dict mapping each image file name to its list of bboxes
        """
        assert test_path is not None
        if targets is not None:
            assert isinstance(targets, list)

        sess = K.get_session()

        # Load the class names
        with open(self._classes_path) as f:
            class_names = [c.strip() for c in f.readlines()]

        # Load the anchor sizes
        with open(self._anchors_path) as f:
            anchors = f.readline()
            anchors = [float(x) for x in anchors.split(',')]
            anchors = np.array(anchors).reshape(-1, 2)

        if output_path is not None and not os.path.exists(output_path):
            os.mkdir(output_path)

        num_classes = len(class_names)  # default:80
        num_anchors = len(anchors)  # default:5*2
        # TODO: Assumes dim ordering is channel last
        model_output_channels = self.model.layers[-1].output_shape[-1]
        assert model_output_channels == num_anchors * (num_classes + 5), \
            'Mismatch between model and given anchor and class sizes. ' \
            'Specify matching anchors and classes with --anchors_path and ' \
            '--classes_path flags.'

        # Check if model is fully convolutional, assuming channel last order.
        model_image_size = self.model.layers[0].input_shape[1:3]
        is_fixed_size = model_image_size != (None, None)

        # Generate output tensor targets for filtered bounding boxes.
        # TODO: Wrap these backend operations with Keras layers.
        yolo_outputs = yolo_head(self.model.output, anchors, len(class_names))
        input_image_shape = K.placeholder(shape=(2,))
        boxes, scores, classes = yolo_eval(
            yolo_outputs,
            input_image_shape,
            score_threshold=self.score_threshold,
            iou_threshold=self.iou_threshold)

        # Generate colors for drawing bounding boxes.
        hsv_tuples = [(x / len(class_names), 1., 1.)
                      for x in range(len(class_names))]
        colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                colors))
        random.seed(10101)  # Fixed seed for consistent colors across runs.
        random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
        random.seed(None)  # Reset seed to default.

        images_bboxes = {}

        for image_file in tqdm(os.listdir(test_path)):
            try:
                image_type = imghdr.what(os.path.join(test_path, image_file))
                if not image_type:
                    continue
            except IsADirectoryError:
                continue

            image = Image.open(os.path.join(test_path, image_file))
            if is_fixed_size:  # TODO: When resizing we can use minibatch input.
                resized_image = image.resize(
                    tuple(reversed(model_image_size)), Image.BICUBIC)
                image_data = np.array(resized_image, dtype='float32')
            else:
                # Due to skip connection + max pooling in YOLO_v2, inputs must have
                # width and height as multiples of 32.
                new_image_size = (image.width - (image.width % 32),
                                  image.height - (image.height % 32))
                resized_image = image.resize(new_image_size, Image.BICUBIC)
                image_data = np.array(resized_image, dtype='float32')

            image_data /= 255.
            image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

            out_boxes, out_scores, out_classes = sess.run(
                [boxes, scores, classes],
                feed_dict={
                    self.model.input: image_data,
                    input_image_shape: [image.size[1], image.size[0]],
                    K.learning_phase(): 0
                })
            # print('Found {} boxes for {}'.format(len(out_boxes), image_file))

            font = ImageFont.truetype(
                font='FiraMono-Medium.otf',
                size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
            thickness = (image.size[0] + image.size[1]) // 300

            bboxes = []

            for i, c in reversed(list(enumerate(out_classes))):
                predicted_class = class_names[c]
                box = out_boxes[i]
                score = out_scores[i]
                
                if targets is not None:
                    if c not in targets or score < targets_threshold:
                        continue

                label = '{} {:.2f}'.format(predicted_class, score)

                top, left, bottom, right = box
                top = max(0, np.floor(top + 0.5).astype('int32'))
                left = max(0, np.floor(left + 0.5).astype('int32'))
                bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
                right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
                # print(label, (left, top), (right, bottom))

                bboxes.append((left, top, right - left, bottom - top, c, predicted_class, score))
                # Draw the bounding box
                if output_path is not None and draw_box:
                    draw = ImageDraw.Draw(image)
                    label_size = draw.textsize(label, font)

                    if top - label_size[1] >= 0:
                        text_origin = np.array([left, top - label_size[1]])
                    else:
                        text_origin = np.array([left, top + 1])

                    # My kingdom for a good redistributable image drawing library.
                    for j in range(thickness):
                        draw.rectangle(
                            [left + j, top + j, right - j, bottom - j],
                            outline=colors[c])
                    draw.rectangle(
                        [tuple(text_origin), tuple(text_origin + label_size)],
                        fill=colors[c])
                    draw.text(text_origin, label, fill=(0, 0, 0), font=font)
                    del draw
            # Save and record the image only if at least one bbox met the criteria.
            if len(bboxes) > 0:
                images_bboxes.update({image_file: bboxes})
                if output_path is not None:
                    image.save(os.path.join(output_path, image_file), quality=100)

        # sess.close()
        return images_bboxes

# Summary of the layers the model contains.

yolo_model.summary()


## 3.3) Convert output of the model to usable bounding box tensors

# The `yolo_head` function is defined in [`keras_yolo.py`](https://github.com/allanzelener/YAD2K/blob/master/yad2k/models/keras_yolo.py).

# Run this cell to pass the tensor through the non-trivial processing and conversion steps.
yolo_outputs = yolo_head(
    yolo_model.output, anchors, len(class_names)
)  # Adds yolo_outputs to the graph, yielding all of yolo_model's predicted boxes in the correct format.

# This set of four tensors is ready to be used as input by the `yolo_eval` function.
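# For reference, the four tensors returned by `yolo_head` (assumed shapes for a
# 608x608 input with 5 anchors and 80 classes; the spatial grid is 608/32 = 19):
#   box_xy          (None, 19, 19, 5, 2)   predicted box centers
#   box_wh          (None, 19, 19, 5, 2)   predicted box sizes
#   box_confidence  (None, 19, 19, 5, 1)   objectness scores
#   box_class_probs (None, 19, 19, 5, 80)  per-class probabilities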

## 3.4) Filtering boxes

# Use previously implemented `yolo_eval`.

scores, boxes, classes = yolo_eval(
    yolo_outputs,
    image_shape)  # to perform filtering and select only the best boxes

## 3.5) Run the graph on an image

# We will need to run a TensorFlow session to have it compute `scores`, `boxes`, and `classes`.
Example #14
def main():

    sess = K.get_session()

    class_names = read_classes("model_data/coco_classes.txt")
    anchors = read_anchors("model_data/yolo_anchors.txt")
    image_shape = (480., 640.)
    # This computer's camera resolution is (480, 640).

    yolo_model = load_model("model_data/yolo.h5")

    # Inspect the model's layers
    # yolo_model.summary()

    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))

    scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)

    # Framework startup ends here >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    # Thread execution begins below <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

    print('Main thread start time: {}'.format(time.strftime("%Y-%m-%d %H:%M:%S")))
    print("-----------------------------------------------------------------------------------------------------------")

    yellow_time = threading.Thread(target=task_thread1, name='T1')
    yellow_time.start()
    print('Pedestrian detection start time: {}'.format(time.strftime("%Y-%m-%d %H:%M:%S")))
    # time.sleep(4.5)
    # Run the pedestrian detection routine
    persones = only_detection_number(sess, 'persones', yolo_model, scores, boxes, classes, class_names)
    # Pedestrian crosswalk green-light algorithm
    t = Person_green_light(persones, 10, 1.2)
    # Database statement
    cursor.execute("insert into lightdata(dtime,lno,dnumber) values(%s,%s,%s)",(time.strftime("%Y-%m-%d"),1,persones))
    # Database statement
    print('Pedestrian detection end time: {}'.format(time.strftime("%Y-%m-%d %H:%M:%S")))
    yellow_time.join()

    persones_green_time = threading.Thread(target=task_thread2, args=(t,), name='T2')  # Reserve five seconds for the detection algorithm
    persones_green_time.start()  # 行人绿灯时间启动
    persones_green_time.join()

    wait_time = threading.Thread(target=task_thread3, name='T3')
    wait_time.start()  # wait_time serves here as a pause that gives the algorithm time to run

    print('Vehicle detection start time: {}'.format(time.strftime("%Y-%m-%d %H:%M:%S")))
    # Roadway green-light algorithm
    cars = only_detection_number(sess, 'cars', yolo_model, scores, boxes, classes, class_names)
    t = Car_green_light(cars, 5, 5, 1, 4, 1)
    # Database statement
    cursor.execute("insert into lightdata(dtime,lno,dnumber) values(%s,%s,%s)",(time.strftime("%Y-%m-%d"),2,cars))
    # Database statement
    print('Vehicle detection end time: {}'.format(time.strftime("%Y-%m-%d %H:%M:%S")))
    wait_time.join()  # Main thread pauses until the worker thread finishes

    cars_green_time = threading.Thread(target=task_thread4, args=(t,), name='T')
    cars_green_time.start()
    # Database statement
    conn.commit()
    # Database statement
    cars_green_time.join()  # Main thread pauses until the worker thread finishes

    print("----------------------------------------------------------------------------------------------------------")
    print('Main thread end time: {}'.format(time.strftime("%Y-%m-%d %H:%M:%S")))
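
# Person_green_light and Car_green_light are external helpers not shown in this
# example. A minimal sketch of what such a timing heuristic might look like
# (hypothetical signature and formula, not the author's actual code):
def person_green_light_sketch(person_count, base_seconds, per_person_seconds):
    # Hypothetical: grow the green phase with the number of detected pedestrians.
    return base_seconds + person_count * per_person_seconds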
Example #15

def _main(args):
    model_path = os.path.expanduser(args.model_path)
    assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
    anchors_path = os.path.expanduser(args.anchors_path)
    classes_path = os.path.expanduser(args.classes_path)
    test_path = os.path.expanduser(args.test_path)
    output_path = os.path.expanduser(args.output_path)

    if not os.path.exists(output_path):
        print('Creating output path {}'.format(output_path))
        os.mkdir(output_path)

    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.

    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]

    with open(anchors_path) as f:
        anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        anchors = np.array(anchors).reshape(-1, 2)

    yolo_model = load_model(model_path)

    # Verify model, anchors, and classes are compatible
    num_classes = len(class_names)
    num_anchors = len(anchors)
    # TODO: Assumes dim ordering is channel last
    model_output_channels = yolo_model.layers[-1].output_shape[-1]
    assert model_output_channels == num_anchors * (num_classes + 5), \
        'Mismatch between model and given anchor and class sizes. ' \
        'Specify matching anchors and classes with --anchors_path and ' \
        '--classes_path flags.'
    print('{} model, anchors, and classes loaded.'.format(model_path))
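    # For example, the 80-class COCO model with 5 anchors must end in
    # 5 * (80 + 5) = 425 output channels: each anchor predicts 4 box
    # coordinates, 1 objectness score, and 80 class probabilities.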

    # Check if model is fully convolutional, assuming channel last order.
    model_image_size = yolo_model.layers[0].input_shape[1:3]
    is_fixed_size = model_image_size != (None, None)
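    # An input shape of (None, None) would mean the model is fully convolutional
    # and accepts any input whose sides are multiples of 32.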

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(class_names), 1., 1.)
                  for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    # TODO: Wrap these backend operations with Keras layers.
    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(yolo_outputs,
                                       input_image_shape,
                                       score_threshold=args.score_threshold,
                                       iou_threshold=args.iou_threshold)

    # Video URL: a local file path or a stream URL
    videoUrl = "C:\\Users\\Jason\\Scripts\\YOLO\\YAD2K-master\\images\\20200102   Surf Cam Surf spot Twin Lion beach in Toucheng Yilan County Taiwan ROC.mp4"  #"https://thbcctv01.thb.gov.tw/T1A-9K+700"
    cap = cv2.VideoCapture(videoUrl)

    while True:
        try:
            ret, frame = cap.read()
            # cvtColor raises if the frame is None (stream hiccup), triggering the reconnect below.
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
        except Exception:
            cap = cv2.VideoCapture(videoUrl)
            import time
            time.sleep(0.5)
            continue

        if cv2.waitKey(1) & 0xFF == ord('q'):
            out = cv2.imwrite('capture.jpg', frame)
            break
        frame = cv2.resize(frame, tuple(reversed(model_image_size)),
                           interpolation=cv2.INTER_CUBIC)  # cv2 flag; Image.BICUBIC is a PIL constant
        image_data = np.array(frame, dtype='float32')
        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.
        out_boxes, out_scores, out_classes = sess.run(
            [boxes, scores, classes],
            feed_dict={
                yolo_model.input: image_data,
                input_image_shape: [frame.shape[0], frame.shape[1]],  # [height, width]
                K.learning_phase(): 0
            })
        # print('Found {} boxes for {}'.format(len(out_boxes), image_file))

        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                                  size=np.floor(3e-2 * frame.shape[0] +
                                                0.5).astype('int32'))
        thickness = (frame.shape[0] + frame.shape[1]) // 300

        img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)

            draw = ImageDraw.Draw(img)
            label_size = draw.textsize(label, font)

            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(frame.shape[0],
                         np.floor(bottom + 0.5).astype('int32'))
            right = min(frame.shape[1], np.floor(right + 0.5).astype('int32'))
            print(label, (left, top), (right, bottom))

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # My kingdom for a good redistributable image drawing library.
            for i in range(thickness):
                draw.rectangle([left + i, top + i, right - i, bottom - i],
                               outline=colors[c])
            draw.rectangle(
                [tuple(text_origin),
                 tuple(text_origin + label_size)],
                fill=colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw
        cv_img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
        resized = cv2.resize(cv_img, (640, 480), interpolation=cv2.INTER_AREA)
        cv2.imshow('frame', resized)
        # image.save(os.path.join(output_path, image_file), quality=90)
    sess.close()
    cap.release()
    cv2.destroyAllWindows()
Example #16
def _main(args):
    model_path = os.path.expanduser(args.model_path)
    start_time = timer()
    assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
    anchors_path = os.path.expanduser(args.anchors_path)
    classes_path = os.path.expanduser(args.classes_path)
    input_path = os.path.expanduser(args.input_path)
    output_path = os.path.expanduser(args.output_path)

    if not os.path.exists(output_path):
        print('Creating output path {}'.format(output_path))
        os.mkdir(output_path)

    logging.basicConfig(filename=output_path + "/tracking.log",
                        level=logging.DEBUG)

    #parse car positions and angles
    print("Parsing timestamps and oxts files...")
    if args.oxts.startswith('..'):
        parse_oxts(input_path + "/" + args.oxts,
                   input_path + "/" + args.time_stamps)
    else:
        parse_oxts(args.oxts, args.time_stamps)
    print("Done. Data acquired.")

    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.

    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]

    with open(anchors_path) as f:
        anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        anchors = np.array(anchors).reshape(-1, 2)

    yolo_model = load_model(model_path)

    # Verify model, anchors, and classes are compatible
    num_classes = len(class_names)
    num_anchors = len(anchors)
    model_output_channels = yolo_model.layers[-1].output_shape[-1]
    assert model_output_channels == num_anchors * (num_classes + 5), \
        'Mismatch between model and given anchor and class sizes. ' \
        'Specify matching anchors and classes with --anchors_path and ' \
        '--classes_path flags.'
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Check if model is fully convolutional, assuming channel last order.
    model_image_size = yolo_model.layers[0].input_shape[1:3]
    is_fixed_size = model_image_size != (None, None)

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(class_names), 1., 1.)
                  for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(yolo_outputs,
                                       input_image_shape,
                                       score_threshold=args.score_threshold,
                                       iou_threshold=args.iou_threshold)

    frame_idx = 0

    for image_file in os.listdir(input_path):
        try:
            image_type = imghdr.what(os.path.join(input_path, image_file))
            if not image_type:
                continue
        except IsADirectoryError:
            continue

        image = Image.open(os.path.join(input_path, image_file))
        if is_fixed_size:  # TODO: When resizing we can use minibatch input.
            resized_image = image.resize(tuple(reversed(model_image_size)),
                                         Image.BICUBIC)
            image_data = np.array(resized_image, dtype='float32')
        else:
            # Due to skip connection + max pooling in YOLO_v2, inputs must have
            # width and height as multiples of 32.
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            resized_image = image.resize(new_image_size, Image.BICUBIC)
            image_data = np.array(resized_image, dtype='float32')
            print(image_data.shape)

        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

        out_boxes, out_scores, out_classes = sess.run(
            [boxes, scores, classes],
            feed_dict={
                yolo_model.input: image_data,
                input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })

        print('Found {} boxes for {}'.format(len(out_boxes), image_file))

        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                                  size=np.floor(3e-2 * image.size[1] +
                                                0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300

        logging.info("Img: " + str(image_file))

        boxes_data = []

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = class_names[c]
            box = out_boxes[i]
            box = [max(0, v) for v in box]  # Coordinates can occasionally come back negative.
            score = out_scores[i]

            # log positions

            obj_coord = np.array([])

            if predicted_class in [
                    "person", "bicycle", "car", "motorbike", "bus", "train",
                    "truck"
            ] and score > 0.2:  # Object classes to track
                if predicted_class in ["bus", "truck"]:  # Treat large vehicles as cars
                    predicted_class = "car"
                obj_coord = computeCoordinates(box, frame_idx)
                if obj_coord is not None:
                    hist = histogram(image, box)
                    # Create the detection record and store it
                    boxes_data.append({
                        'predicted_class': predicted_class,
                        'score': float(score),
                        'coord': obj_coord,
                        'hist': hist
                    })
                    logging.info(predicted_class + " :" + str(obj_coord) +
                                 " | " + str(np.linalg.norm(obj_coord)))

            # end log positions
            if saveImages:
                if obj_coord is not None:
                    label = '{} {:.2f} {} {:.2f}'.format(
                        predicted_class, score, str(obj_coord),
                        np.linalg.norm(obj_coord))
                else:
                    label = '{} {:.2f}'.format(predicted_class, score)
                draw = ImageDraw.Draw(image)
                label_size = draw.textsize(label, font)

                top, left, bottom, right = box
                top = max(0, np.floor(top + 0.5).astype('int32'))
                left = max(0, np.floor(left + 0.5).astype('int32'))
                bottom = min(image.size[1],
                             np.floor(bottom + 0.5).astype('int32'))
                right = min(image.size[0],
                            np.floor(right + 0.5).astype('int32'))
                print(label, (left, top), (right, bottom))

                if top - label_size[1] >= 0:
                    text_origin = np.array([left, top - label_size[1]])
                else:
                    text_origin = np.array([left, top + 1])

                for i in range(thickness):
                    draw.rectangle([left + i, top + i, right - i, bottom - i],
                                   outline=colors[c])
                draw.rectangle(
                    [tuple(text_origin),
                     tuple(text_origin + label_size)],
                    fill=colors[c])
                draw.text(text_origin, label, fill=(0, 0, 0), font=font)
                del draw

        frame_idx += 1
        global data_frames
        data_frames.append(boxes_data)
        if saveImages:
            image.save(os.path.join(output_path, image_file), quality=80)

    sess.close()
    now = timer()
    start_trj_time = timer()
    print("Time elapsed CNN: " + str(now - start_time) + " seconds")
    print("Calculating trajectories...")
    calculate_trajectories()

    now = timer()
    print("Done. Time elapsed: " + str(now - start_trj_time) + " seconds\n\n")
    print("Total time elapsed: " + str(now - start_time) + " seconds")
Example #17
        classes,
        max_boxes=max_boxes,
        iou_threshold=iou_threshold)

    return scores, boxes, classes


pather = "/home/robot-tumas/Desktop/projects/Class/find-people/"

#Load pre_trained model
sess = K.get_session()
class_names = read_classes(pather + "classes.txt")
image_shape = (720., 1280.)
yolo_model = load_model(pather + "compModel.pb")
#yolo_model.summary()
yolo_outputs = yolo_head(tf.convert_to_tensor(yolo_model.output), anchors,
                         len(class_names))
input_image_shape = K.placeholder(shape=(2, ))
scores, boxes, classes = yolo_eval(yolo_outputs, input_image_shape)
import imageio


def predict(sess, image_file):

    # Preprocess your image
    image, image_data = preprocess_image(image_file,
                                         model_image_size=(608, 608))

    out_scores, out_boxes, out_classes = sess.run(
        [scores, boxes, classes],
        feed_dict={
            yolo_model.input: image_data,
Example #18
def _main():
    voc_path = os.path.expanduser('~/datasets/VOCdevkit/pascal_voc_07_12.hdf5')
    classes_path = os.path.expanduser('model_data/pascal_classes.txt')

    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]

    voc = h5py.File(voc_path, 'r')
    image = PIL.Image.open(io.BytesIO(voc['train/images'][28]))
    orig_size = np.array([image.width, image.height])
    orig_size = np.expand_dims(orig_size, axis=0)

    # Image preprocessing.
    image = image.resize((416, 416), PIL.Image.BICUBIC)
    image_data = np.array(image, dtype=float)  # np.float alias was removed in NumPy 1.24
    image_data /= 255.

    # Box preprocessing.
    # Original boxes stored as 1D list of class, x_min, y_min, x_max, y_max.
    boxes = voc['train/boxes'][28]
    boxes = boxes.reshape((-1, 5))
    # Get extents as y_min, x_min, y_max, x_max, class for comparision with
    # model output.
    boxes_extents = boxes[:, [2, 1, 4, 3, 0]]

    # Get box parameters as x_center, y_center, box_width, box_height, class.
    boxes_xy = 0.5 * (boxes[:, 3:5] + boxes[:, 1:3])
    boxes_wh = boxes[:, 3:5] - boxes[:, 1:3]
    boxes_xy = boxes_xy / orig_size
    boxes_wh = boxes_wh / orig_size
    boxes = np.concatenate((boxes_xy, boxes_wh, boxes[:, 0:1]), axis=1)
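    # Worked example: a row (class=7, x_min=100, y_min=200, x_max=300, y_max=400)
    # on a 500x500 image becomes center (0.4, 0.6), size (0.4, 0.4), class 7.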

    # Precompute detectors_mask and matching_true_boxes for training.
    # Detectors mask is 1 for each spatial position in the final conv layer and
    # anchor that should be active for the given boxes and 0 otherwise.
    # Matching true boxes gives the regression targets for the ground truth box
    # that caused a detector to be active or 0 otherwise.
    anchors = COCO_ANCHORS
    detectors_mask_shape = (13, 13, 5, 1)
    matching_boxes_shape = (13, 13, 5, 5)
    detectors_mask, matching_true_boxes = preprocess_true_boxes(
        boxes, anchors, [416, 416])
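    # Each ground-truth box activates exactly one detector: the grid cell holding
    # its center (a center at (0.4, 0.6) on the 13x13 grid falls in column
    # floor(0.4 * 13) = 5, row floor(0.6 * 13) = 7) and, within that cell, the
    # anchor whose shape best matches the box by IOU. matching_true_boxes holds
    # the regression target for that detector; all other entries stay 0.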

    # Create model input layers.
    image_input = Input(shape=(416, 416, 3))
    boxes_input = Input(shape=(None, 5))
    detectors_mask_input = Input(shape=detectors_mask_shape)
    matching_boxes_input = Input(shape=matching_boxes_shape)

    print(boxes)
    print(boxes_extents)
    print(np.where(detectors_mask == 1)[:-1])
    print(matching_true_boxes[np.where(detectors_mask == 1)[:-1]])

    # Create model body.
    model_body = yolo_body(image_input, len(anchors), len(class_names))
    model_body = Model(image_input, model_body.output)
    # Place model loss on CPU to reduce GPU memory usage.
    with tf.device('/cpu:0'):
        # TODO: Replace Lambda with custom Keras layer for loss.
        model_loss = Lambda(yolo_loss,
                            output_shape=(1, ),
                            name='yolo_loss',
                            arguments={
                                'anchors': anchors,
                                'num_classes': len(class_names)
                            })([
                                model_body.output, boxes_input,
                                detectors_mask_input, matching_boxes_input
                            ])
    model = Model(
        [image_input, boxes_input, detectors_mask_input, matching_boxes_input],
        model_loss)
    model.compile(
        optimizer='adam', loss={
            'yolo_loss': lambda y_true, y_pred: y_pred
        })  # This is a hack to use the custom loss function in the last layer.
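    # The Lambda layer already computes the full YOLO loss as the model's output,
    # so the Keras loss just passes y_pred through; the zero-valued targets fed
    # to model.fit below are ignored.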

    # Add batch dimension for training.
    image_data = np.expand_dims(image_data, axis=0)
    boxes = np.expand_dims(boxes, axis=0)
    detectors_mask = np.expand_dims(detectors_mask, axis=0)
    matching_true_boxes = np.expand_dims(matching_true_boxes, axis=0)

    num_steps = 1000
    # TODO: For full training, put preprocessing inside training loop.
    # for i in range(num_steps):
    #     loss = model.train_on_batch(
    #         [image_data, boxes, detectors_mask, matching_true_boxes],
    #         np.zeros(len(image_data)))
    model.fit([image_data, boxes, detectors_mask, matching_true_boxes],
              np.zeros(len(image_data)),
              batch_size=1,
              epochs=num_steps)
    model.save_weights('overfit_weights.h5')

    # Create output variables for prediction.
    yolo_outputs = yolo_head(model_body.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(yolo_outputs,
                                       input_image_shape,
                                       score_threshold=.3,
                                       iou_threshold=.9)

    # Run prediction on overfit image.
    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.
    out_boxes, out_scores, out_classes = sess.run(
        [boxes, scores, classes],
        feed_dict={
            model_body.input: image_data,
            input_image_shape: [image.size[1], image.size[0]],
            K.learning_phase(): 0
        })
    print('Found {} boxes for image.'.format(len(out_boxes)))
    print(out_boxes)

    # Plot image with predicted boxes.
    image_with_boxes = draw_boxes(image_data[0], out_boxes, out_classes,
                                  class_names, out_scores)
    plt.imshow(image_with_boxes, interpolation='nearest')
    plt.show()
def _main(args):
    def predict(sess, image):
        # Preprocess your image
        image, image_data = preprocess_image(image,
                                             model_image_size=(416, 416))

        # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
        # You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})
        out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes],
                                                      feed_dict={
                                                          yolo_model.input:
                                                          image_data,
                                                          K.learning_phase(): 0
                                                      })

        # Print predictions info
        print('Found {} boxes'.format(len(out_boxes)))
        # Generate colors for drawing bounding boxes.
        colors = generate_colors(class_names)
        # Draw bounding boxes on the image file
        out_image = draw_boxes(image, out_scores, out_boxes, out_classes,
                               class_names, colors)

        return out_image, out_scores, out_boxes, out_classes

    cap = cv2.VideoCapture(args.input)
    if not cap.isOpened():
        print("Error opening video capture device.")
        return
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)

    image_shape = (height, width)

    sess = K.get_session()

    class_names = read_classes(CLASS_LIST_PATH)
    anchors = read_anchors(YOLO_ANCHORS_PATH)
    yolo_model, model = create_model(anchors, class_names)
    yolo_model.load_weights(args.weights)
    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    boxes, scores, classes = yolo_eval(yolo_outputs,
                                       image_shape,
                                       score_threshold=args.score_threshold,
                                       iou_threshold=args.iou_threshold)

    while True:
        ret, original_frame = cap.read()
        if not ret:  # End of stream or read failure.
            break

        cv2.imshow('original', original_frame)

        process_frame = cv2.cvtColor(original_frame, cv2.COLOR_BGR2RGB)
        process_frame = Image.fromarray(process_frame)
        process_frame, out_scores, out_boxes, out_classes = predict(
            sess, process_frame)
        process_frame = cv2.cvtColor(np.array(process_frame), cv2.COLOR_RGB2BGR)  # Back to BGR for cv2.imshow.

        cv2.imshow('output', process_frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
Example #20
def _main(args):
    model_path = os.path.expanduser(args.model_path)
    assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
    anchors_path = os.path.expanduser(args.anchors_path)
    classes_path = os.path.expanduser(args.classes_path)
    test_path = os.path.expanduser(args.test_path)
    output_path = os.path.expanduser(args.output_path)

    if not os.path.exists(output_path):
        print('Creating output path {}'.format(output_path))
        os.mkdir(output_path)

    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.

    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]

    with open(anchors_path) as f:
        anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        anchors = np.array(anchors).reshape(-1, 2)

    yolo_model = load_model(model_path)

    # Verify model, anchors, and classes are compatible
    num_classes = len(class_names)
    num_anchors = len(anchors)
    # TODO: Assumes dim ordering is channel last
    model_output_channels = yolo_model.layers[-1].output_shape[-1]
    assert model_output_channels == num_anchors * (num_classes + 5), \
        'Mismatch between model and given anchor and class sizes. ' \
        'Specify matching anchors and classes with --anchors_path and ' \
        '--classes_path flags.'
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Check if model is fully convolutional, assuming channel last order.
    model_image_size = yolo_model.layers[0].input_shape[1:3]
    is_fixed_size = model_image_size != (None, None)

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(class_names), 1., 1.)
                  for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    # TODO: Wrap these backend operations with Keras layers.
    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(
        yolo_outputs,
        input_image_shape,
        score_threshold=args.score_threshold,
        iou_threshold=args.iou_threshold)

    for image_file in os.listdir(test_path):
        try:
            image_type = imghdr.what(os.path.join(test_path, image_file))
            if not image_type:
                continue
        except IsADirectoryError:
            continue

        image = Image.open(os.path.join(test_path, image_file))
        if is_fixed_size:  # TODO: When resizing we can use minibatch input.
            resized_image = image.resize(
                tuple(reversed(model_image_size)), Image.BICUBIC)
            image_data = np.array(resized_image, dtype='float32')
        else:
            # Due to skip connection + max pooling in YOLO_v2, inputs must have
            # width and height as multiples of 32.
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            resized_image = image.resize(new_image_size, Image.BICUBIC)
            image_data = np.array(resized_image, dtype='float32')
            print(image_data.shape)

        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

        out_boxes, out_scores, out_classes = sess.run(
            [boxes, scores, classes],
            feed_dict={
                yolo_model.input: image_data,
                input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })
        print('Found {} boxes for {}'.format(len(out_boxes), image_file))

        font = ImageFont.truetype(
            font='font/FiraMono-Medium.otf',
            size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)

            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)

            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            print(label, (left, top), (right, bottom))

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # My kingdom for a good redistributable image drawing library.
            for i in range(thickness):
                draw.rectangle(
                    [left + i, top + i, right - i, bottom - i],
                    outline=colors[c])
            draw.rectangle(
                [tuple(text_origin), tuple(text_origin + label_size)],
                fill=colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw

        image.save(os.path.join(output_path, image_file), quality=90)
    sess.close()
Example #21
def _main(args):
    voc_path = os.path.expanduser(args.data_path)
    #classes_path = os.path.expanduser(args.classes_path)
    anchors_path = os.path.expanduser(args.anchors_path)

    #with open(classes_path) as f:
    #    class_names = f.readlines()
    #class_names = [c.strip() for c in class_names]
    class_names = ['circle', 'square']

    if os.path.isfile(anchors_path):
        with open(anchors_path) as f:
            anchors = f.readline()
            anchors = [float(x) for x in anchors.split(',')]
            anchors = np.array(anchors).reshape(-1, 2)
    else:
        anchors = YOLO_ANCHORS
    ''' file load (disabled)
    voc = h5py.File(voc_path, 'r')
    image = PIL.Image.open(io.BytesIO(voc['train/images'][28]))
    orig_size = np.array([image.width, image.height])
    orig_size = np.expand_dims(orig_size, axis=0)

    # Image preprocessing.
    image = image.resize((416, 416), PIL.Image.BICUBIC)
    image_data = np.array(image, dtype=np.float)
    image_data /= 255.
    '''

    # Generate shape w/pymrt
    image_data = np.empty((2, 416, 416, 3), dtype=float)
    circ = geo.circle(shape=(416, 416), radius=40, position=0.5)
    image_data[0, :, :, 0] = circ
    image_data[0, :, :, 1] = circ
    image_data[0, :, :, 2] = circ

    sq = geo.square(shape=(416, 416), side=40, position=0.5)
    image_data[1, :, :, 0] = sq
    image_data[1, :, :, 1] = sq
    image_data[1, :, :, 2] = sq

    #image = image_data
    orig_size = np.array([416, 416])

    # Box preprocessing.
    # Original boxes stored as 1D list of class, x_min, y_min, x_max, y_max.
    ## boxes = voc['train/boxes'][28]
    boxes = np.asarray([[0, 168, 168, 248, 248], [1, 168, 168, 248, 248]])
    boxes = boxes.reshape((-1, 5))
    # Get extents as y_min, x_min, y_max, x_max, class for comparision with
    # model output.
    boxes_extents = boxes[:, [2, 1, 4, 3, 0]]

    # Get box parameters as x_center, y_center, box_width, box_height, class.
    boxes_xy = 0.5 * (boxes[:, 3:5] + boxes[:, 1:3])
    boxes_wh = boxes[:, 3:5] - boxes[:, 1:3]
    boxes_xy = boxes_xy / orig_size  # convert to relative coordinates
    boxes_wh = boxes_wh / orig_size
    boxes = np.concatenate((boxes_xy, boxes_wh, boxes[:, 0:1]), axis=1)

    # Precompute detectors_mask and matching_true_boxes for training.
    # Detectors mask is 1 for each spatial position in the final conv layer and
    # anchor that should be active for the given boxes and 0 otherwise.
    # Matching true boxes gives the regression targets for the ground truth box
    # that caused a detector to be active or 0 otherwise.
    detectors_mask_shape = (13, 13, 5, 1)
    matching_boxes_shape = (13, 13, 5, 5)

    def get_detector_mask(boxes, anchors):
        detectors_mask = [0 for i in range(len(boxes))]
        matching_true_boxes = [0 for i in range(len(boxes))]
        for i, box in enumerate(boxes):
            box = box.reshape((-1, 5))
            detectors_mask[i], matching_true_boxes[i] = preprocess_true_boxes(
                box, anchors, [416, 416])

        return np.array(detectors_mask), np.array(matching_true_boxes)

    detectors_mask, matching_true_boxes = get_detector_mask(boxes, anchors)
    #detectors_mask, matching_true_boxes = preprocess_true_boxes(boxes, anchors,
    #                                                            [416, 416])

    # Create model input layers.
    image_input = Input(shape=(416, 416, 3))
    boxes_input = Input(shape=(None, 5))
    detectors_mask_input = Input(shape=detectors_mask_shape)
    matching_boxes_input = Input(shape=matching_boxes_shape)

    #import pdb; pdb.set_trace()

    print('Boxes:')
    print(boxes)
    ##print('Box corners:')
    ##print(boxes_extents)
    print('Active detectors:')
    print(np.where(detectors_mask == 1)[:-1])
    print('Matching boxes for active detectors:')
    print(matching_true_boxes[np.where(detectors_mask == 1)[:-1]])

    # Create model body.
    model_body = yolo_body(image_input, len(anchors), len(class_names))
    model_body = Model(image_input, model_body.output)
    # Place model loss on CPU to reduce GPU memory usage.
    with tf.device('/cpu:0'):
        # TODO: Replace Lambda with custom Keras layer for loss.
        model_loss = Lambda(yolo_loss,
                            output_shape=(1, ),
                            name='yolo_loss',
                            arguments={
                                'anchors': anchors,
                                'num_classes': len(class_names)
                            })([
                                model_body.output, boxes_input,
                                detectors_mask_input, matching_boxes_input
                            ])
    model = Model(
        [image_input, boxes_input, detectors_mask_input, matching_boxes_input],
        model_loss)
    model.compile(
        optimizer='adam', loss={
            'yolo_loss': lambda y_true, y_pred: y_pred
        })  # This is a hack to use the custom loss function in the last layer.

    # Add batch dimension for training.
    #image_data = image_data[0,:,:,:]
    #image_data = np.expand_dims(image_data, axis=0)
    boxes = np.expand_dims(boxes, axis=1)
    #detectors_mask = np.expand_dims(detectors_mask, axis=0)
    #matching_true_boxes = np.expand_dims(matching_true_boxes, axis=0)
    num_steps = 1000
    # TODO: For full training, put preprocessing inside training loop.
    # for i in range(num_steps):
    #     loss = model.train_on_batch(
    #         [image_data, boxes, detectors_mask, matching_true_boxes],
    #         np.zeros(len(image_data)))
    #import pdb; pdb.set_trace()
    model.fit([image_data, boxes, detectors_mask, matching_true_boxes],
              np.zeros(len(image_data)),
              batch_size=2,
              epochs=num_steps)
    model.save_weights('overfit_circle_square_weights.h5')

    #model.load_weights('overfit_circle_square_weights.h5')
    #model.load_weights('overfit_weights.h5')

    # Create output variables for prediction.
    yolo_outputs = yolo_head(model_body.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(yolo_outputs,
                                       input_image_shape,
                                       score_threshold=.3,
                                       iou_threshold=.9)

    # Run prediction on overfit image.
    num_shapes = 10
    image_data = np.empty((1, 416, 416, 3), dtype=float)
    #for sh in range(num_shape)
    circ2 = geo.circle(shape=(416, 416), radius=40, position=(0.4, 0.4))
    #circ3 = geo.circle(shape=(416,416),radius=40,position=0.8)
    #sq2 = geo.square(shape=(416,416),side=40,position=0.2)
    sq3 = geo.square(shape=(416, 416), side=40, position=0.8)
    image_data = np.empty((1, 416, 416, 3), dtype=float)
    image_data[0, :, :, 0] = circ2 + sq3
    image_data[0, :, :, 1] = circ2 + sq3
    image_data[0, :, :, 2] = circ2 + sq3

    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.
    out_boxes, out_scores, out_classes = sess.run(
        [boxes, scores, classes],
        feed_dict={
            model_body.input: image_data,
            #input_image_shape: [image.size[1], image.size[0]],
            input_image_shape: [416, 416],
            K.learning_phase(): 0
        })
    print('Found {} boxes for image.'.format(len(out_boxes)))
    print(out_boxes)

    # Plot image with predicted boxes.
    image_with_boxes = draw_boxes(image_data[0], out_boxes, out_classes,
                                  class_names, out_scores)
    plt.imshow(image_with_boxes, interpolation='nearest')
    plt.show()
Example #22
model_image_size = yolo_model_keras.layers[0].input_shape[1:3]
is_fixed_size = model_image_size != (None, None)

# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(class_tags), 1., 1.) for x in range(len(class_tags))]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(
	map(
		lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
		colors))
random.seed(10101)  # Fixed seed for consistent colors across runs.
random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
random.seed(None)  # Reset seed to default.

# Generate output tensor targets for filtered bounding boxes.
yolo_outputs = yolo_head(yolo_model_keras.output, anchors, len(class_tags))
input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = yolo_eval(
	yolo_outputs,
	input_image_shape,
	score_threshold=score_threshold,
	iou_threshold=intersection_of_union_threshold)


def detect_img(img_path, out_path):
	try:
		image = Image.open(os.path.join("", img_path))
	except Exception:
		print("Image load exception")
		return
	if is_fixed_size:  
Example #23
File: picam.py  Project: nagyistoce/PiCamNN
def yoloThread():
    global frames, times
    model_path = scriptFolder + "tiny.h5"  #Model weights
    sess = K.get_session()
    print("[PiCam] Loading anchors file...")
    anchors = [1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62,
               10.52]  #Tiny Yolo anchors' values
    anchors = np.array(anchors).reshape(-1, 2)
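    # The 10 values reshape into 5 (width, height) anchor pairs, expressed in
    # grid-cell units.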
    print("[PiCam] Loading yolo model ({})...".format(scriptFolder +
                                                      "tiny.h5"))
    yolo_model = load_model(model_path)  #Loading Tiny YOLO
    num_anchors = len(anchors)
    print('[PiCam] YOLO model ({}) loaded!'.format(model_path))

    model_image_size = yolo_model.layers[0].input_shape[1:3]  #Get input shape
    yolo_outputs = yolo_head(yolo_model.output, anchors, 20)
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(yolo_outputs,
                                       input_image_shape,
                                       score_threshold=0.3,
                                       iou_threshold=0.4)
    num = 0  # Index used to name saved photos
    old_time = 0.0  # Time of the latest detection

    print("[PiCam] YOLO Thread started!")
    ### Loop:
    while True:
        if len(frames) != 0:
            try:
                cv2.waitKey(17)
                mat = frames[0]  #Get First frame with movements
                mat = cv2.resize(mat,
                                 (model_image_size[0], model_image_size[1]))
                in_mat = np.array(mat, dtype='float32')
                in_mat /= 255.  # Scale pixel values to [0, 1]
                in_mat = np.expand_dims(in_mat, 0)
                if (times[0] - old_time) > time_chunck:
                    #Searching for detection:
                    out_boxes, out_scores, out_classes = sess.run(
                        [boxes, scores, classes],
                        feed_dict={
                            yolo_model.input: in_mat,
                            input_image_shape: [mat.shape[1], mat.shape[0]],
                            K.learning_phase(): 0
                        })
                    if len(out_boxes) > 0:
                        writ = False
                        xs, ys = [], []  #X's and Y's coordinate
                        for i, c in reversed(list(enumerate(out_classes))):
                            if c == 14:  #14 is the label for persons
                                writ = True
                                box = out_boxes[i]
                                top, left, bottom, right = box
                                top = max(0,
                                          np.floor(top + 0.5).astype('int32'))
                                left = max(
                                    0,
                                    np.floor(left + 0.5).astype('int32'))
                                bottom = min(
                                    mat.shape[1],
                                    np.floor(bottom + 0.5).astype('int32'))
                                right = min(
                                    mat.shape[0],
                                    np.floor(right + 0.5).astype('int32'))
                                xs.append(left + i)
                                xs.append(right - i)
                                ys.append(top + i)
                                ys.append(bottom - i)
                        if writ:
                            img_name = scriptFolder + "imgs/{}.png".format(num)
                            cv2.imwrite(
                                img_name, mat[min(ys):max(ys),
                                              min(xs):max(xs)]
                            )  # Save only the rectangle in which persons were detected
                            out_s = "[{}] Detected person (taken {}s)!\n".format(
                                time.strftime("%H:%M:%S"),
                                round(time.time() - times[0]))  #Log output
                            print(out_s)
                            flog.write(out_s)
                            flog.flush()
                            try:  # Prevent problems like no connection; subprocess is used to set a timeout
                                subprocess.call(
                                    "telegram-cli -W -e \'send_photo {} {} \' "
                                    .format(telegram_user, img_name),
                                    timeout=30,
                                    shell=True)
                            except Exception as exc:
                                print(
                                    "[PiCam] Some error occured in YOLO Thread ({}) :"
                                    .format(time.strftime("%H:%M:%S")), exc)
                            num += 1
                            old_time = times[0]  #Updating detection time
            except Exception as ex:
                print(
                    "[PiCam] Some error occured in YOLO Thread ({}) :".format(
                        time.strftime("%H:%M:%S")), ex)
            del times[0]  #Deleting first Detection time
            del frames[0]  #Deleting first Frame
        cv2.waitKey(50)
Example #24
def _evaluate_output(self):
    with tf.name_scope('output'):
        self.outputs = yolo_head(self.model.output, self.anchors,
                                 len(self.class_names))
Example #25
def draw(model_body,
         class_names,
         anchors,
         image_data,
         image_set='val',
         weights_name='trained_stage_3_best.h5',
         out_path="output_images",
         save_all=True):
    '''
    Draw bounding boxes on image data
    '''
    if image_set == 'train':
        image_data = np.array([
            np.expand_dims(image, axis=0)
            for image in image_data[:int(len(image_data) * .9)]
        ])
    elif image_set == 'val':
        image_data = np.array([
            np.expand_dims(image, axis=0)
            for image in image_data[int(len(image_data) * .9):]
        ])
    elif image_set == 'all':
        image_data = np.array(
            [np.expand_dims(image, axis=0) for image in image_data])
    else:
        ValueError("draw argument image_set must be 'train', 'val', or 'all'")
    # model.load_weights(weights_name)
    print(image_data.shape)
    model_body.load_weights(weights_name)

    # Create output variables for prediction.
    yolo_outputs = yolo_head(model_body.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(yolo_outputs,
                                       input_image_shape,
                                       score_threshold=0.07,
                                       iou_threshold=0.)

    # Run prediction on overfit image.
    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.

    if not os.path.exists(out_path):
        os.makedirs(out_path)
    for i in range(10):
        out_boxes, out_scores, out_classes = sess.run(
            [boxes, scores, classes],
            feed_dict={
                model_body.input: image_data[i],
                input_image_shape: [image_data.shape[2], image_data.shape[3]],
                K.learning_phase(): 0
            })
        print('Found {} boxes for image.'.format(len(out_boxes)))
        print(out_boxes)

        # Plot image with predicted boxes.
        image_with_boxes = draw_boxes(image_data[i][0], out_boxes, out_classes,
                                      class_names, out_scores)
        # Save the image:
        if save_all or (len(out_boxes) > 0):
            image = PIL.Image.fromarray(image_with_boxes)
            image.save(os.path.join(out_path, str(i) + '.png'))

        # To display (pauses the program):
        plt.imshow(image_with_boxes, interpolation='nearest')
        plt.show()
Example #26
def draw(model_body,
         class_names,
         anchors,
         image_set='val',
         weights_name='trained_stage_1.h5',
         out_path="output_images",
         save_all=True):
    '''
    Draw bounding boxes on image data
    '''
    if image_set == 'train':
        image_names = glob.glob("./DATA/00001/*.ppm")
        image_data = np.array([cv2.imread(image) for image in image_names])
        image_data = process_data(images=image_data, boxes=None)
        image_data = np.array(
            [np.expand_dims(image, axis=0) for image in image_data])
    elif image_set == 'val':
        image_names = glob.glob("./DATA/validation/*.ppm")
        image_data = np.array([cv2.imread(image) for image in image_names])
        image_data = process_data(images=image_data, boxes=None)
        image_data = np.array(
            [np.expand_dims(image, axis=0) for image in image_data])
    elif image_set == 'all':
        image_names = glob.glob("./DATA/00001/*.ppm")
        image_data = np.array([cv2.imread(image) for image in image_names])
        image_data = process_data(images=image_data, boxes=None)
        image_data = np.array(
            [np.expand_dims(image, axis=0) for image in image_data])
    else:
        ValueError("draw argument image_set must be 'train', 'val', or 'all'")
    # model.load_weights(weights_name)
    print(image_data.shape)
    model_body.load_weights(weights_name)

    # Create output variables for prediction.
    yolo_outputs = yolo_head(model_body.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(yolo_outputs,
                                       input_image_shape,
                                       score_threshold=0.07,
                                       iou_threshold=0.0)

    # Run prediction on overfit image.
    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.

    if not os.path.exists(out_path):
        os.makedirs(out_path)
    # Run the session to get the boxes, scores, and corresponding classes used to draw boxes.
    for i in range(len(image_data)):
        out_boxes, out_scores, out_classes = sess.run(
            [boxes, scores, classes],
            feed_dict={
                model_body.input: image_data[i],
                input_image_shape: [image_data.shape[2], image_data.shape[3]],
                K.learning_phase(): 0
            })
        print('Found {} boxes for image.'.format(len(out_boxes)))
        print(out_boxes)

        # Plot image with predicted boxes.
        image_with_boxes = draw_boxes(image_data[i][0], out_boxes, out_classes,
                                      class_names, out_scores)
        # Save the image:
        # Save only images with at least one detected box. (Change > 0 to >= 0 to
        # also save images where the model found nothing.)
        if save_all or (len(out_boxes) > 0):
            image = PIL.Image.fromarray(image_with_boxes)
            image.save(os.path.join(out_path, str(i) + '.png'))  # Save to the output path passed in the arguments.
Example #27
def predict(sess, input_path="images", out_path="out"):
    """
	Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the preditions.

	Arguments:
	sess -- your tensorflow/Keras session containing the YOLO graph
	image_file -- name of an image stored in the "images" folder.

	Returns:
	out_scores -- tensor of shape (None, ), scores of the predicted boxes
	out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
	out_classes -- tensor of shape (None, ), class index of the predicted boxes

	Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes. 
	"""

    class_names = read_classes("model_data/coco_classes.txt")
    anchors = read_anchors("model_data/yolo_anchors.txt")
    image_shape = (720., 1280.)

    yolo_model = load_model("model_data/yolo.h5")

    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)

    file_list = sorted(os.listdir(input_path))

    if not os.path.exists(out_path):
        os.makedirs(out_path)

    # Preprocess your image
    for i, image_file in enumerate(file_list):
        image, image_data = preprocess_image(os.path.join(
            input_path, image_file),
                                             model_image_size=(608, 608))

        # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
        # You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})
        with tf.device('/device:GPU:0'):
            out_scores, out_boxes, out_classes = sess.run(
                [scores, boxes, classes],
                feed_dict={
                    yolo_model.input: image_data,
                    K.learning_phase(): 0
                })

        # Print predictions info
        print('')
        print('=======[ {}-th process ]======='.format(i))
        print('Found {} boxes for {}'.format(len(out_boxes), image_file))
        print('')

        # Generate colors for drawing bounding boxes.
        colors = generate_colors(class_names)

        # Draw bounding boxes on the image file
        draw_boxes(image, out_scores, out_boxes, out_classes, class_names,
                   colors)

        # Save the predicted bounding box on the image
        image.save(os.path.join(out_path, image_file), quality=90)
Example #28
def model_prediction_with_Person(score_threshold=0.5, iou_threshold=0.2):

    #Machines initialization
    machines = []
    traction = Machine("Traction", [105, 129, 285, 207])
    bench = Machine("Bench", [155, 227, 272, 267])
    machines.append(traction)
    machines.append(bench)
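    # Machine is an external helper not shown here; it presumably pairs a machine
    # name with a fixed [x1, y1, x2, y2] region of the frame so that detected
    # persons can later be matched to equipment by box overlap (assumption).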

    model_path = os.path.expanduser('model_data/yolo.h5')
    anchors_path = os.path.expanduser('model_data/yolo_anchors.txt')
    classes_path = os.path.expanduser('model_data/coco_classes.txt')
    test_path = os.path.expanduser('data/frames_vid_salle_insa')
    output_path = os.path.expanduser(
        'data/frames_vid_salle_insa_160_with_Person_out')
    output_path = output_path + "/"

    if not os.path.exists(output_path):
        print('Creating output path {}'.format(output_path))
        os.mkdir(output_path)

    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.

    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]

    with open(anchors_path) as f:
        anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        anchors = np.array(anchors).reshape(-1, 2)

    yolo_model = load_model(model_path)

    # Verify model, anchors, and classes are compatible
    num_classes = len(class_names)
    num_anchors = len(anchors)
    # TODO: Assumes dim ordering is channel last
    model_output_channels = yolo_model.layers[-1].output_shape[-1]
    assert model_output_channels == num_anchors * (num_classes + 5), \
        'Mismatch between model and given anchor and class sizes. ' \
        'Specify matching anchors and classes with --anchors_path and ' \
        '--classes_path flags.'
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Check if model is fully convolutional, assuming channel last order.
    model_image_size = yolo_model.layers[0].input_shape[1:3]
    is_fixed_size = model_image_size != (None, None)

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(class_names), 1., 1.)
                  for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    # TODO: Wrap these backend operations with Keras layers.
    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(yolo_outputs,
                                       input_image_shape,
                                       score_threshold=score_threshold,
                                       iou_threshold=iou_threshold)

    # tracking
    persons = {}
    comp = 0
    id = 0
    # tracking
    files = [file for file in os.listdir(test_path)]
    files.sort(key=natural_keys)
    for image_file in files:
        # for image_file in os.listdir(test_path):
        try:
            image_type = imghdr.what(os.path.join(test_path, image_file))
            if not image_type:
                continue
        except IsADirectoryError:
            continue

        image = Image.open(os.path.join(test_path, image_file))

        if is_fixed_size:  # TODO: When resizing we can use minibatch input.
            resized_image = image.resize(tuple(reversed(model_image_size)),
                                         Image.BICUBIC)
            image_data = np.array(resized_image, dtype='float32')
        else:
            # Due to skip connection + max pooling in YOLO_v2, inputs must have
            # width and height as multiples of 32.
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            resized_image = image.resize(new_image_size, Image.BICUBIC)
            image_data = np.array(resized_image, dtype='float32')
            print(image_data.shape)

        image_data /= 255.
        image_frame = np.array(image, dtype='float32')
        image_frame /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

        # Image diagonal length, used to normalize pixel distances.
        distance_normalizer = sqrt(image_frame.shape[0] ** 2 +
                                   image_frame.shape[1] ** 2)
        # print("distance_normalizer :",distance_normalizer)
        # print("normalized 50 =>",50/distance_normalizer)
        # print("normalized 100 =>",100/distance_normalizer)

        out_boxes, out_scores, out_classes = sess.run(
            [boxes, scores, classes],
            feed_dict={
                yolo_model.input: image_data,
                input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })
        print('\nFound {} boxes for {}'.format(len(out_boxes), image_file))

        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                                  size=np.floor(3e-2 * image.size[1] +
                                                0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300

        comp_p = 0
        for i, c in reversed(list(enumerate(out_classes))):
            if c == 0:
                comp_p += 1
                predicted_class = class_names[c]
                box = out_boxes[i]
                score = out_scores[i]

                histr = []

                boolean_intersection = box_intersection_with_object(
                    box, i, out_classes, out_boxes)
                print("Current user intersection with another object : ",
                      boolean_intersection)

                # Clamp the bounding box to the image borders.
                x_top, y_left, x_bottom, y_right = box
                x_top = max(0, x_top)
                y_left = max(0, y_left)
                x_bottom = min(x_bottom, image_frame.shape[0])
                y_right = min(y_right, image_frame.shape[1])
                box = x_top, y_left, x_bottom, y_right
                box = resize_bouding_box(box)
                color = ('b', 'g', 'r')

                # Use 'ch' for the channel index so the outer detection index i
                # is not clobbered; image_frame comes from PIL, so the channels
                # are actually R, G, B, and values lie in [0, 1] (histogram range).
                for ch, _ in enumerate(color):
                    histr.append(
                        cv2.calcHist([
                            image_frame[int(box[0]):int(box[2]),
                                        int(box[1]):int(box[3])]
                        ], [ch], None, [256], [0, 1]))

                # plot_histo(histr)
                rgbHist = []
                if comp == 0:

                    rgbHist.append(histr)
                    person = Person("person" + str(id), comp, box, rgbHist, 0)

                    persons[person.name] = person
                    print("NEW USER WELCOM =>", person.name)

                    id += 1
                else:
                    print("\n TRACKING COMPUTATION  CURRENT USER", comp_p)
                    dico_histo = color_tracking_v2(histr, persons)
                    print("\nPROCESS METRICS SELECTION CURRENT USER", comp_p)
                    # name=process_metrics_v2(dico_histo,box,histr,persons,comp,distance_normalizer)
                    name, boolean_redecouverte, key_to_delete, boolena_ressemble_bcp = process_metrics_v2(
                        dico_histo, box, histr, persons, comp,
                        distance_normalizer)

                    if name == "default":
                        print("NEW USER WELCOM =>", "person" + str(id))
                        rgbHist.append(histr)
                        person = Person("person" + str(id), comp, box, rgbHist,
                                        0)
                        persons[person.name] = person
                        id += 1
                    else:
                        persons = updatePersons(name, box, histr, persons,
                                                boolean_intersection,
                                                boolean_redecouverte,
                                                boolena_ressemble_bcp)
                        if boolean_redecouverte and key_to_delete != "default":
                            print("About to delete:", key_to_delete)
                            persons.pop(key_to_delete)
                        # Redundant -- the update above is enough for tracking, but
                        # this Person object is needed to draw the bounding box label.
                        person = Person(name, comp, box, rgbHist)

                print(person.box)

                if comp % 30 == 0:  # keep 30 for the real run
                    machines = updateTimeMachine(person, machines)

                #before tracking
                # label = '{} {:.2f}'.format(predicted_class, score)
                label = '{} {} {:.2f}'.format(person.name, predicted_class,
                                              score)
                draw = ImageDraw.Draw(image)
                label_size = draw.textsize(label, font)

                top, left, bottom, right = box
                # top, left, bottom, right =  [0, 0, 400, 800]
                top = max(0, np.floor(top + 0.5).astype('int32'))
                left = max(0, np.floor(left + 0.5).astype('int32'))
                bottom = min(image.size[1],
                             np.floor(bottom + 0.5).astype('int32'))
                right = min(image.size[0],
                            np.floor(right + 0.5).astype('int32'))

                if top - label_size[1] >= 0:
                    text_origin = np.array([left, top - label_size[1]])
                else:
                    text_origin = np.array([left, top + 1])

                # My kingdom for a good redistributable image drawing library.
                for i in range(thickness):
                    draw.rectangle([left + i, top + i, right - i, bottom - i],
                                   outline=colors[c])
                draw.rectangle(
                    [tuple(text_origin),
                     tuple(text_origin + label_size)],
                    fill=colors[c])
                draw.text(text_origin, label, fill=(0, 0, 0), font=font)
                del draw

        if comp % 30 == 0:  # keep 30 for the real run
            machines = usedMachine(machines)

        # draw machine
        machine = machines[0]
        label = '{} {} '.format(machine.name, machine.isUsed)
        draw = ImageDraw.Draw(image)
        label_size = draw.textsize(label, font)

        top, left, bottom, right = machine.box
        # top, left, bottom, right =  [0, 0, 400, 800]
        top = max(0, np.floor(top + 0.5).astype('int32'))
        left = max(0, np.floor(left + 0.5).astype('int32'))
        bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
        right = min(image.size[0], np.floor(right + 0.5).astype('int32'))

        if top - label_size[1] >= 0:
            text_origin = np.array([left, top - label_size[1]])
        else:
            text_origin = np.array([left, top + 1])

        c = 1 if machine.isUsed else 2

        # My kingdom for a good redistributable image drawing library.
        for i in range(thickness):
            draw.rectangle([left + i, top + i, right - i, bottom - i],
                           outline=colors[c])
        draw.rectangle([tuple(text_origin),
                        tuple(text_origin + label_size)],
                       fill=colors[c])
        draw.text(text_origin, label, fill=(0, 0, 0), font=font)
        del draw

        print("output_path :", output_path)
        print("image_file : ", image_file)
        # Note: 'quality' only applies to JPEG; the PNG writer ignores it.
        image.save(os.path.join(output_path, image_file), "PNG", quality=90)

        comp += 1
        # Debug printing only
        # for key , person in persons.items():
        #     print(person.name ,len(person.histmoyface1),len(person.histmoyface2))
        print('Found {} boxes for {}'.format(len(out_boxes), image_file))
        printMachineState(machines)

        # text = input("pause")

    sess.close()
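
The frame loop above sorts files with natural_keys so that numeric order is respected (frame_2 before frame_10). The helper itself is not shown in the snippet; a common minimal implementation, given as an assumption rather than the project's actual code:

import re

def atoi(text):
    return int(text) if text.isdigit() else text

def natural_keys(text):
    # Split "frame10.png" into ['frame', 10, '.png'] so numeric runs compare as ints.
    return [atoi(chunk) for chunk in re.split(r'(\d+)', text)]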
Example #29
File: test_yolo.py  Project: ogiwolf/python
def _main(args):
    model_path = os.path.expanduser(args.model_path)
    assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
    anchors_path = os.path.expanduser(args.anchors_path)
    classes_path = os.path.expanduser(args.classes_path)
    test_path = os.path.expanduser(args.test_path)
    output_path = os.path.expanduser(args.output_path)

    if not os.path.exists(output_path):
        print('Creating output path {}'.format(output_path))
        os.mkdir(output_path)

    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.

    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]

    with open(anchors_path) as f:
        anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        anchors = np.array(anchors).reshape(-1, 2)

    yolo_model = load_model(model_path)

    # Verify model, anchors, and classes are compatible
    num_classes = len(class_names)
    num_anchors = len(anchors)
    # TODO: Assumes dim ordering is channel last
    model_output_channels = yolo_model.layers[-1].output_shape[-1]
    assert model_output_channels == num_anchors * (num_classes + 5), \
        'Mismatch between model and given anchor and class sizes. ' \
        'Specify matching anchors and classes with --anchors_path and ' \
        '--classes_path flags.'
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Check if model is fully convolutional, assuming channel last order.
    model_image_size = yolo_model.layers[0].input_shape[1:3]
    is_fixed_size = model_image_size != (None, None)

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(class_names), 1., 1.)
                  for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    # TODO: Wrap these backend operations with Keras layers.
    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(yolo_outputs,
                                       input_image_shape,
                                       score_threshold=args.score_threshold,
                                       iou_threshold=args.iou_threshold)

    for image_file in os.listdir(test_path):
        try:
            image_type = imghdr.what(os.path.join(test_path, image_file))
            if not image_type:
                continue
        except IsADirectoryError:
            continue

        image = Image.open(os.path.join(test_path, image_file))
        if is_fixed_size:  # TODO: When resizing we can use minibatch input.
            resized_image = image.resize(tuple(reversed(model_image_size)),
                                         Image.BICUBIC)
            image_data = np.array(resized_image, dtype='float32')
        else:
            # Due to skip connection + max pooling in YOLO_v2, inputs must have
            # width and height as multiples of 32.
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            resized_image = image.resize(new_image_size, Image.BICUBIC)
            image_data = np.array(resized_image, dtype='float32')
            print(image_data.shape)

        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

        out_boxes, out_scores, out_classes = sess.run(
            [boxes, scores, classes],
            feed_dict={
                yolo_model.input: image_data,
                input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })
        print('Found {} boxes for {}'.format(len(out_boxes), image_file))

        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                                  size=np.floor(3e-2 * image.size[1] +
                                                0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300
        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = class_names[c]
            box = out_boxes[i]
            score = out_scores[i]
            if predicted_class == 'orange' or predicted_class == 'apple':
                predicted_class = 'Mango'

            # Heuristic: a higher score is treated as a riper fruit, so the
            # estimated days to maturity shrink as the confidence grows.
            AllTotalTime = str(round(14 - 14 * score))
            label = '{} {:.2f}%  Maturity: {} days'.format(
                predicted_class, score * 100, AllTotalTime)

            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)

            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            print(label, (left, top), (right, bottom))

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # My kingdom for a good redistributable image drawing library.
            for i in range(thickness):
                draw.rectangle([left + i, top + i, right - i, bottom - i],
                               outline=colors[c])

            # Center coordinates of the rectangle (the inset i cancels out,
            # leaving the plain box center).
            RectCenter = str(((left + i) + (right - i)) / 2) + " , " + str(
                ((top + i) + (bottom - i)) / 2)

            # Show the coordinates in the label
            label = label + "\n Position: " + RectCenter
            draw.rectangle(
                [tuple(text_origin),
                 tuple(text_origin + label_size)],
                fill=colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw

        image.save(os.path.join(output_path, image_file), quality=90)
    sess.close()
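
The first snippet above calls generate_colors(class_names), while this example and its neighbors inline the equivalent HSV logic. A sketch that simply packages that repeated block as a helper (an assumption, not the project's verbatim code):

import colorsys
import random

def generate_colors(class_names):
    # Evenly spaced hues around the HSV wheel, one color per class.
    hsv_tuples = [(x / len(class_names), 1., 1.) for x in range(len(class_names))]
    colors = [colorsys.hsv_to_rgb(*hsv) for hsv in hsv_tuples]
    colors = [(int(r * 255), int(g * 255), int(b * 255)) for r, g, b in colors]
    random.seed(10101)      # fixed seed for consistent colors across runs
    random.shuffle(colors)  # decorrelate adjacent classes
    random.seed(None)       # reset seed to default
    return colors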
Example #30
def _main(args):

    model_path = os.path.expanduser(args.model_path)
    assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
    anchors_path = os.path.expanduser(args.anchors_path)
    classes_path = os.path.expanduser(args.classes_path)
    input_path = os.path.expanduser(args.input_path)
    output_path = os.path.expanduser(args.output_path)

    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.

    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]

    with open(anchors_path) as f:
        anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        anchors = np.array(anchors).reshape(-1, 2)

    yolo_model = load_model(model_path)

    # Verify model, anchors, and classes are compatible
    num_classes = len(class_names)
    num_anchors = len(anchors)

    # TODO: Assumes dim ordering is channel last
    model_output_channels = yolo_model.layers[-1].output_shape[-1]
    assert model_output_channels == num_anchors * (num_classes + 5), \
        'Mismatch between model and given anchor and class sizes. ' \
        'Specify matching anchors and classes with --anchors_path and ' \
        '--classes_path flags.'
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Check if model is fully convolutional, assuming channel last order.
    model_image_size = yolo_model.layers[0].input_shape[1:3]
    is_fixed_size = model_image_size != (None, None)

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(class_names), 1., 1.)
                  for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    # TODO: Wrap these backend operations with Keras layers.
    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(
        yolo_outputs,
        input_image_shape,
        score_threshold=args.score_threshold,
        iou_threshold=args.iou_threshold)

    only_class = None

    bbMan = BBoxMan()

    bbMan.perspMat = None

    if args.only_class != "None":
        only_class = args.only_class

    def process_video_frame(image_np):
        '''
        Process a single video frame. Defined inside _main so it closes over
        the session, model, and drawing variables set up above.
        '''
        
        if bbMan.perspMat is None:
            bbMan.perspMat = make_persp_mat(image_np)
            print('bbMan.perspMat')
            print(bbMan.perspMat)

        image = Image.fromarray(image_np)

        if is_fixed_size:  # TODO: When resizing we can use minibatch input.
            resized_image = image.resize(
                tuple(reversed(model_image_size)), Image.BICUBIC)
            image_data = np.array(resized_image, dtype='float32')
        else:
            # Due to skip connection + max pooling in YOLO_v2, inputs must have
            # width and height as multiples of 32.
            new_image_size = (image.width - (image.width % 32),
                                image.height - (image.height % 32))
            resized_image = image.resize(new_image_size, Image.BICUBIC)
            image_data = np.array(resized_image, dtype='float32')
            print(image_data.shape)

        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

        out_boxes, out_scores, out_classes = sess.run(
            [boxes, scores, classes],
            feed_dict={
                yolo_model.input: image_data,
                input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })
        
        print('Found {} boxes'.format(len(out_boxes)))

        font = ImageFont.truetype(
            font='font/FiraMono-Medium.otf',
            size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))

        thickness = (image.size[0] + image.size[1]) // 300

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            #when we have specified just one class to detect, skip it when
            #it does not match
            if(only_class is not None and predicted_class != only_class):
                print('ignoring', predicted_class)
                continue

            col = colors[random.randrange(0, len(colors))]
            bbox = BBox(box, predicted_class, score, col, image)
            bbMan.add_box(bbox)
        
        bbMan.purge()
        bbMan.draw(image, font, thickness, bbMan.perspMat)

        return np.asarray(image)

    # Now set up the video clip reader and writer.
    start = args.start
    end = args.end
    clip1 = VideoFileClip(input_path)
    clip1 = clip1.subclip(start, end)
    out_clip = clip1.fl_image(process_video_frame)
    out_clip.write_videofile(output_path, audio=False)
    sess.close()
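
The fallback branch in process_video_frame rounds the frame down to multiples of 32 because YOLOv2 downsamples its input by a factor of 32 (five stride-2 poolings, with the skip connection joining at the same scale). A standalone illustration of that rounding:

def floor_to_multiple(n, base=32):
    # Largest multiple of `base` not exceeding n, as in `width - width % 32` above.
    return n - (n % base)

assert floor_to_multiple(1280) == 1280  # already divisible
assert floor_to_multiple(725) == 704    # 704 = 22 * 32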
Example #31
    # Display the results in the notebook
    output_image_raw = cv2.imread(os.path.join("out", image_file))
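    # cv2.imread returns BGR; reversing the last axis converts it to RGB for display.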
    output_image = output_image_raw[..., ::-1]
    imshow(output_image)

    return out_scores, out_boxes, out_classes


sess = K.get_session()

#Model loading
class_names = read_classes("model_data/coco_classes.txt")
anchors = read_anchors("model_data/yolo_anchors.txt")
yolo_model = load_model("model_data/yolo.h5")
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))

# In[Pic loading & Testing]
#Choose image
tic = time.perf_counter()  # time.clock() was removed in Python 3.8
image_file = "test21.jpg"
image_original = cv2.imread("images/" + image_file)
height = image_original.shape[0]
width = image_original.shape[1]
image_shape = (float(height), float(width))

#Apply to the YOLO filter and non-max suppression
scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)

#Detection
print('')
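
read_classes and read_anchors here presumably wrap the inline parsing shown in the earlier _main snippets; minimal sketches under that assumption:

import numpy as np

def read_classes(classes_path):
    # One class name per line, e.g. model_data/coco_classes.txt.
    with open(classes_path) as f:
        return [c.strip() for c in f.readlines()]

def read_anchors(anchors_path):
    # A single comma-separated line of floats, reshaped to (num_anchors, 2).
    with open(anchors_path) as f:
        anchors = [float(x) for x in f.readline().split(',')]
    return np.array(anchors).reshape(-1, 2)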
Example #32
    def test_model(self, model_body, part_name):
        score_threshold = 0.5
        iou_threshold = 0.5
        part_name = 'test'  # NOTE: overrides the part_name argument

        # Evaluate the output of the model body
        yolo_outputs = yolo_head(model_body.output, self.data.anchors,
                                 len(self.data.classes))
        input_image_shape = K.placeholder(shape=(2, ))
        max_boxes = self.data.get_num_boxes()
        boxes, scores, classes = yolo_eval(yolo_outputs,
                                           input_image_shape,
                                           max_boxes=max_boxes,
                                           score_threshold=score_threshold,
                                           iou_threshold=iou_threshold)

        # Get tensorflow session
        sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.

        # Initialize variables to store predictions and true labels
        m = len(self.data.partition[part_name])
        IDs = []
        boxes_pred = []
        scores_pred = []
        classes_pred = []

        # Get the generator
        gen = self.data.get_generator(part_name)

        tic = time.time()
        # Loop over each image and test
        for i in range(m):
            # Get next training sample
            x, _ = next(gen)

            # Extract information from sample
            image = x[0]  # decimal values from 0 to 1
            ID = x[-1][0]
            IDs.append(ID)

            # Run Prediction
            out_boxes, out_scores, out_classes = sess.run(
                [boxes, scores, classes],
                feed_dict={
                    model_body.input: image,
                    input_image_shape: [self.data.image_size[1],
                                        self.data.image_size[0]],
                    K.learning_phase(): 0
                })

            # Process outputs
            # Reorder the columns from [y_min, x_min, y_max, x_max] to
            # [x_min, y_min, x_max, y_max].
            out_boxes = out_boxes[:, [1, 0, 3, 2]]
            # print(str(out_boxes.shape[0]) + " objects detected")
            self.data.compare_prediction(ID, out_boxes, out_classes,
                                         out_scores, 1)

            # Collect the predictions for this image.
            boxes_pred.append(out_boxes)
            scores_pred.append(out_scores)
            classes_pred.append(out_classes)

            if (i + 1) % 25 == 0:
                print("Finished Predictions for %d / %d" % (i + 1, m))
        toc = time.time() - tic
        print("Total prediction time: %.2f s" % toc)
        print("Predictions per second: %.2f" % (m / toc))

        # Convert to numpy arrays
        IDs = np.array(IDs)

        np.savez(os.path.join(self.output_path, "Predictions.npz"),
                 boxes=boxes_pred,
                 scores=scores_pred,
                 classes=classes_pred,
                 ids=IDs)
        self.calc_metrics(IDs, boxes_pred, classes_pred, scores_pred)
    with tf.Session() as test_b:
        yolo_outputs = (tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed=1),
                        tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed=1),
                        tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed=1),
                        tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed=1))

        scores, boxes, classes = yolo_eval(yolo_outputs)
        print("scores[2] = " + str(scores[2].eval()))
        print("boxes[2] = " + str(boxes[2].eval()))
        print("classes[2] = " + str(classes[2].eval()))
        print("scores.shape = " + str(scores.eval().shape))
        print("boxes.shape = " + str(boxes.eval().shape))
        print("classes.shape = " + str(classes.eval().shape))

    sess = K.get_session()
    class_names = read_classes("model_data/coco_classes.txt")
    anchors = read_anchors("model_data/yolo_anchors.txt")
    image_shape = (720., 1280.)    

    yolo_model = load_model("model_data/yolo.h5")
    yolo_model.summary()

    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))

    scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)

    # test images
    out_scores, out_boxes, out_classes = predict(sess, "test.jpg")
    out_scores, out_boxes, out_classes = predict(sess, "0012.jpg")
    out_scores, out_boxes, out_classes = predict(sess, "0006.jpg")
Example #34
def _main(args):
    voc_path = os.path.expanduser(args.data_path)
    classes_path = os.path.expanduser(args.classes_path)
    anchors_path = os.path.expanduser(args.anchors_path)

    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]

    if os.path.isfile(anchors_path):
        with open(anchors_path) as f:
            anchors = f.readline()
            anchors = [float(x) for x in anchors.split(',')]
            anchors = np.array(anchors).reshape(-1, 2)
    else:
        anchors = YOLO_ANCHORS

    voc = h5py.File(voc_path, 'r')
    image = PIL.Image.open(io.BytesIO(voc['train/images'][28]))
    orig_size = np.array([image.width, image.height])
    orig_size = np.expand_dims(orig_size, axis=0)

    # Image preprocessing.
    image = image.resize((416, 416), PIL.Image.BICUBIC)
    image_data = np.array(image, dtype=np.float32)  # np.float was removed from NumPy
    image_data /= 255.

    # Box preprocessing.
    # Original boxes stored as 1D list of class, x_min, y_min, x_max, y_max.
    boxes = voc['train/boxes'][28]
    boxes = boxes.reshape((-1, 5))
    # Get extents as y_min, x_min, y_max, x_max, class for comparison with
    # model output.
    boxes_extents = boxes[:, [2, 1, 4, 3, 0]]

    # Get box parameters as x_center, y_center, box_width, box_height, class.
    boxes_xy = 0.5 * (boxes[:, 3:5] + boxes[:, 1:3])
    boxes_wh = boxes[:, 3:5] - boxes[:, 1:3]
    boxes_xy = boxes_xy / orig_size
    boxes_wh = boxes_wh / orig_size
    boxes = np.concatenate((boxes_xy, boxes_wh, boxes[:, 0:1]), axis=1)

    # Precompute detectors_mask and matching_true_boxes for training.
    # Detectors mask is 1 for each spatial position in the final conv layer and
    # anchor that should be active for the given boxes and 0 otherwise.
    # Matching true boxes gives the regression targets for the ground truth box
    # that caused a detector to be active or 0 otherwise.
    detectors_mask_shape = (13, 13, 5, 1)
    matching_boxes_shape = (13, 13, 5, 5)
    detectors_mask, matching_true_boxes = preprocess_true_boxes(boxes, anchors,
                                                                [416, 416])

    # Create model input layers.
    image_input = Input(shape=(416, 416, 3))
    boxes_input = Input(shape=(None, 5))
    detectors_mask_input = Input(shape=detectors_mask_shape)
    matching_boxes_input = Input(shape=matching_boxes_shape)

    print('Boxes:')
    print(boxes)
    print('Box corners:')
    print(boxes_extents)
    print('Active detectors:')
    print(np.where(detectors_mask == 1)[:-1])
    print('Matching boxes for active detectors:')
    print(matching_true_boxes[np.where(detectors_mask == 1)[:-1]])

    # Create model body.
    model_body = yolo_body(image_input, len(anchors), len(class_names))
    model_body = Model(image_input, model_body.output)
    # Place model loss on CPU to reduce GPU memory usage.
    with tf.device('/cpu:0'):
        # TODO: Replace Lambda with custom Keras layer for loss.
        model_loss = Lambda(
            yolo_loss,
            output_shape=(1, ),
            name='yolo_loss',
            arguments={'anchors': anchors,
                       'num_classes': len(class_names)})([
                           model_body.output, boxes_input,
                           detectors_mask_input, matching_boxes_input
                       ])
    model = Model(
        [image_input, boxes_input, detectors_mask_input,
         matching_boxes_input], model_loss)
    model.compile(
        optimizer='adam', loss={
            'yolo_loss': lambda y_true, y_pred: y_pred
        })  # This is a hack to use the custom loss function in the last layer.

    # Add batch dimension for training.
    image_data = np.expand_dims(image_data, axis=0)
    boxes = np.expand_dims(boxes, axis=0)
    detectors_mask = np.expand_dims(detectors_mask, axis=0)
    matching_true_boxes = np.expand_dims(matching_true_boxes, axis=0)

    num_steps = 1000
    # TODO: For full training, put preprocessing inside training loop.
    # for i in range(num_steps):
    #     loss = model.train_on_batch(
    #         [image_data, boxes, detectors_mask, matching_true_boxes],
    #         np.zeros(len(image_data)))
    model.fit([image_data, boxes, detectors_mask, matching_true_boxes],
              np.zeros(len(image_data)),
              batch_size=1,
              epochs=num_steps)
    model.save_weights('overfit_weights.h5')

    # Create output variables for prediction.
    yolo_outputs = yolo_head(model_body.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(
        yolo_outputs, input_image_shape, score_threshold=.3, iou_threshold=.9)

    # Run prediction on overfit image.
    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.
    out_boxes, out_scores, out_classes = sess.run(
        [boxes, scores, classes],
        feed_dict={
            model_body.input: image_data,
            input_image_shape: [image.size[1], image.size[0]],
            K.learning_phase(): 0
        })
    print('Found {} boxes for image.'.format(len(out_boxes)))
    print(out_boxes)

    # Plot image with predicted boxes.
    image_with_boxes = draw_boxes(image_data[0], out_boxes, out_classes,
                                  class_names, out_scores)
    plt.imshow(image_with_boxes, interpolation='nearest')
    plt.show()
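
The box preprocessing in this example converts corner coordinates into normalized center/size form before computing the detector masks. A quick worked check of that arithmetic with made-up numbers:

import numpy as np

# One VOC-style box (class, x_min, y_min, x_max, y_max) on a 400x300 image.
boxes = np.array([[7, 100., 50., 300., 250.]])
orig_size = np.array([[400., 300.]])  # width, height

boxes_xy = 0.5 * (boxes[:, 3:5] + boxes[:, 1:3]) / orig_size  # box centers
boxes_wh = (boxes[:, 3:5] - boxes[:, 1:3]) / orig_size        # box sizes

assert np.allclose(boxes_xy, [[0.5, 0.5]])        # (200, 150) / (400, 300)
assert np.allclose(boxes_wh, [[0.5, 2.0 / 3.0]])  # (200, 200) / (400, 300)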