Example #1
def postprocess(image_info, np_pred_boxes, np_pred_confidences, H, options):
    pred_anno = al.Annotation()
    #pred_anno.imageName = image_info['path']
    #pred_anno.imagePath = os.path.abspath(image_info['path'])
    _, rects = add_rectangles(H, [image_info['transformed']],
                              np_pred_confidences,
                              np_pred_boxes,
                              use_stitching=True,
                              rnn_len=H['rnn_len'],
                              min_conf=options['min_conf'],
                              tau=options['tau'],
                              show_suppressed=False)

    rects = [
        r for r in rects
        if r.x1 < r.x2 and r.y1 < r.y2 and r.score > options['min_conf']
    ]
    h, w = image_info['original'].shape[:2]
    if 'rotate90' in H['data'] and H['data']['rotate90']:
        # the original image height is the width of the rotated image
        rects = Rotate90.invert(h, rects)
    pred_anno.rects = rects
    pred_anno = rescale_boxes((H['image_height'], H['image_width']), pred_anno,
                              h, w)
    return pred_anno
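For reference, a hedged sketch of the image_info dict this helper reads; only the keys accessed above ('path', 'original', 'transformed') are taken from the snippet, and the preprocessing shown is an assumption rather than part of the original code.

# Hypothetical construction of image_info; only keys read by postprocess()
# above are guaranteed, the preprocessing itself is an assumption.
from scipy.misc import imread, imresize

def make_image_info(path, H):
    original = imread(path)[:, :, :3]  # drop any alpha channel, keep RGB
    transformed = imresize(original, (H['image_height'], H['image_width']), interp='cubic')
    return {'path': path, 'original': original, 'transformed': transformed}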
Example #2
def hot_predict(image_path, init_params, options):
    """Makes predictions when all long running preparation operations are made.

    Args:
        image_path (string): The path to the source image.
        init_params (dict): The parameters produced by :func:`initialize`.
        options (dict): The options for more precise prediction of bounding boxes.

    Returns (Annotation):
        The annotation for the source image.
    """

    H = init_params['hypes']

    # predict
    orig_img = imread(image_path)[:, :, :3]
    img = imresize(orig_img, (H['image_height'], H['image_width']), interp='cubic')
    (np_pred_boxes, np_pred_confidences) = init_params['sess'].\
        run([init_params['pred_boxes'], init_params['pred_confidences']], feed_dict={init_params['x_in']: img})
    pred_anno = al.Annotation()
    pred_anno.imageName = image_path
    _, rects = add_rectangles(H, [img], np_pred_confidences, np_pred_boxes, use_stitching=True,
                              rnn_len=H['rnn_len'], min_conf=options['min_conf'], tau=options['tau'],
                              show_suppressed=options['show_suppressed'])

    pred_anno.rects = [r for r in rects if r.x1 < r.x2 and r.y1 < r.y2]
    pred_anno.imagePath = os.path.abspath(image_path)
    pred_anno = rescale_boxes((H['image_height'], H['image_width']), pred_anno, orig_img.shape[0], orig_img.shape[1])
    return pred_anno
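A hedged usage sketch: hot_predict is meant to be called repeatedly after a one-off initialize() (not shown in this snippet). The option names come from the code above; the hypes and image paths are placeholders.

# Assumed usage, given the docstring above; the paths are placeholders.
init_params = initialize('hypes.json')  # hypothetical hypes path
options = {'min_conf': 0.2, 'tau': 0.25, 'show_suppressed': False}
anno = hot_predict('test.jpg', init_params, options)
for r in anno.rects:
    print(r.x1, r.y1, r.x2, r.y2, r.score)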
Example #3
    def detect(self, image):
        pred_annolist = al.AnnoList()

        #true_annolist = al.parse(args.test_boxes)
        #data_dir = os.path.dirname(args.test_boxes)
        #image_dir = get_image_dir(args)
        #subprocess.call('mkdir -p %s' % image_dir, shell=True)

        orig_img = image[:,:,:3]
        img = imresize(orig_img, (self.H["image_height"], self.H["image_width"]))
        feed = {self.x_in: img}
        (np_pred_boxes, np_pred_confidences) = self.sess.run([self.pred_boxes, self.pred_confidences], feed_dict=feed)

        pred_anno = al.Annotation()
        pred_anno.imageName = "test"
        new_img, rects = add_rectangles(self.H, [img], np_pred_confidences, np_pred_boxes,
                                        use_stitching=True, rnn_len=self.H['rnn_len'], min_conf=0.2, tau=0.25, show_suppressed=False)

        pred_anno.rects = rects
        pred_anno.imagePath = "none"#os.path.abspath(data_dir)
        pred_anno = rescale_boxes((self.H["image_height"], self.H["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1])
        pred_annolist.append(pred_anno)
        predictions = []
        for pred in pred_annolist:
            predictions.append([pred.rects[0].x1, pred.rects[0].y1, pred.rects[0].x2, pred.rects[0].y2])
            
        misc.imsave("test.jpg", new_img)
        return predictions
    def predict_image(self, image):
        """
        Infer buildings for a single image.
        Inputs:
            image :: n x m x 3 ndarray - Should be in RGB format
        """

        orig_img = image.copy()[:,:,:3]
        img = imresize(orig_img, (self.H["image_height"], self.H["image_width"]), interp='cubic')
        feed = {self.x_in: img}

        t0 = time.time()
        (np_pred_boxes, np_pred_confidences) = self.session.run([self.pred_boxes, self.pred_confidences], feed_dict=feed)
        total_time = time.time() - t0

        new_img, rects, all_rects = add_rectangles(
            self.H, 
            [img], 
            np_pred_confidences, 
            np_pred_boxes,
            use_stitching=True, 
            rnn_len=self.H['rnn_len'], 
            min_conf=0.5, # only affects `rects`, not `all_rects`
            tau=0.25, 
            show_suppressed=False
        )

        pred_anno = al.Annotation()
        pred_anno.rects = all_rects
        pred_anno = rescale_boxes((self.H["image_height"], self.H["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1])

        pred_rects = pandas.DataFrame([[r.x1, r.y1, r.x2, r.y2, r.score] for r in all_rects], columns=['x1', 'y1', 'x2', 'y2', 'score'])

        return pred_rects
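Because min_conf above only filters `rects` while the returned DataFrame is built from `all_rects`, callers are expected to threshold the scores themselves. A hedged usage sketch follows; the `detector` object and the 0.5 threshold are assumptions.

# Hypothetical usage; `detector` is an instance of the class this method
# belongs to and `image` an RGB ndarray.
pred_rects = detector.predict_image(image)
confident = pred_rects[pred_rects['score'] > 0.5]   # keep high-scoring boxes only
boxes = confident[['x1', 'y1', 'x2', 'y2']].values  # N x 4 array of box corners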
Example #5
def get_results(args, H):
    tf.reset_default_graph()
    x_in = tf.placeholder(tf.float32, name="x_in", shape=[H["image_height"], H["image_width"], 3])
    googlenet = googlenet_load.init(H)
    if H["use_rezoom"]:
        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward(
            H, tf.expand_dims(x_in, 0), googlenet, "test", reuse=None
        )
        grid_area = H["grid_height"] * H["grid_width"]
        pred_confidences = tf.reshape(
            tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * H["rnn_len"], 2])), [grid_area, H["rnn_len"], 2]
        )
        if H["reregress"]:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), googlenet, "test", reuse=None
        )
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        saver.restore(sess, args.weights)

        pred_annolist = al.AnnoList()

        true_annolist = al.parse(args.test_idl)
        data_dir = os.path.dirname(args.test_idl)
        image_dir = get_image_dir(args)
        subprocess.call("mkdir -p %s" % image_dir, shell=True)
        for i in range(len(true_annolist)):
            true_anno = true_annolist[i]
            orig_img = imread("%s/%s" % (data_dir, true_anno.imageName))[:, :, :3]
            img = imresize(orig_img, (H["image_height"], H["image_width"]), interp="cubic")
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences], feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            new_img, rects = add_rectangles(
                H,
                [img],
                np_pred_confidences,
                np_pred_boxes,
                use_stitching=True,
                rnn_len=H["rnn_len"],
                min_conf=0.2,
                tau=args.tau,
            )

            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes(
                (H["image_height"], H["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1]
            )
            pred_annolist.append(pred_anno)

            imname = "%s/%s" % (image_dir, os.path.basename(true_anno.imageName))
            misc.imsave(imname, new_img)
            if i % 25 == 0:
                print(i)
    return pred_annolist, true_annolist
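The args object above is only read for .weights, .test_idl, and .tau (plus whatever get_image_dir needs). A hedged driver sketch with placeholder paths:

# Hypothetical driver; paths are placeholders and get_image_dir() may read
# additional attributes from args that are not shown in this snippet.
import argparse
import json

parser = argparse.ArgumentParser()
parser.add_argument('--weights', default='output/save.ckpt-100000')
parser.add_argument('--test_idl', default='data/val.idl')
parser.add_argument('--tau', type=float, default=0.25)
args = parser.parse_args()

with open('hypes.json') as f:  # placeholder hypes file
    H = json.load(f)
pred_annolist, true_annolist = get_results(args, H)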
Example #6
def get_results(args, H, data_dir):
    tf.reset_default_graph()
    H["grid_width"] = H["image_width"] / H["region_size"]
    H["grid_height"] = H["image_height"] / H["region_size"]
    if args.frozen_graph:
        graph = load_frozen_graph(args.graphfile)
    else:
        new_saver = tf.train.import_meta_graph(args.graphfile)
    NUM_THREADS = 8
    with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS),
                    graph=graph if args.frozen_graph else None) as sess:
        sess.run(tf.global_variables_initializer())
        if args.frozen_graph:
            x_in = graph.get_tensor_by_name('x_in:0')
            pred_boxes = graph.get_tensor_by_name('add:0')
            pred_confidences = graph.get_tensor_by_name('Reshape_2:0')
        else:
            new_saver.restore(sess, args.weights)
            x_in = tf.get_collection('placeholders')[0]
            pred_boxes, pred_confidences = tf.get_collection('vars')
            #freeze_graph.freeze_graph("overfeat.pb", "", False, args.weights, "add,Reshape_2", "save/restore_all",
            #"save/Const:0", "overfeat_frozen.pb", False, '')

        pred_annolist = al.AnnoList()

        included_extensions = ['jpg', 'bmp', 'png', 'gif']
        image_names = [fn for fn in os.listdir(args.datadir) if any(fn.lower().endswith(ext) for ext in included_extensions)]
        image_dir = get_image_dir(args)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)
        for i in range(len(image_names)):
            image_name = image_names[i]
            if H['grayscale']:
                orig_img = cv2.imread(image_name)
                orig_img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB)
                if len(orig_img.shape) < 3:
                    orig_img = cv2.cvtColor(orig_img, cv2.COLOR_GRAY2RGB)
            else:
                orig_img = cv2.imread('%s/%s' % (data_dir, image_name))
                orig_img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB)
            img = cv2.resize(orig_img, (H["image_width"],H["image_height"]), interpolation=cv2.INTER_CUBIC)
            feed = {x_in: img}
            start_time = time()
            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences], feed_dict=feed)
            time_2 = time()
            pred_anno = al.Annotation()
            pred_anno.imageName = image_name
            new_img, rects = add_rectangles(H, [img], np_pred_confidences, np_pred_boxes,
                                            use_stitching=True, rnn_len=H['rnn_len'], min_conf=args.min_conf, tau=args.tau, show_suppressed=args.show_suppressed)
            print(time() - start_time)
            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1], test=True)
            pred_annolist.append(pred_anno)

            imname = '%s/%s' % (image_dir, os.path.basename(image_name))
            misc.imsave(imname, new_img)
            if i % 25 == 0:
                print(i)
    return pred_annolist
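load_frozen_graph is referenced above but not shown. A plausible TF1-style implementation, consistent with how the graph is queried afterwards (get_tensor_by_name on 'x_in:0', 'add:0', 'Reshape_2:0'), might look like this; it is a sketch, not necessarily the author's exact helper.

# A minimal sketch, assuming a standard frozen GraphDef file.
import tensorflow as tf

def load_frozen_graph(graphfile):
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(graphfile, 'rb') as f:
        graph_def.ParseFromString(f.read())
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def, name='')  # keep original tensor names like 'x_in:0'
    return graph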
Example #7
def process_results(queue, H, args, db_args, data_dir, ts):
    conn = psycopg2.connect(**db_args)
    cur = conn.cursor()
    while True:
        item = queue.get()
        if item is None:
            return

        (np_pred_boxes, np_pred_confidences), meta, VERSION = item
        pred_anno = al.Annotation()
        rects = get_rectangles(
            H,
            np_pred_confidences,
            np_pred_boxes,
            use_stitching=True,
            rnn_len=H['rnn_len'],
            min_conf=args.min_conf,
            tau=args.tau,
        )

        (roff, coff, filename, valid_geom, done, height, width,
         img_geom) = meta
        img_geom = shape(img_geom)

        pred_anno.rects = rects
        pred_anno.imagePath = os.path.abspath(data_dir)
        pred_anno = rescale_boxes((H["image_height"], H["image_width"]),
                                  pred_anno, height, width)

        bounds = img_geom.bounds
        ref_point = (bounds[3], bounds[0])  # top left corner

        for r in rects:
            minx, miny = raster_to_proj(r.x1 + coff, r.y1 + roff, img_geom,
                                        ref_point)
            maxx, maxy = raster_to_proj(r.x2 + coff, r.y2 + roff, img_geom,
                                        ref_point)
            building = box(minx, miny, maxx, maxy)

            cur.execute(
                """
                INSERT INTO buildings.buildings (filename, minx, miny, maxx, maxy, roff, coff, score, project, ts, version, geom)
                VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s::uuid, ST_GeomFromText(%s, 4326))
            """, (filename, int(r.x1), int(r.y1), int(r.x2), int(r.y2), roff,
                  coff, r.score, args.country, ts, VERSION, building.wkt))

        if done:
            cur.execute(
                "UPDATE buildings.images SET last_tested=%s WHERE project=%s AND filename=%s",
                (ts, args.country, filename))
            conn.commit()
            print('Committed image: %s' % filename)
Example #8
    def eval(self, weights, test_boxes, min_conf, tau, show_suppressed, expname):
        self.H["grid_width"] = self.H["image_width"] / self.H["region_size"]
        self.H["grid_height"] = self.H["image_height"] / self.H["region_size"]
        x_in = tf.placeholder(tf.float32, name='x_in', shape=[self.H['image_height'], self.H['image_width'], 3])
        if self.H['use_rezoom']:
            pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = self.build_forward(
                tf.expand_dims(x_in, 0), 'test', reuse=None)
            grid_area = self.H['grid_height'] * self.H['grid_width']
            pred_confidences = tf.reshape(
                tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * self.H['rnn_len'], 2])),
                [grid_area, self.H['rnn_len'], 2])
            if self.H['reregress']:
                pred_boxes = pred_boxes + pred_boxes_deltas
        else:
            pred_boxes, pred_logits, pred_confidences = self.build_forward(tf.expand_dims(x_in, 0), 'test', reuse=None)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver.restore(sess, weights)

            pred_annolist = al.AnnoList()

            true_annolist = al.parse(test_boxes)
            data_dir = os.path.dirname(test_boxes)
            image_dir = self.get_image_dir(weights, expname, test_boxes)
            subprocess.call('mkdir -p %s' % image_dir, shell=True)
            for i in range(len(true_annolist)):
                true_anno = true_annolist[i]
                print(true_anno.imageName)
                orig_img = cv2.imread(true_anno.imageName)[:, :, :3]
                orig_img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB)
                img = cv2.resize(orig_img, (self.H["image_width"], self.H["image_height"]))
                feed = {x_in: img}
                (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences], feed_dict=feed)
                pred_anno = al.Annotation()
                pred_anno.imageName = true_anno.imageName
                new_img, rects = add_rectangles(self.H, [img], np_pred_confidences, np_pred_boxes,
                                                use_stitching=True, rnn_len=self.H['rnn_len'], min_conf=min_conf,
                                                tau=tau, show_suppressed=show_suppressed)

                pred_anno.rects = rects
                pred_anno.imagePath = os.path.abspath(data_dir)
                pred_anno = rescale_boxes((self.H["image_height"], self.H["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1])
                pred_annolist.append(pred_anno)

                imname = '%s/%s' % (image_dir, os.path.basename(true_anno.imageName))
                cv2.imwrite(imname, new_img)
                if i % 25 == 0:
                    print(i)

        return pred_annolist, true_annolist
def hot_predict(image_path, parameters, to_json=True):
    """Makes predictions when all long running preparation operations are made. 
    
    Args:
        image_path (string): The path to the source image. 
        parameters (dict): The parameters produced by :func:`initialize`.
    Returns (Annotation):
        The annotation for the source image.
    """
    coord = []
    H = parameters['hypes']
    # The default options for prediction of bounding boxes.
    options = H['evaluate']
    if 'pred_options' in parameters:
        # The new options for prediction of bounding boxes
        for key, val in parameters['pred_options'].items():
            options[key] = val

    # predict
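    # NOTE: despite its name, image_path is treated as an image array here
    # (its .shape is read below when rescaling the boxes)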
    img = imresize(image_path, (H['image_height'], H['image_width']),
                   interp='cubic')
    (np_pred_boxes, np_pred_confidences) = parameters['sess'].\
        run([parameters['pred_boxes'], parameters['pred_confidences']], feed_dict={parameters['x_in']: img})
    pred_anno = al.Annotation()
    pred_anno.imageName = image_path
    _, rects = add_rectangles(H, [img],
                              np_pred_confidences,
                              np_pred_boxes,
                              use_stitching=True,
                              rnn_len=H['rnn_len'],
                              min_conf=options['min_conf'],
                              tau=options['tau'],
                              show_suppressed=False)

    pred_anno.rects = [r for r in rects if r.x1 < r.x2 and r.y1 < r.y2]
    pred_anno = rescale_boxes((H['image_height'], H['image_width']), pred_anno,
                              image_path.shape[0], image_path.shape[1])
    confident = [r for r in pred_anno.rects if r.score > options['min_conf']]
    result = [r.writeJSON() for r in confident] if to_json else pred_anno
    if not confident:
        coord = [0, 0, 0, 0, 0]
    else:
        last = confident[-1]
        coord = [last.x1, last.x2, last.y1, last.y2, last.score]

    return result, coord
Example #10
def postprocess_regular(image_info, np_pred_boxes, np_pred_confidences, H, options):
    pred_anno = al.Annotation()
    pred_anno.imageName = image_info['path']
    pred_anno.imagePath = os.path.abspath(image_info['path'])
    _, rects = add_rectangles(H, [image_info['transformed']], np_pred_confidences, np_pred_boxes, use_stitching=True,
                              rnn_len=H['rnn_len'], min_conf=options['min_conf'], tau=options['tau'],
                              show_suppressed=False)

    h, w = image_info['original_shape']
    if 'rotate90' in H['data'] and H['data']['rotate90']:
        # the original image height is the width of the rotated image
        rects = Rotate90.invert(h, rects)

    rects = [r for r in rects if r.x1 < r.x2 and r.y1 < r.y2 and r.score > options['min_conf']]
    pred_anno.rects = rects
    pred_anno = rescale_boxes((H['image_height'], H['image_width']), pred_anno, h, w)
    return pred_anno
Example #11
def get_results(args, H):
    tf.reset_default_graph()
    x_in = tf.placeholder(tf.float32, name='x_in', shape=[H['image_height'], H['image_width'], 3])
    H["grid_width"] = H["image_width"] / H["region_size"]
    H["grid_height"] = H["image_height"] / H["region_size"]
    if H['use_rezoom']:
        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])), [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, args.weights)

        pred_annolist = al.AnnoList()

        image_dir = args.logdir
        i = 0
        subprocess.call('mkdir -p %s' % image_dir, shell=True)
        for img_list in os.listdir(args.input_images):
            img_path = os.path.join(args.input_images, img_list)
            orig_img = imread(img_path)
            #img=orig_img[0:480,640:1280]
            img = imresize(orig_img, (H["image_height"], H["image_width"]), interp='cubic')
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences], feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = img_list
            new_img, rects = add_rectangles(H, [img], np_pred_confidences, np_pred_boxes,
                                            use_stitching=True, rnn_len=H['rnn_len'], min_conf=args.min_conf, tau=args.tau, show_suppressed=args.show_suppressed)
        
            pred_anno.rects = rects
            pred_anno.imagePath = args.input_images
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1])
            pred_annolist.append(pred_anno)
            
            imname = '%s/%s' % (args.logdir, img_list)
            misc.imsave(imname, new_img)
            i += 1
            if i % 25 == 0:
                print(i)
    return pred_annolist
Example #12
def get_results(args, H):
    tf.reset_default_graph()
    x_in = tf.placeholder(tf.float32, name='x_in', shape=[H['image_height'], H['image_width'], 3])
    if H['use_rezoom']:
        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])), [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # sess.run(tf.initialize_all_variables())      
        sess.run(tf.global_variables_initializer())
        # saver.restore(sess, './output/lstm_rezoom_lung_2017_01_17_18.24/save.ckpt-1000000')
        print('args.weights: %s' % (args.weights,))
        saver.restore(sess, args.weights)
        print('run')
        pred_annolist = al.AnnoList()

        true_annolist = al.parse(args.test_boxes)
        data_dir = os.path.dirname(args.test_boxes)
        image_dir = get_image_dir(args)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)
        for i in range(len(true_annolist)):
            true_anno = true_annolist[i]
            orig_img = imread('%s/%s' % (data_dir, true_anno.imageName))[:,:,:3]
            img = imresize(orig_img, (H["image_height"], H["image_width"]), interp='cubic')
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences], feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            new_img, rects = add_rectangles(H, [img], np_pred_confidences, np_pred_boxes,
                                            use_stitching=True, rnn_len=H['rnn_len'], min_conf=0.2, tau=args.tau)
        
            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1])
            pred_annolist.append(pred_anno)
            
            imname = '%s/%s' % (image_dir, os.path.basename(true_anno.imageName))
            misc.imsave(imname, new_img)
            if i % 25 == 0:
                print(i)
    return pred_annolist, true_annolist
Example #13
def get_results(args, H):
    tf.reset_default_graph()
    x_in = tf.placeholder(tf.float32, name='x_in', shape=[H['image_height'], H['image_width'], 3])  # image input placeholder (details of the input unclear)
    if H['use_rezoom']: 
        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])), [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, args.weights) 

        pred_annolist = al.AnnoList()

        true_annolist = al.parse(args.test_boxes)
        data_dir = os.path.dirname(args.test_boxes)
        image_dir = get_image_dir(args)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)
        for i in range(len(true_annolist)):
            true_anno = true_annolist[i]
            orig_img = imread('%s/%s' % (data_dir, true_anno.imageName))[:,:,:3]
            img = imresize(orig_img, (H["image_height"], H["image_width"]), interp='cubic')  # resize the image
            feed = {x_in: img}  # one image per step
            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences], feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            new_img, rects = add_rectangles(H, [img], np_pred_confidences, np_pred_boxes,  # add the predicted rectangles to the image
                                            use_stitching=True, rnn_len=H['rnn_len'], min_conf=args.min_conf, tau=args.tau, show_suppressed=args.show_suppressed)
        
            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1])  # rescale the predicted box coordinates to the original image size
            pred_annolist.append(pred_anno)
            
            imname = '%s/%s' % (image_dir, os.path.basename(true_anno.imageName))
            misc.imsave(imname, new_img)
            if i % 25 == 0:
                print(i)
    # pred_annolist: predictions (all confidence scores plus the predicted x1, y1, x2, y2)
    # true_annolist: x1, y1, x2, y2 exactly as given in the eval file
    return pred_annolist, true_annolist
Example #14
def sample(args, H, conn):
    tf.reset_default_graph()
    H["grid_width"] = H["image_width"] / H["region_size"]
    H["grid_height"] = H["image_height"] / H["region_size"]
    x_in = tf.placeholder(tf.float32,
                          name='x_in',
                          shape=[H['image_height'], H['image_width'], 3])
    if H['use_rezoom']:
        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(
                tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])),
            [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, args.weights)

        img_iter = RandomSampler(conn, args.country)

        img, orig, (roff, coff, filename, whole_img, img_geom) = next(img_iter)
        current = {
            'filename': filename,
            'whole_img': whole_img,
            'img_geom': img_geom,
            'boxes': []
        }
        cur = conn.cursor()

        upload_samples = 0
        while upload_samples < args.max_samples:
            if current['filename'] and current['filename'] != filename:
                res = process_file(current['filename'],
                                   np.array(current['boxes']),
                                   current['whole_img'], args.country,
                                   current['img_geom'])
                cur.execute(
                    "UPDATE buildings.images SET done=true WHERE project=%s AND filename=%s",
                    (args.country, current['filename']))
                conn.commit()
                print('Done with %s' % current['filename'])
                current = {
                    'filename': filename,
                    'whole_img': whole_img,
                    'img_geom': img_geom,
                    'boxes': []
                }
                upload_samples += 1 if res else 0

            img = imresize(img, (H["image_height"], H["image_width"]),
                           interp='cubic')
            feed = {x_in: img}
            (np_pred_boxes,
             np_pred_confidences) = sess.run([pred_boxes, pred_confidences],
                                             feed_dict=feed)
            pred_anno = al.Annotation()
            new_img, rects, _ = add_rectangles(
                H, [img],
                np_pred_confidences,
                np_pred_boxes,
                use_stitching=True,
                rnn_len=H['rnn_len'],
                min_conf=args.min_conf,
                tau=args.tau,
                show_suppressed=args.show_suppressed)
            pred_anno.rects = rects
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]),
                                      pred_anno, orig.shape[0], orig.shape[1])
            for r in rects:
                current['boxes'].append(
                    map(int,
                        [r.x1 + coff, r.y1 + roff, r.x2 + coff, r.y2 + roff]))

            img, orig, (roff, coff, filename, whole_img,
                        img_geom) = next(img_iter)
Example #15
def get_results(args, H):
    tf.reset_default_graph()
    H["grid_width"] = H["image_width"] / H["region_size"]
    H["grid_height"] = H["image_height"] / H["region_size"]
    x_in = tf.placeholder(tf.float32,
                          name='x_in',
                          shape=[H['image_height'], H['image_width'], 3])
    if H['use_rezoom']:
        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(
                tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])),
            [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    all_predictions = []
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        saver.restore(sess, args.weights)

        pred_annolist = al.AnnoList()

        true_annolist = al.parse(args.test_boxes)
        data_dir = os.path.join(os.path.dirname(args.test_boxes))

        false_positives, false_negatives, true_positives = 0, 0, 0

        total_time = 0.0

        image_dir = get_image_dir(args)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)
        for i in range(len(true_annolist)):
            true_anno = true_annolist[i]

            orig_img = imread('%s/%s' %
                              (data_dir, true_anno.imageName))[:, :, :3]
            img = imresize(orig_img, (H["image_height"], H["image_width"]),
                           interp='cubic')
            feed = {x_in: img}

            t0 = time.time()
            (np_pred_boxes,
             np_pred_confidences) = sess.run([pred_boxes, pred_confidences],
                                             feed_dict=feed)
            total_time += time.time() - t0

            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            new_img, rects, all_rects = add_rectangles(
                H, [img],
                np_pred_confidences,
                np_pred_boxes,
                use_stitching=True,
                rnn_len=H['rnn_len'],
                min_conf=args.min_conf,
                tau=args.tau,
                show_suppressed=args.show_suppressed)
            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]),
                                      pred_anno, orig_img.shape[0],
                                      orig_img.shape[1])
            pred_annolist.append(pred_anno)

            all_predictions.extend([[r.x1, r.y1, r.x2, r.y2, r.score, i]
                                    for r in all_rects])

            prediction = np.array([[r.x1, r.y1, r.x2, r.y2, r.score]
                                   for r in rects])
            targets = np.array([[r.x1, r.y1, r.x2, r.y2]
                                for r in true_anno.rects])

            fp, fn, tp, jaccard = get_metrics(targets, prediction)
            false_positives += fp
            false_negatives += fn
            true_positives += tp

            precision = np.float64(true_positives) / (true_positives +
                                                      false_positives)
            recall = np.float64(true_positives) / (true_positives +
                                                   false_negatives)

            print(
                '[%d/%d]: False positives: %d, False negatives: %d, True positives: %d, Precision: %f, Recall: %f'
                % (i, len(true_annolist), false_positives, false_negatives,
                   true_positives, precision, recall))

    df = pandas.DataFrame(all_predictions)
    df.columns = ['x1', 'y1', 'x2', 'y2', 'score', 'image_id']

    print('Total time: %.4f seconds, per image: %.4f' %
          (total_time, total_time / len(true_annolist)))

    return df
Example #16
def run_eval(H, checkpoint_dir , hypes_file, output_path):
  """Do Evaluation with full epoche of data.

  Args:
    H: Hypes
    checkpoint_dir: directory with checkpoint files
    output_path: path to save results
  """

  #Load GT
  true_idl = H['data']['test_idl']
  true_annos = al.parse(true_idl)

  # define output files
  pred_file = 'val_%s.idl' % os.path.basename(hypes_file).replace('.json', '')
  pred_idl = os.path.join(output_path, pred_file)
  true_file = 'true_%s.idl' % os.path.basename(hypes_file).replace('.json', '')
  true_idl_scaled = os.path.join(output_path, true_file)

  data_folder = os.path.dirname(os.path.realpath(true_idl))

  #Load Graph Model
  tf.reset_default_graph()
  googlenet = googlenet_load.init(H)
  x_in = tf.placeholder(tf.float32, name='x_in')
  if H['arch']['use_lstm']:
    lstm_forward = build_lstm_forward(H, tf.expand_dims(x_in, 0), 
                                 googlenet, 'test', reuse=None)
    pred_boxes, pred_logits, pred_confidences = lstm_forward
  else:
    overfeat_forward = build_overfeat_forward(H, tf.expand_dims(x_in, 0),
                                              googlenet, 'test')
    pred_boxes, pred_logits, pred_confidences = overfeat_forward

  start_time = time.time()  
  saver = tf.train.Saver()
  with tf.Session() as sess:
    logging.info("Starting Evaluation")
    sess.run(tf.initialize_all_variables())

    # Restore Checkpoints
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
      logging.info(ckpt.model_checkpoint_path)
      saver.restore(sess, ckpt.model_checkpoint_path)

    annolist = al.AnnoList()
    trueanno = al.AnnoList()

    # shuffle true_annos to randomize the plotted images
    shuffle(true_annos)

    for i in range(len(true_annos)):
      true_anno = true_annos[i]
      img = imread( os.path.join(data_folder, true_anno.imageName))

      # Rescale Boxes
      trueanno.append(rescale_boxes(img.shape, true_annos[i],
                                    H["arch"]["image_height"],
                                    H["arch"]["image_width"]))
      # Rescale Images
      img = imresize(img, (H["arch"]["image_height"],
                           H["arch"]["image_width"]), interp='cubic')

      feed = {x_in: img}
      (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes,
                                                       pred_confidences],
                                                      feed_dict=feed)
      pred_anno = al.Annotation()
      pred_anno.imageName = true_anno.imageName
      new_img, rects = add_rectangles([img], np_pred_confidences,
                                      np_pred_boxes, H["arch"],
                                      use_stitching=True,
                                      rnn_len=H['arch']['rnn_len'],
                                      min_conf=0.3)
  
      pred_anno.rects = rects
      annolist.append(pred_anno)

      if i % 20 == 0:
        # Draw every 20th image;
        # the plotted image is randomized due to shuffling
        duration = time.time() - start_time
        duration = float(duration)*1000/20
        out_img = os.path.join(output_path, 'test_%i.png'%i)
        scp.misc.imsave(out_img, new_img)
        logging.info('Step %d: Duration %.3f ms'
                                   % (i, duration))
        start_time = time.time()

  annolist.save(pred_idl)
  trueanno.save(true_idl_scaled)

  # write results to disk
  iou_threshold = 0.5
  rpc_cmd = './utils/annolist/doRPC.py --minOverlap %f %s %s' % (iou_threshold, true_idl_scaled,
                                                                 pred_idl)
  rpc_output = subprocess.check_output(rpc_cmd, shell=True)
  txt_file = [line for line in rpc_output.split('\n') if line.strip()][-1]
  output_png = os.path.join(output_path, "roc.png")
  plot_cmd = './utils/annolist/plotSimple.py %s --output %s' % (txt_file, output_png)
  plot_output = subprocess.check_output(plot_cmd, shell=True)
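A hedged invocation sketch for run_eval; all paths are placeholders, and H is assumed to be the hypes dict (with the 'data' and 'arch' sections read above) loaded from hypes_file.

# Hypothetical call; paths are placeholders.
import json

hypes_file = 'hypes/lstm.json'  # placeholder hypes file
with open(hypes_file) as f:
    H = json.load(f)
run_eval(H, checkpoint_dir='output/checkpoints',
         hypes_file=hypes_file, output_path='output/eval')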
Example #17
def run_eval(H, checkpoint_dir, hypes_file, output_path):
    """Do Evaluation with full epoche of data.

  Args:
    H: Hypes
    checkpoint_dir: directory with checkpoint files
    output_path: path to save results
  """

    #Load GT
    true_idl = H['data']['test_idl']
    true_annos = al.parse(true_idl)

    # define output files
    pred_file = 'val_%s.idl' % os.path.basename(hypes_file).replace(
        '.json', '')
    pred_idl = os.path.join(output_path, pred_file)
    true_file = 'true_%s.idl' % os.path.basename(hypes_file).replace(
        '.json', '')
    true_idl_scaled = os.path.join(output_path, true_file)

    data_folder = os.path.dirname(os.path.realpath(true_idl))

    #Load Graph Model
    tf.reset_default_graph()
    googlenet = googlenet_load.init(H)
    x_in = tf.placeholder(tf.float32, name='x_in')
    if H['arch']['use_lstm']:
        lstm_forward = build_lstm_forward(H,
                                          tf.expand_dims(x_in, 0),
                                          googlenet,
                                          'test',
                                          reuse=None)
        pred_boxes, pred_logits, pred_confidences = lstm_forward
    else:
        overfeat_forward = build_overfeat_forward(H, tf.expand_dims(x_in, 0),
                                                  googlenet, 'test')
        pred_boxes, pred_logits, pred_confidences = overfeat_forward

    start_time = time.time()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        logging.info("Starting Evaluation")
        sess.run(tf.initialize_all_variables())

        # Restore Checkpoints
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            logging.info(ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)

        annolist = al.AnnoList()
        trueanno = al.AnnoList()

        # shuffle true_annos to randomize the plotted images
        shuffle(true_annos)

        for i in range(len(true_annos)):
            true_anno = true_annos[i]
            img = imread(os.path.join(data_folder, true_anno.imageName))

            # Rescale Boxes
            trueanno.append(
                rescale_boxes(img.shape, true_annos[i],
                              H["arch"]["image_height"],
                              H["arch"]["image_width"]))
            # Rescale Images
            img = imresize(
                img, (H["arch"]["image_height"], H["arch"]["image_width"]),
                interp='cubic')

            feed = {x_in: img}
            (np_pred_boxes,
             np_pred_confidences) = sess.run([pred_boxes, pred_confidences],
                                             feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            new_img, rects = add_rectangles([img],
                                            np_pred_confidences,
                                            np_pred_boxes,
                                            H["arch"],
                                            use_stitching=True,
                                            rnn_len=H['arch']['rnn_len'],
                                            min_conf=0.3)

            pred_anno.rects = rects
            annolist.append(pred_anno)

            if i % 20 == 0:
                # Draw every 20th Image;
                # plotted Image is randomized due to shuffling
                duration = time.time() - start_time
                duration = float(duration) * 1000 / 20
                out_img = os.path.join(output_path, 'test_%i.png' % i)
                scp.misc.imsave(out_img, new_img)
                logging.info('Step %d: Duration %.3f ms' % (i, duration))
                start_time = time.time()

    annolist.save(pred_idl)
    trueanno.save(true_idl_scaled)

    # write results to disk
    iou_threshold = 0.5
    rpc_cmd = './utils/annolist/doRPC.py --minOverlap %f %s %s' % (
        iou_threshold, true_idl_scaled, pred_idl)
    rpc_output = subprocess.check_output(rpc_cmd, shell=True)
    txt_file = [line for line in rpc_output.split('\n') if line.strip()][-1]
    output_png = os.path.join(output_path, "roc.png")
    plot_cmd = './utils/annolist/plotSimple.py %s --output %s' % (txt_file,
                                                                  output_png)
    plot_output = subprocess.check_output(plot_cmd, shell=True)
def get_results(args, H):
    tf.reset_default_graph()
    x_in = tf.placeholder(tf.float32, name='x_in', shape=[H['image_height'], H['image_width'], 3])
    if H['use_rezoom']:
        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])), [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        saver.restore(sess, args.weights)

        pred_annolist = al.AnnoList()

        data_dir = os.path.dirname(args.test_boxes)
        image_dir = get_image_dir(args)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)

        # assumes module-level imports of sysv_ipc, struct, cv2, numpy as np,
        # and PyMouse (from pymouse), which are not shown in this snippet
        memory = sysv_ipc.SharedMemory(123463)
        memory2 = sysv_ipc.SharedMemory(123464)
        size = 768, 1024, 3

        pedal = PyMouse()
        pedal.press(1)
        road_center = 320
        while True:
            cv2.waitKey(1)
            frameCount = bytearray(memory.read())
            curve = bytearray(memory2.read())
            curve = str(struct.unpack('i', curve)[0])
            m = np.array(frameCount, dtype=np.uint8)
            orig_img = m.reshape(size)

            img = imresize(orig_img, (H["image_height"], H["image_width"]), interp='cubic')
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences], feed_dict=feed)
            pred_anno = al.Annotation()

            new_img, rects = add_rectangles(H, [img], np_pred_confidences, np_pred_boxes,
                                            use_stitching=True, rnn_len=H['rnn_len'], min_conf=args.min_conf,
                                            tau=args.tau, show_suppressed=args.show_suppressed)
            flag = 0
            road_center = 320 + int(curve)
            print(road_center)
            for rect in rects:
                print(rect.x1, rect.x2, rect.y2)
                if (rect.x1 < road_center and rect.x2 > road_center and rect.y2 > 200) and (rect.x2 - rect.x1 > 30):
                    flag = 1

            if flag == 1:
                pedal.press(2)
                print("brake!")
            else:
                pedal.release(2)
                pedal.press(1)
                print("acceleration!")

            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1])
            pred_annolist.append(pred_anno)

            cv2.imshow('.jpg', new_img)

    return None
Example #19
def get_results(args, H):
    tf.reset_default_graph()
    x_in = tf.placeholder(tf.float32,
                          name='x_in',
                          shape=[H['image_height'], H['image_width'], 3])
    if H['use_rezoom']:
        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(
                tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])),
            [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, args.weights)

        pred_annolist = al.AnnoList()

        true_annolist = al.parse(args.test_boxes)
        data_dir = os.path.dirname(args.test_boxes)
        image_dir = get_image_dir(args)
        os.makedirs(image_dir, exist_ok=True)
        print('Outputs will be stored in {}'.format(image_dir))
        for i in range(len(true_annolist)):
            try:
                true_anno = true_annolist[i]
                orig_img = imread('%s/%s' %
                                  (data_dir, true_anno.imageName))[:, :, :3]
                img = imresize(orig_img, (H["image_height"], H["image_width"]),
                               interp='cubic')
                feed = {x_in: img}
                (np_pred_boxes, np_pred_confidences) = sess.run(
                    [pred_boxes, pred_confidences], feed_dict=feed)
                pred_anno = al.Annotation()
                pred_anno.imageName = true_anno.imageName
                new_img, rects = add_rectangles(
                    H, [img],
                    np_pred_confidences,
                    np_pred_boxes,
                    use_stitching=True,
                    rnn_len=H['rnn_len'],
                    min_conf=args.min_conf,
                    tau=args.tau,
                    show_suppressed=args.show_suppressed)

                rects = [r for r in rects if r.x1 < r.x2 and r.y1 < r.y2]
                pred_anno.rects = rects
                pred_anno.imagePath = os.path.abspath(data_dir)
                pred_anno = rescale_boxes(
                    (H["image_height"], H["image_width"]),
                    pred_anno,
                    orig_img.shape[0],
                    orig_img.shape[1],
                    test=True)
                pred_annolist.append(pred_anno)

                imname = '%s/%s' % (image_dir,
                                    os.path.basename(true_anno.imageName))
                misc.imsave(imname, new_img)
            except FileNotFoundError:
                pass
            if i % 25 == 0:
                print(i)
    return pred_annolist, true_annolist
Example #20
def get_results(args, H):
    tf.reset_default_graph()
    x_in = tf.placeholder(tf.float32,
                          name='x_in',
                          shape=[H['image_height'], H['image_width'], 3])
    p1_x_in = tf.placeholder(tf.float32,
                             name='p1_x_in',
                             shape=[H['image_height'], H['image_width'], 3])
    p2_x_in = tf.placeholder(tf.float32,
                             name='p2_x_in',
                             shape=[H['image_height'], H['image_width'], 3])
    p3_x_in = tf.placeholder(tf.float32,
                             name='p3_x_in',
                             shape=[H['image_height'], H['image_width'], 3])
    p4_x_in = tf.placeholder(tf.float32,
                             name='p4_x_in',
                             shape=[H['image_height'], H['image_width'], 3])
    p5_x_in = tf.placeholder(tf.float32,
                             name='p5_x_in',
                             shape=[H['image_height'], H['image_width'], 3])
    p6_x_in = tf.placeholder(tf.float32,
                             name='p6_x_in',
                             shape=[H['image_height'], H['image_width'], 3])
    p7_x_in = tf.placeholder(tf.float32,
                             name='p7_x_in',
                             shape=[H['image_height'], H['image_width'], 3])
    p8_x_in = tf.placeholder(tf.float32,
                             name='p8_x_in',
                             shape=[H['image_height'], H['image_width'], 3])
    f_x_in = tf.placeholder(tf.float32,
                            name='f_x_in',
                            shape=[H['image_height'], H['image_width'], 3])

    if H['use_rezoom']:
        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward(
            H,
            tf.expand_dims(x_in, 0),
            tf.expand_dims(p1_x_in, 0),
            tf.expand_dims(p2_x_in, 0),
            tf.expand_dims(p3_x_in, 0),
            tf.expand_dims(p4_x_in, 0),
            tf.expand_dims(p5_x_in, 0),
            tf.expand_dims(p6_x_in, 0),
            tf.expand_dims(p7_x_in, 0),
            tf.expand_dims(p8_x_in, 0),
            tf.expand_dims(f_x_in, 0),
            'test',
            reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(
                tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])),
            [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        saver.restore(sess, args.weights)

        pred_annolist = al.AnnoList()

        true_annolist = al.parse(args.test_boxes)
        data_dir = os.path.dirname(args.test_boxes)
        image_dir = get_image_dir(args)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)
        count_error = list()
        for i in range(20):
            count_error.append(0)

        for i in range(len(true_annolist)):
            true_anno = true_annolist[i]
            orig_img = imread('%s/%s' %
                              (data_dir, true_anno.imageName))[:, :, :3]
            dir_path = os.path.dirname(true_anno.imageName)
            file_name = true_anno.imageName.split('/')[-1]
            (shotname, extension) = os.path.splitext(file_name)
            p1_image_path = data_dir + "/" + dir_path + "/" + (
                str(int(shotname) - 1)).zfill(4) + ".png"
            p2_image_path = data_dir + "/" + dir_path + "/" + (
                str(int(shotname) - 2)).zfill(4) + ".png"
            p3_image_path = data_dir + "/" + dir_path + "/" + (
                str(int(shotname) - 3)).zfill(4) + ".png"
            p4_image_path = data_dir + "/" + dir_path + "/" + (
                str(int(shotname) - 4)).zfill(4) + ".png"
            p5_image_path = data_dir + "/" + dir_path + "/" + (
                str(int(shotname) - 5)).zfill(4) + ".png"
            p6_image_path = data_dir + "/" + dir_path + "/" + (
                str(int(shotname) - 6)).zfill(4) + ".png"
            p7_image_path = data_dir + "/" + dir_path + "/" + (
                str(int(shotname) - 7)).zfill(4) + ".png"
            p8_image_path = data_dir + "/" + dir_path + "/" + (
                str(int(shotname) - 8)).zfill(4) + ".png"
            f_image_path = data_dir + "/" + dir_path + "/" + (
                str(int(shotname) + 1)).zfill(4) + ".png"
            if not os.path.exists(p1_image_path):
                print("File does not exist: %s" % p1_image_path)
                exit()
            if not os.path.exists(p2_image_path):
                print("File does not exist: %s" % p2_image_path)
                exit()
            if not os.path.exists(f_image_path):
                print("File does not exist: %s" % f_image_path)
                exit()

            p1_img = imread(p1_image_path)
            p2_img = imread(p2_image_path)
            p3_img = imread(p3_image_path)
            p4_img = imread(p4_image_path)
            p5_img = imread(p5_image_path)
            p6_img = imread(p6_image_path)
            p7_img = imread(p7_image_path)
            p8_img = imread(p8_image_path)
            f_img = imread(f_image_path)

            img = imresize(orig_img, (H["image_height"], H["image_width"]),
                           interp='cubic')
            feed = {
                x_in: img,
                p1_x_in: p1_img,
                p2_x_in: p2_img,
                p3_x_in: p3_img,
                p4_x_in: p4_img,
                p5_x_in: p5_img,
                p6_x_in: p6_img,
                p7_x_in: p7_img,
                p8_x_in: p8_img,
                f_x_in: f_img
            }
            (np_pred_boxes,
             np_pred_confidences) = sess.run([pred_boxes, pred_confidences],
                                             feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName

            true_count = len(true_anno.rects)
            # print true_count
            for j in range(20):
                min_confidence = (j * 1.0) / 20.0
                new_img, rects, count = add_rectangles(
                    H, [img],
                    np_pred_confidences,
                    np_pred_boxes,
                    use_stitching=True,
                    rnn_len=H['rnn_len'],
                    min_conf=min_confidence,
                    tau=args.tau,
                    show_suppressed=args.show_suppressed)
                count_error[j] += abs(count - true_count)

            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]),
                                      pred_anno, orig_img.shape[0],
                                      orig_img.shape[1])
            pred_annolist.append(pred_anno)

            # imname = '%s/%s' % (image_dir, os.path.basename(true_anno.imageName))
            # misc.imsave(imname, new_img)
            if i % 25 == 0:
                print(i)

    print "Count error: %s" % str(min(count_error) / 350.0)

    return pred_annolist, true_annolist
Example #21
def get_results(args, H):
    tf.reset_default_graph()
    x_in = tf.placeholder(tf.float32,
                          name='x_in',
                          shape=[H['image_height'], H['image_width'], 3])
    if H['use_rezoom']:
        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(
                tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])),
            [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        saver.restore(sess, args.weights)

        pred_annolist = al.AnnoList()

        true_annolist = al.parse(args.test_boxes)
        data_dir = os.path.dirname(args.test_boxes)
        image_dir = get_image_dir(args)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)
        for i in range(len(true_annolist)):
            true_anno = true_annolist[i]
            #orig_img = imread('%s/%s' % (data_dir, true_anno.imageName))[:,:,:3]
            orig_img = imread('%s' % (true_anno.imageName))
            img = imresize(orig_img, (H["image_height"], H["image_width"]),
                           interp='cubic')
            # replicate the single grey channel so the 3-channel placeholder can be fed
            x = np.reshape(img, (H['image_height'], H['image_width'], 1))
            new_x = np.repeat(x, 3, axis=2).astype(np.float32)
            feed = {x_in: new_x}
            (np_pred_boxes,
             np_pred_confidences) = sess.run([pred_boxes, pred_confidences],
                                             feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            new_img, rects = add_rectangles(
                H, [img],
                np_pred_confidences,
                np_pred_boxes,
                use_stitching=True,
                rnn_len=H['rnn_len'],
                min_conf=args.min_conf,
                tau=args.tau,
                show_suppressed=args.show_suppressed)

            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]),
                                      pred_anno, orig_img.shape[0],
                                      orig_img.shape[1])
            pred_annolist.append(pred_anno)

            imname = '%s/%s' % (image_dir, os.path.basename(
                true_anno.imageName))
            misc.imsave(imname, new_img)
            for i in range(0, len(rects)):
                if rects[i].score > 0.1:
                    print pred_anno.imageName
                    print "%s %s %s %s %s" % (rects[i].x1, rects[i].x2,
                                              rects[i].y1, rects[i].y2,
                                              rects[i].score)
                    # print r.writeJSON(rects[i])
            #if i % 25 == 0:
            #    print(i)
    return pred_annolist, true_annolist
Example #22
def get_results(args, H):
    tf.reset_default_graph()
    H["grid_width"] = H["image_width"] / H["region_size"]
    H["grid_height"] = H["image_height"] / H["region_size"]
    x_in = tf.placeholder(tf.float32,
                          name='x_in',
                          shape=[H['image_height'], H['image_width'], 3])
    if H['use_rezoom']:
        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(
                tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])),
            [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, args.weights)

        pred_annolist = al.AnnoList()

        true_annolist = al.parse(args.test_boxes)
        data_dir = os.path.dirname(args.test_boxes)
        image_dir = get_image_dir(args)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)
        for i in range(len(true_annolist)):
            true_anno = true_annolist[i]
            orig_img = cv2.imread(os.path.join(data_dir,
                                               true_anno.imageName))[:, :, :3]
            img = cv2.resize(orig_img, (H["image_width"], H["image_height"]),
                             interpolation=cv2.INTER_CUBIC)

            feed = {x_in: img}
            (np_pred_boxes,
             np_pred_confidences) = sess.run([pred_boxes, pred_confidences],
                                             feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            new_img, rects = add_rectangles(
                H, [img],
                np_pred_confidences,
                np_pred_boxes,
                use_stitching=True,
                rnn_len=H['rnn_len'],
                min_conf=args.min_conf,
                tau=args.tau,
                show_suppressed=args.show_suppressed)

            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]),
                                      pred_anno, orig_img.shape[0],
                                      orig_img.shape[1])
            pred_annolist.append(pred_anno)

            imname = os.path.join(image_dir,
                                  os.path.basename(true_anno.imageName))
            cv2.imwrite(imname, new_img)
            if i % 25 == 0:
                print(i)
    return pred_annolist, true_annolist
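For orientation, variants of get_results like the two above are normally invoked from a small command-line driver that loads the hypes JSON stored next to the checkpoint (as the next example does) and then iterates over the returned annotation lists. The following is only a minimal sketch, assuming argparse flags named after the attributes the snippets read from args (weights, test_boxes, expname, min_conf, tau, show_suppressed); none of this code comes from the examples themselves.

import argparse
import json
import os

def run_evaluation():
    # hypothetical driver; flag names mirror the `args` attributes used above
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', required=True)        # checkpoint file
    parser.add_argument('--test_boxes', required=True)     # annotation list file
    parser.add_argument('--expname', default='')
    parser.add_argument('--min_conf', type=float, default=0.2)
    parser.add_argument('--tau', type=float, default=0.25)
    parser.add_argument('--show_suppressed', action='store_true')
    args = parser.parse_args()

    # hypes.json is assumed to sit next to the checkpoint, as in the next example
    with open('%s/hypes.json' % os.path.dirname(args.weights), 'r') as f:
        H = json.load(f)

    pred_annolist, true_annolist = get_results(args, H)
    print('evaluated %d images' % len(true_annolist))

if __name__ == '__main__':
    run_evaluation()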
Example #23
def main(args, logger):
    # setup
    logger.info(args)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = args.gpu_fraction

    # path
    path_hypes_file = '{}/hypes.json'.format(os.path.dirname(args.weights))
    with open(path_hypes_file, 'r') as f:
        H = json.load(f)
    expname = args.expname + '_' if args.expname else ''

    # graph
    tf.reset_default_graph()
    H['grid_width'] = H['image_width'] // H['region_size']
    H['grid_height'] = H['image_height'] // H['region_size']
    X = tf.placeholder(tf.float32,
                       name='input',
                       shape=(H['image_height'], H['image_width'], 3))
    if H['use_rezoom']:
        (pred_boxes, pred_logits, pred_confidences, pred_confs_deltas,
         pred_boxes_deltas) = build_forward(H,
                                            tf.expand_dims(X, 0),
                                            'test',
                                            reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        reshape_shape = [grid_area * H['rnn_len'], 2]
        pred_confidences = tf.reshape(
            tf.nn.softmax(tf.reshape(pred_confs_deltas, reshape_shape)),
            reshape_shape)
        pred_boxes = pred_boxes + pred_boxes_deltas if H[
            'reregress'] else pred_boxes
    else:
        (pred_boxes, pred_logits,
         pred_confidences) = build_forward(H,
                                           tf.expand_dims(X, 0),
                                           'test',
                                           reuse=None)

    # load checkpoint
    saver = tf.train.Saver()
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, args.weights)

        pred_annolist = al.AnnoList()

        # get all video candidates
        video_paths = glob(
            os.path.join(args.video_root, '*.{}'.format(args.video_type)))
        for v in video_paths:
            video_fullname = '.'.join(v.split('.')[:-1])
            video_name = video_fullname.split('/')[-1]
            txtname = video_fullname + '_detection.txt'
            txtname = '/'.join(
                [args.outputdir, video_name,
                 txtname.split('/')[-1]])
            if os.path.isfile(txtname):
                logger.info('{} existed, pass'.format(txtname))
                continue
            if not os.path.exists(os.path.dirname(txtname)):
                os.makedirs(os.path.dirname(txtname))

            logger.info('Predicting {}'.format(os.path.basename(v)))

            # video operation
            cap = cv2.VideoCapture(v)
            total_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            resolution = tuple(map(int, (cap.get(3), cap.get(4))))
            filename = 'detected_{}'.format(os.path.basename(v))

            # output video
            if args.output_video:
                outputdir = os.path.join(
                    args.outputdir, '{}-skip-{}-count-{}'.format(
                        datetime.now().strftime('%Y%m%d'), args.skip_nframe,
                        args.frame_count or 'all'))
                if not os.path.exists(outputdir):
                    os.makedirs(outputdir)
                out = cv2.VideoWriter(os.path.join(outputdir, filename),
                                      fourcc, 15, resolution)

            data = []
            logger.info('total {} skip {}'.format(total_frame,
                                                  args.skip_nframe))
            for frame_idx in tqdm(range(0, total_frame, args.skip_nframe)):
                if args.frame_count and len(data) > args.frame_count:
                    break
                if not cap.isOpened():
                    logger.error('{} is closed'.format(os.path.basename(v)))
                ok, frame = cap.read()

                if ok:
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    image = cv2.resize(frame,
                                       (H['image_width'], H['image_height']))
                    (np_pred_boxes, np_pred_confidences) = sess.run(
                        [pred_boxes, pred_confidences], feed_dict={X: image})
                    pred_anno = al.Annotation()
                    new_img, rects = add_rectangles(
                        H, [image],
                        np_pred_confidences,
                        np_pred_boxes,
                        use_stitching=True,
                        rnn_len=H['rnn_len'],
                        min_conf=args.min_conf,
                        tau=args.tau,
                        show_suppressed=args.suppressed)

                    pred_anno.rects = rects
                    pred_anno = rescale_boxes(
                        (H["image_height"], H["image_width"]), pred_anno,
                        frame.shape[0], frame.shape[1])

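                    # collect [y1, x1, y2, x2, score] per detection, clipping
                    # coordinates to be non-negative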
                    results = []
                    for r in pred_anno.rects:
                        results.append([
                            max(r.y1, 0),
                            max(r.x1, 0),
                            max(r.y2, 0),
                            max(r.x2, 0), r.score
                        ])
                    data.append(str([frame_idx + 1, results]) + '\n')
                    pred_annolist.append(pred_anno)
                    if args.output_video:
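                        # note: new_img is in RGB order here (the frame was converted
                        # with cv2.cvtColor above), while cv2.VideoWriter.write expects
                        # BGR, so the saved video's colours may appear swapped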
                        out.write(new_img)
                else:
                    logger.warning('cannot read frame {}'.format(frame_idx))

            cap.release()
            if args.output_video:
                out.release()

            with open(txtname, 'w+') as f:
                f.writelines(data)
Example #24
    def pred(self, weights, test_boxes, min_conf, tau, show_suppressed,
             expname):
        self.H["grid_width"] = self.H["image_width"] / self.H["region_size"]
        self.H["grid_height"] = self.H["image_height"] / self.H["region_size"]
        x_in = tf.placeholder(
            tf.float32,
            name='x_in',
            shape=[self.H['image_height'], self.H['image_width'], 3])

        if self.H['use_rezoom']:
            pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = self.build_forward(
                tf.expand_dims(x_in, 0), 'test', reuse=None)
            grid_area = self.H['grid_height'] * self.H['grid_width']
            pred_confidences = tf.reshape(
                tf.nn.softmax(
                    tf.reshape(pred_confs_deltas,
                               [grid_area * self.H['rnn_len'], 2])),
                [grid_area, self.H['rnn_len'], 2])
            if self.H['reregress']:
                pred_boxes = pred_boxes + pred_boxes_deltas
        else:
            pred_boxes, pred_logits, pred_confidences = self.build_forward(
                tf.expand_dims(x_in, 0), 'test', reuse=None)

        rect_list = []

        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver.restore(sess, weights)

            pred_annolist = al.AnnoList()

            true_annolist = al.parse(test_boxes)
            data_dir = os.path.dirname(test_boxes)
            image_dir = self.get_image_dir(weights, expname, test_boxes)
            subprocess.call('mkdir -p %s' % image_dir, shell=True)
            for i in range(len(true_annolist)):
                true_anno = true_annolist[i]
                orig_img = imread('%s/%s' %
                                  (data_dir, true_anno.imageName))[:, :, :3]
                img = imresize(orig_img,
                               (self.H["image_height"], self.H["image_width"]),
                               interp='cubic')
                feed = {x_in: img}
                (np_pred_boxes, np_pred_confidences) = sess.run(
                    [pred_boxes, pred_confidences], feed_dict=feed)

                pred_anno = al.Annotation()
                pred_anno.imageName = true_anno.imageName
                new_img, rects = add_rectangles(
                    self.H, [img],
                    np_pred_confidences,
                    np_pred_boxes,
                    use_stitching=True,
                    rnn_len=self.H['rnn_len'],
                    min_conf=min_conf,
                    tau=tau,
                    show_suppressed=show_suppressed)
                print 'tb model ', len(rects)
                #
                pred_anno.rects = rects
                pred_anno.imagePath = os.path.abspath(data_dir)
                pred_anno = rescale_boxes(
                    (self.H["image_height"], self.H["image_width"]), pred_anno,
                    orig_img.shape[0], orig_img.shape[1])
                pred_annolist.append(pred_anno)

                imname = 'box_sample.jpg'
                misc.imsave(imname, new_img)
                if i % 25 == 0:
                    print(i)
#
                print pred_anno.imageName
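                # normalise the box corners by an assumed 640x480 frame size
                # (x by width, y by height)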
                for rect_i in range(0, len(rects)):
                    rect_list.append({})
                    rect_list[-1]['x_min'] = rects[rect_i].left() / 640.
                    rect_list[-1]['x_max'] = rects[rect_i].right() / 640.
                    rect_list[-1]['y_min'] = rects[rect_i].top() / 480.
                    rect_list[-1]['y_max'] = rects[rect_i].bottom() / 480.

        return rect_list
Example #25
def get_results(args, H):
    tf.reset_default_graph()
    x_in = tf.placeholder(tf.float32,
                          name='x_in',
                          shape=[H['image_height'], H['image_width'], 3])
    if H['use_rezoom']:
        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(
            tf.nn.softmax(
                tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])),
            [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(
            H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        saver.restore(sess, args.weights)

        pred_annolist = al.AnnoList()

        data_dir = os.path.dirname(args.test_boxes)
        image_dir = get_image_dir(args)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)

        memory = sysv_ipc.SharedMemory(123463)
        memory2 = sysv_ipc.SharedMemory(123464)
        size = 768, 1024, 3

        pedal = PyMouse()
        pedal.press(1)
        road_center = 320
        while True:
            cv2.waitKey(1)
            frameCount = bytearray(memory.read())
            curve = bytearray(memory2.read())
            curve = str(struct.unpack('i', curve)[0])
            m = np.array(frameCount, dtype=np.uint8)
            orig_img = m.reshape(size)

            img = imresize(orig_img, (H["image_height"], H["image_width"]),
                           interp='cubic')
            feed = {x_in: img}
            (np_pred_boxes,
             np_pred_confidences) = sess.run([pred_boxes, pred_confidences],
                                             feed_dict=feed)
            pred_anno = al.Annotation()

            new_img, rects = add_rectangles(
                H, [img],
                np_pred_confidences,
                np_pred_boxes,
                use_stitching=True,
                rnn_len=H['rnn_len'],
                min_conf=args.min_conf,
                tau=args.tau,
                show_suppressed=args.show_suppressed)
            flag = 0
            road_center = 320 + int(curve)
            print(road_center)
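            # brake if any detected box straddles the road centre, extends below
            # y = 200 (presumably meaning it is close), and is at least 30 px wide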
            for rect in rects:
                print(rect.x1, rect.x2, rect.y2)
                if (rect.x1 < road_center and rect.x2 > road_center
                        and rect.y2 > 200) and (rect.x2 - rect.x1 > 30):
                    flag = 1

            if flag == 1:
                pedal.press(2)
                print("brake!")
            else:
                pedal.release(2)
                pedal.press(1)
                print("acceleration!")

            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]),
                                      pred_anno, orig_img.shape[0],
                                      orig_img.shape[1])
            pred_annolist.append(pred_anno)

            cv2.imshow('.jpg', new_img)

    return None
Example #26
def get_results(args, H):
    tf.reset_default_graph()
    x_in = tf.placeholder(tf.float32, name='x_in', shape=[H['image_height'], H['image_width'], 3])
    if H['use_rezoom']:
        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None)
        grid_area = H['grid_height'] * H['grid_width']
        pred_confidences = tf.reshape(tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])), [grid_area, H['rnn_len'], 2])
        if H['reregress']:
            pred_boxes = pred_boxes + pred_boxes_deltas
    else:
        pred_boxes, pred_logits, pred_confidences = build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        saver.restore(sess, args.weights)

        pred_annolist = al.AnnoList()

        true_annolist = al.parse(args.test_boxes)
        data_dir = os.path.dirname(args.test_boxes)
        image_dir = get_image_dir(args)
        #subprocess.call('mkdir -p %s' % image_dir, shell=True)

        #ivc = cv2.VideoCapture('/home/caucse/images/ets.mp4')
        #c=1

        #if vc.isOpened():
        #    rval , frame = vc.read()
        #else:
        #    rval = False

        memory = sysv_ipc.SharedMemory(123463)
        memory2 = sysv_ipc.SharedMemory(123464)
        size = 768, 1024, 3

        pedal = PyMouse()
        pedal.press(1)
        road_center = 320
        while True:
            #rval, frame = vc.read()
            #c = c + 1
            #if c % 6 is 0:
            #    c = c + 1
            #time.sleep(0.5)
            cv2.waitKey(1)
            frameCount = bytearray(memory.read())
            curve = bytearray(memory2.read())
            curve = str(struct.unpack('i', curve)[0])
            m = np.array(frameCount, dtype=np.uint8)
            orig_img = m.reshape(size)
            #print orig_img[0]
            #cv2.imshow('1', m)

            #true_anno = true_annolist[i]
            #orig_img = imread('%s/%s' % (data_dir, true_anno.imageName))[:,:,:3]
            #orig_img = imread('/home/caucse/images/1.jpg')
            #orig_img = m
            img = imresize(orig_img, (H["image_height"], H["image_width"]), interp='cubic')
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences], feed_dict=feed)
            pred_anno = al.Annotation()
            #pred_anno.imageName = true_anno.imageName
            new_img, rects = add_rectangles(H, [img], np_pred_confidences, np_pred_boxes,
                                            use_stitching=True, rnn_len=H['rnn_len'], min_conf=args.min_conf, tau=args.tau, show_suppressed=args.show_suppressed)
            flag = 0
            road_center = 320 + int(curve)
            print(road_center)
            for rect in rects:
                print(rect.x1, rect.x2, rect.y2)
                if (rect.x1 < road_center and rect.x2 > road_center and rect.y2 > 200) and (rect.x2 - rect.x1 > 30):
                    flag = 1

            if flag == 1:
                pedal.press(2)
                print("brake!")
            else:
                pedal.release(2)
                pedal.press(1)
                print("acceleration!")

            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1])
            pred_annolist.append(pred_anno)
            #imname = '%s/%s' % (image_dir, os.path.basename(true_anno.imageName))
            #imname = '/home/caucse/images/_%s.jpg' % (c)
            cv2.imshow('.jpg', new_img)
            #misc.imsave(imname, new_img)
            #if c % 25 == 0:
            #    print(c)

        # NOTE: the loop below is unreachable because the while-loop above never breaks
        for i in range(len(true_annolist)):
            true_anno = true_annolist[i]
            #orig_img = imread('%s/%s' % (data_dir, true_anno.imageName))[:,:,:3]
            orig_img = imread('/home/caucse/images/1.jpg')
            img = imresize(orig_img, (H["image_height"], H["image_width"]), interp='cubic')
            feed = {x_in: img}
            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences], feed_dict=feed)
            pred_anno = al.Annotation()
            pred_anno.imageName = true_anno.imageName
            new_img, rects = add_rectangles(H, [img], np_pred_confidences, np_pred_boxes,
                                            use_stitching=True, rnn_len=H['rnn_len'], min_conf=args.min_conf, tau=args.tau, show_suppressed=args.show_suppressed)

            for rect in rects:
                print(rect.x1, rect.y1, rect.x2, rect.y2)

            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1])
            pred_annolist.append(pred_anno)
            #imname = '%s/%s' % (image_dir, os.path.basename(true_anno.imageName))
            imname = '/home/caucse/images/_1.jpg'
            misc.imsave(imname, new_img)
            if i % 25 == 0:
                print(i)
    return pred_annolist, true_annolist