def predict_image(network,
                  path,
                  only_classes=None,
                  ignore_classes=None,
                  save_path=None):
    click.echo('Predicting {}...'.format(path), nl=False)

    # Open and read the image to predict.
    with tf.gfile.Open(path, 'rb') as f:
        try:
            image = Image.open(f).convert('RGB')
        except (tf.errors.OutOfRangeError, OSError) as e:
            click.echo()
            click.echo('Error while processing {}: {}'.format(path, e))
            return

    # Run image through the network.
    objects = network.predict_image(image)

    # Filter the results according to the user input.
    objects = filter_classes(objects,
                             only_classes=only_classes,
                             ignore_classes=ignore_classes)

    # Save predicted image.
    if save_path:
        vis_objects(np.array(image), objects).save(save_path)

    click.echo(' done.')
    return objects
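
# A minimal sketch of the `filter_classes` helper used above, assuming each
# prediction is a dict with a 'label' key (as produced by `predict_image`).
# This illustrates the filtering step; it is not necessarily the library's
# own implementation.
def filter_classes(objects, only_classes=None, ignore_classes=None):
    if ignore_classes:
        objects = [obj for obj in objects if obj['label'] not in ignore_classes]
    if only_classes:
        objects = [obj for obj in objects if obj['label'] in only_classes]
    return objects
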
def predict(model_name):
    if request.method == 'GET':
        return jsonify(error='Use POST method to send image.'), 400

    try:
        image_array = get_image2()
    except ValueError:
        return jsonify(error='Missing image'), 400
    except OSError:
        return jsonify(error='Incompatible file type'), 400

    total_predictions = request.args.get('total')
    if total_predictions is not None:
        try:
            total_predictions = int(total_predictions)
        except ValueError:
            total_predictions = None

    # Wait for the model to finish loading.
    NETWORK_START_THREAD.join()

    objects = PREDICTOR_NETWORK.predict_image(image_array)

    # Re-read the uploaded file so the predictions can be drawn on the
    # original image.
    image = request.files.get('image')
    if not image:
        return jsonify(error='Missing image'), 400

    img = Image.open(image.stream).convert('RGB')
    vis_objects(np.array(img), objects).save("/tmp/luminoth/data.png")

    # Keep the full prediction list around in a module-level variable and trim
    # the response to the requested number of predictions, if any.
    global ouputObjects
    ouputObjects = objects
    objects = objects[:total_predictions]

    return jsonify({'objects': objects})
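
# A hedged client-side sketch for the endpoint above. The route decorator is
# not part of the snippet, so the URL pattern (`/api/<model_name>/predict/`),
# the default model name, host and port below are assumptions; adjust them to
# match the actual Flask app. The handler reads the upload from a multipart
# field named 'image' and the optional `total` limit from the query string.
import requests

def request_predictions(image_path, model_name='fasterrcnn', total=5,
                        host='http://localhost:5000'):
    url = '{}/api/{}/predict/'.format(host, model_name)
    with open(image_path, 'rb') as f:
        response = requests.post(url, files={'image': f},
                                 params={'total': total})
    response.raise_for_status()
    return response.json()['objects']
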
def predict_video(network,
                  path,
                  only_classes=None,
                  ignore_classes=None,
                  save_path=None):
    if save_path:
        # We hardcode the video output to mp4 for the time being.
        save_path = os.path.splitext(save_path)[0] + '.mp4'
        try:
            writer = skvideo.io.FFmpegWriter(save_path)
        except AssertionError as e:
            tf.logging.error(e)
            tf.logging.error(
                'Please install ffmpeg before making video predictions.')
            exit()
    else:
        click.echo(
            'Video not being saved. Note that for the time being, no JSON '
            'output is being generated. Did you mean to specify `--save-path`?'
        )

    num_of_frames = int(skvideo.io.ffprobe(path)['video']['@nb_frames'])

    video_progress_bar = click.progressbar(skvideo.io.vreader(path),
                                           length=num_of_frames,
                                           label='Predicting {}'.format(path))

    colormap = build_colormap()

    objects_per_frame = []
    with video_progress_bar as bar:
        try:
            start_time = time.time()
            for idx, frame in enumerate(bar):
                # Run image through network.
                objects = network.predict_image(frame)

                # Filter the results according to the user input.
                objects = filter_classes(objects,
                                         only_classes=only_classes,
                                         ignore_classes=ignore_classes)

                objects_per_frame.append({'frame': idx, 'objects': objects})

                # Draw the image and write it to the video file.
                if save_path:
                    image = vis_objects(frame, objects, colormap=colormap)
                    writer.writeFrame(np.array(image))

            stop_time = time.time()
            click.echo('fps: {0:.1f}'.format(num_of_frames /
                                             (stop_time - start_time)))
        except RuntimeError as e:
            click.echo()  # Error prints next to progress bar otherwise.
            click.echo('Error while processing {}: {}'.format(path, e))
            if save_path:
                click.echo('Partially processed video file saved in {}'.format(
                    save_path))

    if save_path:
        writer.close()

    return objects_per_frame
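
# `predict_video` returns the per-frame detections but, as its own warning
# notes, writes no JSON output. A minimal sketch of persisting that result,
# assuming the prediction dicts are JSON-serializable (plain lists/floats);
# the output file name below is only an illustration.
import json

def save_video_predictions(objects_per_frame, json_path='predictions.json'):
    with open(json_path, 'w') as f:
        json.dump(objects_per_frame, f, indent=2)
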
def predict(model_name):
    try:
        # TODO ADD more models
        if request.method == 'GET':
            # Read the prediction options from the query string.
            total_predictions = request.args.get('total', None)
            min_prob = request.args.get('min_prob', None)
            only_number = request.args.get('only_number', False)
            id_task = request.args.get('id', False)

            try:
                image_array = get_image_url()
            except ValueError:
                return jsonify(error='Missing image', count=-2)
            except OSError:
                return jsonify(error='Incompatible file type', count=-3)
        else:  # POST
            # Read the prediction options from the form data.
            total_predictions = request.form.get('total', None)
            min_prob = request.form.get('min_prob', None)
            only_number = request.form.get('only_number', False)
            id_task = request.form.get('id', False)
            try:
                image_array = get_image()
            except ValueError:
                return jsonify(error='Missing image', count=-2)
            except OSError:
                return jsonify(error='Incompatible file type', count=-3)

        if total_predictions is not None:
            try:
                total_predictions = int(total_predictions)
            except ValueError:
                total_predictions = None
        if min_prob is not None:
            try:
                min_prob = float(min_prob)
            except ValueError:
                min_prob = None
        # Form values arrive as strings, so the literal "False" disables the
        # count-only mode.
        if only_number == "False":
            only_number = None

        if not id_task:
            return jsonify(error='Missing task_id', count=-4)

        # Wait for the model to finish loading.
        NETWORK_START_THREAD.join()

        objects = PREDICTOR_NETWORK.predict_image(image_array)

        if min_prob:
            objects = [obj for obj in objects if obj['prob'] >= min_prob]

        if total_predictions:
            objects = objects[:total_predictions]

        # Save the predicted image, tagged with the task id.
        if SAVE_PATH_GLOBAL:
            output_path = "{}{}_Counted.jpg".format(SAVE_PATH_GLOBAL, id_task)
            vis_objects(np.array(image_array), objects).save(output_path)

        if only_number:
            return jsonify({'count': len(objects)})
        
        return jsonify({'objects': objects, 'count': len(objects)})
    except Exception as e:
        return jsonify(error='Unknown error', data=str(e), count=-666)
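
# A hedged client sketch for the GET branch above. The route, host and the
# query parameter consumed by `get_image_url()` are not shown in the snippet,
# so the endpoint, the default model name and the 'url' parameter name below
# are assumptions made for illustration only.
import requests

def count_objects(image_url, id_task, model_name='fasterrcnn', min_prob=0.5,
                  host='http://localhost:5000'):
    endpoint = '{}/api/{}/predict/'.format(host, model_name)
    response = requests.get(endpoint, params={
        'url': image_url,       # assumed name read by get_image_url()
        'min_prob': min_prob,
        'only_number': 'True',  # anything but the string "False" keeps count-only mode on
        'id': id_task,
    })
    response.raise_for_status()
    return response.json().get('count')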