Example #1
def web(config_files, checkpoint, override_params, host, port, debug):
    if debug:
        tf.logging.set_verbosity(tf.logging.DEBUG)
    else:
        tf.logging.set_verbosity(tf.logging.INFO)

    if checkpoint:
        config = get_checkpoint_config(checkpoint)
    elif config_files:
        config = get_config(config_files)
    else:
        click.echo(
            'Neither checkpoint nor config specified, assuming `accurate`.')
        config = get_checkpoint_config('accurate')

    if override_params:
        config = override_config_params(config, override_params)

    # Bounding boxes will be filtered by frontend (using slider), so we set a
    # low threshold.
    if config.model.type == 'fasterrcnn':
        config.model.rcnn.proposals.min_prob_threshold = 0.01
    elif config.model.type == 'ssd':
        config.model.proposals.min_prob_threshold = 0.01
    else:
        raise ValueError("Model type '{}' not supported".format(
            config.model.type))

    # Initialize model
    global NETWORK_START_THREAD
    NETWORK_START_THREAD = Thread(target=start_network, args=(config, ))
    NETWORK_START_THREAD.start()

    app.run(host=host, port=port, debug=debug)
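
In Luminoth this function is exposed as a click CLI command. For reference, here is a minimal sketch of how a signature like this is typically wired up with click; the option names and defaults below are assumptions, not Luminoth's actual definitions:

import click

# Hypothetical wiring for the `web` command above; the real decorators live
# inside Luminoth, so these names and defaults are illustrative only.
@click.command(help='Start a web server to serve model predictions.')
@click.option('config_files', '--config', '-c', multiple=True)
@click.option('--checkpoint', help='Checkpoint id or alias to load.')
@click.option('override_params', '--override', '-o', multiple=True)
@click.option('--host', default='127.0.0.1')
@click.option('--port', default=5000, type=int)
@click.option('--debug', is_flag=True)
def web(config_files, checkpoint, override_params, host, port, debug):
    ...  # body as in Example #1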
Example #2
def web(config_files, checkpoint, override_params, host, port, debug):
    if debug:
        tf.logging.set_verbosity(tf.logging.DEBUG)
    else:
        tf.logging.set_verbosity(tf.logging.INFO)

    if checkpoint:
        config = get_checkpoint_config(checkpoint)
    elif config_files:
        config = get_config(config_files)
    else:
        click.echo('You must specify either a checkpoint or a config file.')
        return

    if override_params:
        config = override_config_params(config, override_params)

    # Bounding boxes will be filtered by frontend (using slider), so we set a
    # low threshold.
    config.model.rcnn.proposals.min_prob_threshold = 0.01

    # Initialize model
    global NETWORK_START_THREAD
    NETWORK_START_THREAD = Thread(target=start_network, args=(config,))
    NETWORK_START_THREAD.start()

    app.run(host=host, port=port, debug=debug)
Example #3
    def __init__(self, checkpoint=None, config=None, prob=0.7, classes=None):
        """Instantiate a detector object with the appropriate config.

        Arguments:
            checkpoint (str): Checkpoint id or alias to instantiate the
                detector as.
            config (dict): Configuration parameters describing the desired
                model. See `get_config` to load a config file.
            prob (float): Probability threshold for reported detections.
            classes (list of str): Subset of classes to detect; must be
                contained in the model's available classes.

        Note:
            At most one of the parameters may be specified. If neither is, we
            default to loading the checkpoint indicated by
            `DEFAULT_CHECKPOINT`.
        """
        if checkpoint is not None and config is not None:
            raise ValueError(
                "Only one of `checkpoint` or `config` must be specified in "
                "order to instantiate a Detector.")

        if checkpoint is None and config is None:
            # Neither checkpoint nor config specified, default to
            # `DEFAULT_CHECKPOINT`.
            checkpoint = self.DEFAULT_CHECKPOINT

        if checkpoint:
            config = get_checkpoint_config(checkpoint)

        # Prevent the model itself from filtering its proposals (default
        # value of 0.5 is in use in the configs).
        # TODO: A model should always return all of its predictions. The
        # filtering should be done (if at all) by PredictorNetwork.
        if config.model.type == "fasterrcnn":
            config.model.rcnn.proposals.min_prob_threshold = 0.0
        elif config.model.type == "ssd":
            config.model.proposals.min_prob_threshold = 0.0

        # TODO: Remove dependency on `PredictorNetwork` or clearly separate
        # responsibilities.
        self._network = PredictorNetwork(config)

        self.prob = prob

        # Use the labels when available, integers when not.
        self._model_classes = (
            self._network.class_labels
            if self._network.class_labels
            else list(range(config.model.network.num_classes))
        )
        if classes:
            self.classes = set(classes)
            if not set(self._model_classes).issuperset(self.classes):
                raise ValueError(
                    "`classes` must be contained in the detector's classes. "
                    "Available classes are: {}.".format(self._model_classes))
        else:
            self.classes = set(self._model_classes)
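
A minimal usage sketch for this Detector, following Luminoth's documented top-level API (the checkpoint alias and image path are example values):

from luminoth import Detector, read_image

image = read_image('image.png')
detector = Detector(checkpoint='accurate')
# Returns a list of dicts like:
# {'bbox': [x_min, y_min, x_max, y_max], 'label': 'person', 'prob': 0.97}
objects = detector.predict(image)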
Example #4
def web(config_files, checkpoint, override_params, host, port, debug,
        min_prob, save_path):
    global SAVE_PATH_GLOBAL
    if save_path:
        SAVE_PATH_GLOBAL = save_path
    if debug:
        tf.logging.set_verbosity(tf.logging.DEBUG)
    else:
        tf.logging.set_verbosity(tf.logging.INFO)

    if checkpoint:
        config = get_checkpoint_config(checkpoint)
    elif config_files:
        config = get_config(config_files)
    else:
        raise ValueError(
            'You must specify either a checkpoint or a config file.'
        )

    if override_params:
        config = override_config_params(config, override_params)

    # Bounding boxes will be filtered by frontend (using slider), so we set a
    # low threshold.
    if config.model.type == 'fasterrcnn':
        config.model.rcnn.proposals.min_prob_threshold = min_prob
    elif config.model.type == 'ssd':
        config.model.proposals.min_prob_threshold = min_prob
    else:
        raise ValueError(
            "Model type '{}' not supported".format(config.model.type)
        )
    
    # Verify the save folder exists; create it if it doesn't.
    if not os.path.exists(SAVE_PATH_GLOBAL):
        os.mkdir(SAVE_PATH_GLOBAL)

    # Initialize model
    global NETWORK_START_THREAD
    NETWORK_START_THREAD = Thread(target=start_network, args=(config,))
    NETWORK_START_THREAD.start()

    if debug:
        app.config.from_object('config.DebugConfig')
    else:
        app.config.from_object('config.ProductionConfig')
        
    app.run(host=host, port=port, debug=debug)
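
The `app.config.from_object('config.DebugConfig')` calls above assume a local config.py module; a minimal sketch of what it might contain (class and attribute names here are assumptions, not part of the example):

# config.py -- hypothetical module backing the from_object() calls above.
class Config(object):
    DEBUG = False

class DebugConfig(Config):
    DEBUG = True

class ProductionConfig(Config):
    DEBUG = False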
Example #5
    def inference(self, model, input_dir, score_threshold):
        config = get_checkpoint_config(model, prompt=False)
        if not config:
            return flask.jsonify({'error': 'network not found'})
        try:
            predictor = PredictorNetwork(config)
            if os.path.isdir(input_dir):
                paths = [
                    os.path.join(input_dir, f) for f in os.listdir(input_dir)
                ]
            else:
                paths = [input_dir]
            for image_file in paths:
                image = Image.open(image_file).convert('RGB')
                predictor.predict_image(image)
        except Exception:
            # An error occurred loading the model or predicting; interrupt
            # the whole server.
            _thread.interrupt_main()
Example #6
    def get_model_convert_output(self, model):

        config = get_checkpoint_config(model)
        network = PredictorNetwork(config)

        output_names = []
        for k, v in network.fetches.items():
            if k in OUTPUT_NAMES:
                if isinstance(v, tf.Tensor):
                    output_names.append(v.name.split(':')[0])
                elif isinstance(v, tuple):
                    for x in v:
                        output_names.append(x.name.split(':')[0])

        logger.debug('Input name: {}\n'.format(network.image_placeholder.name))
        for n in output_names:
            logger.debug('Output name {}'.format(n))
        return network.session, network.session.graph_def, output_names
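
The session, graph def and output names returned above are exactly what TF 1.x graph freezing consumes; a minimal sketch, assuming the standard tf.graph_util API (the `exporter` instance is hypothetical):

import tensorflow as tf

sess, graph_def, output_names = exporter.get_model_convert_output('accurate')
# Bake variable values into constants so the graph serializes standalone.
frozen = tf.graph_util.convert_variables_to_constants(
    sess, graph_def, output_names)
with tf.gfile.GFile('frozen_model.pb', 'wb') as f:
    f.write(frozen.SerializeToString())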
Example #7
def predict(path_or_dir, config_files, checkpoint, override_params,
            output_path, save_media_to, min_prob, max_detections, only_class,
            ignore_class, debug):
    """Obtain a model's predictions.

    Receives either `config_files` or `checkpoint` in order to load the correct
    model. Afterwards, runs the model through the inputs specified by
    `path-or-dir`, returning predictions according to the format specified by
    `output`.

    Additional model behavior may be modified with `min-prob`, `only-class` and
    `ignore-class`.
    """
    if debug:
        tf.logging.set_verbosity(tf.logging.DEBUG)
    else:
        tf.logging.set_verbosity(tf.logging.ERROR)

    if only_class and ignore_class:
        click.echo(
            "Only one of `only-class` or `ignore-class` may be specified.")
        return

    # Process the input and get the actual files to predict.
    files = resolve_files(path_or_dir)
    if not files:
        error = 'No files to predict found. Accepted formats are: {}.'.format(
            ', '.join(IMAGE_FORMATS + VIDEO_FORMATS))
        click.echo(error)
        return
    else:
        click.echo('Found {} files to predict.'.format(len(files)))

    # Build the `Formatter` based on the outputs, which automatically writes
    # the formatted output to all the requested output files.
    if output_path == '-':
        output = sys.stdout
    else:
        output = open(output_path, 'w')

    # Create `save_media_to` if specified and it doesn't exist.
    if save_media_to:
        tf.gfile.MakeDirs(save_media_to)

    # Resolve the config to use and initialize the model.
    if checkpoint:
        config = get_checkpoint_config(checkpoint)
    elif config_files:
        config = get_config(config_files)
    else:
        click.echo(
            'Neither checkpoint nor config specified, assuming `accurate`.')
        config = get_checkpoint_config('accurate')

    if override_params:
        config = override_config_params(config, override_params)

    # Filter bounding boxes according to `min_prob` and `max_detections`.
    if config.model.type == 'fasterrcnn':
        if config.model.network.with_rcnn:
            config.model.rcnn.proposals.total_max_detections = max_detections
        else:
            config.model.rpn.proposals.post_nms_top_n = max_detections
        config.model.rcnn.proposals.min_prob_threshold = min_prob
    elif config.model.type == 'ssd':
        config.model.proposals.total_max_detections = max_detections
        config.model.proposals.min_prob_threshold = min_prob
    else:
        raise ValueError("Model type '{}' not supported".format(
            config.model.type))

    # Instantiate the model indicated by the config.
    network = PredictorNetwork(config)

    # Iterate over files and run the model on each.
    for file in files:

        # Get the media output path, if media storage is requested.
        save_path = os.path.join(save_media_to, 'pred_{}'.format(
            os.path.basename(file))) if save_media_to else None

        file_type = get_file_type(file)
        predictor = predict_image if file_type == 'image' else predict_video

        objects = predictor(
            network,
            file,
            only_classes=only_class,
            ignore_classes=ignore_class,
            save_path=save_path,
        )

        # TODO: Not writing jsons for video files for now.
        if objects is not None and file_type == 'image':
            output.write(
                json.dumps({
                    'file': file,
                    'objects': objects,
                }) + '\n')

    output.close()
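
Since the command writes one JSON document per line, reading the predictions back is straightforward; a small sketch (the filename is whatever was passed as `output_path`):

import json

with open('predictions.json') as f:
    for line in f:
        record = json.loads(line)
        print(record['file'], len(record['objects']), 'objects detected')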
Example #8
from luminoth.tools.checkpoint import get_checkpoint_config
from luminoth.utils.predicting import PredictorNetwork
from PIL import Image as pilimage

# This program will predict the locations of the tables in an image.
# It outputs the coordinates of each table; using these coordinates, we can
# crop the table portion of the image for further processing.

input_file = '/usr/local/table-detection-from-images-using-deep-learning-master/test_image_with_table.png'
# Specify the luminoth checkpoint here
checkpoint = 'c2df81db49e0'

config = get_checkpoint_config(checkpoint)
network = PredictorNetwork(config)
image = pilimage.open(input_file).convert('RGB')
objects = network.predict_image(image)

print("NO OF TABLES IDENTIFIED BY LUMINOTH = " + str(len(objects)))
print('-' * 100)

table_counter = 1

for i in range(len(objects)):
    table_dictionary = objects[i]
    coordinate_list = table_dictionary["bbox"]
    xminn = coordinate_list[0]
    yminn = coordinate_list[1]
    xmaxx = coordinate_list[2]
    ymaxx = coordinate_list[3]
    print('TABLE ' + str(table_counter) + ':')
    print('-' * 100)
    print("xminn = " + str(xminn))
Example #9
def load_detector():
    config = get_checkpoint_config(settings.LUMI_CHECKPOINT)
    detector = PredictorNetwork(config)
    return detector
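
Loading the network is expensive, so callers typically want this to happen once per process; a small sketch memoizing the loader with functools.lru_cache (an addition on top of the example, not part of it):

import functools

@functools.lru_cache(maxsize=1)
def load_detector():
    # The checkpoint is read and the graph built only on the first call;
    # later calls return the cached PredictorNetwork.
    config = get_checkpoint_config(settings.LUMI_CHECKPOINT)
    return PredictorNetwork(config)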
Example #10
def predict(path_or_dir, config_files, checkpoint, override_params, output_dir,
            save, min_prob, ignore_classes, debug):
    if debug:
        tf.logging.set_verbosity(tf.logging.DEBUG)
    else:
        tf.logging.set_verbosity(tf.logging.INFO)

    # Get file paths
    if tf.gfile.IsDirectory(path_or_dir):
        file_paths = [
            os.path.join(path_or_dir, f)
            for f in tf.gfile.ListDirectory(path_or_dir)
            if get_filetype(f) in ('image', 'video')
        ]
    else:
        if get_filetype(path_or_dir) in ('image', 'video'):
            file_paths = [path_or_dir]
        else:
            file_paths = []

    errors = 0
    successes = 0
    created_files_paths = []
    total_files = len(file_paths)
    if total_files == 0:
        no_files_message = ("No images or videos found. "
                            "Accepted formats -> Image: {} - Video: {}")
        tf.logging.error(no_files_message.format(IMAGE_FORMATS, VIDEO_FORMATS))
        exit()

    # Resolve the config to use and initialize the model.
    if checkpoint:
        config = get_checkpoint_config(checkpoint)
    elif config_files:
        config = get_config(config_files)
    else:
        click.echo('You must specify either a checkpoint or a config file.')
        exit()

    if override_params:
        config = override_config_params(config, override_params)

    network = PredictorNetwork(config)

    # Create output_dir if it doesn't exist
    if output_dir:
        tf.gfile.MakeDirs(output_dir)

    tf.logging.info('Getting predictions for {} files'.format(total_files))

    # Iterate over file paths
    for file_path in file_paths:

        save_path = 'pred_' + os.path.basename(file_path)
        if output_dir:
            save_path = os.path.join(output_dir, save_path)

        if get_filetype(file_path) == 'image':
            click.echo('Predicting {}...'.format(file_path))
            with tf.gfile.Open(file_path, 'rb') as f:
                try:
                    image = Image.open(f).convert('RGB')
                except (tf.errors.OutOfRangeError, OSError) as e:
                    tf.logging.warning('Error: {}'.format(e))
                    tf.logging.warning("Couldn't open: {}".format(file_path))
                    errors += 1
                    continue

            # Run image through network
            prediction = network.predict_image(image)
            successes += 1

            # Filter results if required by user
            if ignore_classes:
                prediction = filter_classes(prediction, ignore_classes)

            # Save prediction json file
            with open(save_path + '.json', 'w') as outfile:
                json.dump(prediction, outfile)
            created_files_paths.append(save_path + '.json')

            # Save predicted image
            if save:
                with tf.gfile.Open(file_path, 'rb') as im_file:
                    image = Image.open(im_file)
                    draw_bboxes_on_image(image, prediction, min_prob)
                    image.save(save_path)
                created_files_paths.append(save_path)

        elif get_filetype(file_path) == 'video':
            # NOTE: We'll hardcode the video output to mp4 for the time being
            save_path = os.path.splitext(save_path)[0] + '.mp4'
            try:
                writer = skvideo.io.FFmpegWriter(save_path)
            except AssertionError as e:
                tf.logging.error(e)
                tf.logging.error(
                    "Please install ffmpeg before making video predictions.")
                exit()
            num_of_frames = int(
                skvideo.io.ffprobe(file_path)['video']['@nb_frames'])
            video_progress_bar = click.progressbar(
                skvideo.io.vreader(file_path),
                length=num_of_frames,
                label='Predicting {}'.format(file_path))
            with video_progress_bar as bar:
                try:
                    for frame in bar:
                        # Run image through network
                        prediction = network.predict_image(frame)

                        # Filter results if required by user
                        if ignore_classes:
                            prediction = filter_classes(
                                prediction, ignore_classes)

                        image = Image.fromarray(frame)
                        draw_bboxes_on_image(image, prediction, min_prob)
                        writer.writeFrame(np.array(image))
                except RuntimeError as e:
                    click.echo()  # Otherwise the error prints beside the bar.
                    tf.logging.error('Error: {}'.format(e))
                    tf.logging.error('Corrupt videofile: {}'.format(file_path))
                    tf.logging.error(
                        'Partially processed video file saved in {}'.format(
                            save_path))
                    errors += 1

            writer.close()
            created_files_paths.append(save_path)

        else:
            tf.logging.warning("{} isn't an image/video".format(file_path))

    # Generate logs
    tf.logging.info("Created the following files: {}".format(
        ', '.join(created_files_paths)))

    if errors:
        tf.logging.warning('{} errors.'.format(errors))
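
Example #10 leans on a filter_classes helper that isn't shown; a plausible minimal sketch, assuming predictions are lists of dicts carrying a 'label' key as in the other examples:

def filter_classes(prediction, ignore_classes):
    # Drop any detected object whose label the user asked to ignore.
    return [obj for obj in prediction if obj['label'] not in ignore_classes]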