Ejemplo n.º 1
0
def preview_synthetic_training_data(data,
                                    target,
                                    backgrounds=None,
                                    verbose=True,
                                    **kwargs):
    """
    A utility function to visualize the synthetically generated data.

    Parameters
    ----------
    data : SFrame | tc.Image
        A single starter image or an SFrame that contains the starter images
        along with their corresponding labels.  These image(s) can be in either
        RGB or RGBA format. They should not be padded.

    target : string
        Name of the target (when data is a single image) or the target column
        name (when data is an SFrame of images).

    backgrounds : optional SArray
        A list of backgrounds used for synthetic data generation. When set to
        None, a set of default backgrounds are downloaded and used.

    verbose : bool optional
        If True, print progress updates and details.

    **kwargs : optional
        seed : int
            Random seed for augmentation. When omitted, a random 32-bit seed
            is drawn, so repeated calls produce different augmentations.

    Returns
    -------
    out : SFrame
        An SFrame of synthetically generated annotated training data.
    """
    dataset_to_augment, image_column_name, target_column_name = check_one_shot_input(
        data, target, backgrounds)
    _tkutl._handle_missing_values(dataset_to_augment, image_column_name,
                                  "dataset")

    if backgrounds is None:
        backgrounds_downloader = _data_zoo.OneShotObjectDetectorBackgroundData(
        )
        backgrounds = backgrounds_downloader.get_backgrounds()
        # We resize the background dimensions by half along each axis to reduce
        # the disk footprint during augmentation, and also reduce the time
        # taken to synthesize data.
        backgrounds = backgrounds.apply(lambda im: _tc.image_analysis.resize(
            im, int(im.width / 2), int(im.height / 2), im.channels))
    # Option arguments to pass in to C++ Object Detector, if we use it:
    # {'mlmodel_path':'darknet.mlmodel', 'max_iterations' : 25}
    # Honor a caller-supplied seed; otherwise draw a fresh 32-bit seed.
    seed = kwargs.get("seed", _random.randint(0, 2**32 - 1))
    options_for_augmentation = {"seed": seed, "verbose": verbose}

    one_shot_model = _extensions.one_shot_object_detector()
    augmented_data = one_shot_model.augment(
        dataset_to_augment,
        image_column_name,
        target_column_name,
        backgrounds,
        options_for_augmentation,
    )
    return augmented_data
Ejemplo n.º 2
0
def preview_synthetic_training_data(data,
                                    target,
                                    backgrounds=None,
                                    verbose=True,
                                    **kwargs):
    """
    A utility function to visualize the synthetically generated data.

    Parameters
    ----------
    data : SFrame | tc.Image
        A single starter image or an SFrame that contains the starter images
        along with their corresponding labels.  These image(s) can be in either
        RGB or RGBA format. They should not be padded.

    target : string
        Name of the target (when data is a single image) or the target column
        name (when data is an SFrame of images).

    backgrounds : optional SArray
        A list of backgrounds used for synthetic data generation. When set to
        None, a set of default backgrounds are downloaded and used.

    verbose : bool optional
        If True, print progress updates and details.

    **kwargs : optional
        seed : int
            Random seed for augmentation. When omitted, a random 32-bit seed
            is drawn, so repeated calls produce different augmentations.

    Returns
    -------
    out : SFrame
        An SFrame of synthetically generated annotated training data.
    """
    dataset_to_augment, image_column_name, target_column_name = check_one_shot_input(
        data, target, backgrounds)
    _tkutl._handle_missing_values(dataset_to_augment, image_column_name,
                                  'dataset')
    one_shot_model = _extensions.one_shot_object_detector()
    # Honor a caller-supplied seed; otherwise draw a fresh 32-bit seed.
    seed = kwargs.get("seed", _random.randint(0, 2**32 - 1))
    if backgrounds is None:
        backgrounds_downloader = _data_zoo.OneShotObjectDetectorBackgroundData(
        )
        backgrounds_tar_path = backgrounds_downloader.get_backgrounds_path()
        # Use a context manager so the archive handle is always closed,
        # even if extraction fails partway through.
        # NOTE(review): extractall() writes into the current working directory
        # with no path filtering; the archive is first-party downloaded data,
        # but confirm that assumption holds.
        with _tarfile.open(backgrounds_tar_path) as backgrounds_tar:
            backgrounds_tar.extractall()
        backgrounds = _tc.SArray("one_shot_backgrounds.sarray")
    # Option arguments to pass in to C++ Object Detector, if we use it:
    # {'mlmodel_path':'darknet.mlmodel', 'max_iterations' : 25}
    options_for_augmentation = {"seed": seed, "verbose": verbose}
    augmented_data = one_shot_model.augment(dataset_to_augment,
                                            image_column_name,
                                            target_column_name, backgrounds,
                                            options_for_augmentation)
    return augmented_data
def create(dataset,
           target,
           backgrounds=None,
           feature=None,
           batch_size=0,
           max_iterations=0,
           seed=None,
           verbose=True):
    """
    Create a OneShotObjectDetector: synthesize annotated training data from
    the starter image(s) in ``dataset`` and train an object detector on it.

    Parameters
    ----------
    dataset : SFrame
        Starter image(s) and labels passed through to the augmenter.
    target : string
        Name of the target / target column, passed to the augmenter.
    backgrounds : optional SArray
        Backgrounds used for synthetic data generation. When None, an empty
        SArray placeholder is used (see TODO below).
    feature, batch_size, max_iterations, verbose : optional
        NOTE(review): accepted for interface compatibility but currently
        unused by this implementation — confirm whether they should be
        forwarded to augmentation / detector training.
    seed : optional int
        Random seed for augmentation; a random 32-bit seed is drawn when None.

    Returns
    -------
    out : OneShotObjectDetector
        A wrapper holding the trained object detector in its state.
    """
    model = _extensions.one_shot_object_detector()
    if seed is None:
        seed = _random.randint(0, 2**32 - 1)
    if backgrounds is None:
        # TODO: replace this with loading backgrounds from developer.apple.com
        backgrounds = _tc.SArray()
    # Option arguments to pass in to C++ Object Detector, if we use it:
    # {'mlmodel_path':'darknet.mlmodel', 'max_iterations' : 25}
    augmented_data = model.augment(dataset, target, backgrounds, {"seed": seed})
    od_model = _tc.object_detector.create(augmented_data)
    state = {'detector': od_model}
    return OneShotObjectDetector(state)