示例#1
0
    def load_images_for_keras(
            image_dir: str = "../images/asl_alphabet_train/asl_alphabet_train",
            color_mode: str = "grayscale",
            image_size: Tuple[int, int] = (80, 80),
            validation_split: float = .2,
            seed: int = 1234321) -> Tuple[any, any]:
        """
        Build the training and validation Keras image datasets.

        :param image_dir: Root directory holding one sub-folder per class
        :param color_mode: Keras color mode, e.g. "grayscale" or "rgb"
        :param image_size: Target (height, width) every image is resized to
        :param validation_split: Fraction of images reserved for validation
        :param seed: Random seed shared by both subsets
        :return: A tuple of the training and validation datasets
        """
        # Both subsets share every option except `subset`; reusing the same
        # seed guarantees the two calls agree on the train/valid partition.
        shared_options = dict(labels="inferred",
                              color_mode=color_mode,
                              image_size=image_size,
                              validation_split=validation_split,
                              seed=seed)
        train_ds = image_dataset_from_directory(image_dir,
                                                subset="training",
                                                **shared_options)
        valid_ds = image_dataset_from_directory(image_dir,
                                                subset="validation",
                                                **shared_options)
        return train_ds, valid_ds
示例#2
0
BASE_PATH = "./data"
# define the batch size
BATCH_SIZE = 64
OUTPUT_PATH = path.sep.join([BASE_PATH, "output"])
img_height = 128
img_width = 64

# Augmentation pipeline for the training images (applied at fit time).
data_aug = keras.preprocessing.image.ImageDataGenerator(zoom_range=.1,
                                                        horizontal_flip=True,
                                                        rotation_range=8,
                                                        width_shift_range=.2,
                                                        height_shift_range=.2)

# The data is already split on disk (bbox_train / bbox_test), so no
# validation_split/subset pair is needed: the previous combination silently
# dropped 20% of the training images and 80% of the test images.
# Both loaders must also agree on image_size and label_mode -- previously the
# training set was 256x256 with one-hot ("categorical") labels while the
# validation set was 128x64 with integer labels, so evaluation would fail.
train_ds = pc.image_dataset_from_directory(BASE_PATH + "/bbox_train",
                                           label_mode="categorical",
                                           seed=123,
                                           shuffle=True,  # training data should be shuffled
                                           image_size=(img_height, img_width),
                                           batch_size=BATCH_SIZE)
val_ds = pc.image_dataset_from_directory(BASE_PATH + "/bbox_test",
                                         label_mode="categorical",
                                         seed=123,
                                         shuffle=False,  # keep evaluation order stable
                                         image_size=(img_height, img_width),
                                         batch_size=BATCH_SIZE)
class_names = train_ds.class_names
print(class_names)

# Scale pixel values from [0, 255] into [0, 1].
normalization_layer = layers.experimental.preprocessing.Rescaling(1. / 255)
normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
image_batch, labels_batch = next(iter(normalized_ds))
示例#3
0
data_dir = keras.utils.get_file(origin=dataset_url, fname="BSR", untar=True)
root_dir = os.path.join(data_dir, "BSDS500/data")
"""
We create training and validation datasets via `image_dataset_from_directory`.
"""

crop_size = 300
upscale_factor = 3
input_size = crop_size // upscale_factor
batch_size = 4

# The two subsets differ only in `subset`; sharing every remaining option
# (including the seed) keeps the 80/20 partition consistent between them.
_split_options = dict(
    batch_size=batch_size,
    image_size=(crop_size, crop_size),
    validation_split=0.2,
    seed=1337,
    label_mode=None,  # unlabeled: super-resolution needs only the images
)

train_ds = image_dataset_from_directory(root_dir,
                                        subset="training",
                                        **_split_options)

valid_ds = image_dataset_from_directory(root_dir,
                                        subset="validation",
                                        **_split_options)
"""
示例#4
0
from keras.preprocessing import image_dataset_from_directory

# Demonstration call that spells out every parameter of
# image_dataset_from_directory (mostly at its default value); the returned
# dataset object is discarded.
image_dataset_from_directory(
    '/tf/notebooks/Keum/data/train/',
    labels="inferred",          # class labels come from sub-directory names
    label_mode="int",           # integer-encoded labels
    class_names=None,           # class order inferred from directory listing
    color_mode="rgb",
    batch_size=32,
    image_size=(256, 256),      # every image is resized to 256x256
    shuffle=True,
    seed=None,
    validation_split=None,      # no train/validation split
    subset=None,
    interpolation="bilinear",   # resize interpolation method
    follow_links=False,         # do not follow symlinks while scanning
)
from keras.callbacks import EarlyStopping
from sklearn.utils import class_weight
from keras.optimizers import SGD
from sklearn.svm import SVC
from sklearn import metrics
import numpy as np
import tensorflow as tf

#Path do dataset
# Dataset locations on disk.
batch = 1
train_path = 'C:/Users/Labmint/Documents/Visao/base_padding/training_set'
test_path = 'C:/Users/Labmint/Documents/Visao/base_padding/validation_set'

# Both splits share the exact same loader options; shuffling is disabled so
# the sample order stays deterministic across runs (features extracted here
# must line up with labels collected elsewhere).
_loader_options = dict(batch_size=batch,
                       labels='inferred',
                       label_mode='int',
                       image_size=(355, 370),
                       shuffle=False)

train_set = image_dataset_from_directory(train_path, **_loader_options)
test_set = image_dataset_from_directory(test_path, **_loader_options)

# Backbone used as a feature extractor; other architectures such as VGG16 or
# MobileNetV2 could be swapped in here.
model = EfficientNetB6(include_top=False,
                       input_shape=(355, 370, 3),
                       pooling='avg',
                       weights='imagenet')
示例#6
0
# %%
from keras.preprocessing import image_dataset_from_directory
from tensorflow import data
AUTOTUNE = data.AUTOTUNE

# Target resolution and batching for the loaders.
image_height = 64
image_width = 64
batch_size = 32

dataset = 'isochronous-dataset'

# Shared loader configuration: both subsets must use the same seed and
# split fraction so the 80/20 partition is consistent between them.
_common_options = dict(validation_split=0.2,
                       seed=0,
                       image_size=(image_height, image_width),
                       color_mode='grayscale',
                       batch_size=batch_size)

X_raw_train = image_dataset_from_directory(dataset,
                                           subset='training',
                                           **_common_options)
X_raw_test = image_dataset_from_directory(dataset,
                                          subset='validation',
                                          **_common_options)

# Class labels are inferred from the sub-directory names.
class_names = X_raw_train.class_names
print(class_names)
# %%