Example #1
def data_gen(input_path_root, target_path_root, batch_size):
    input_dir_names = os.listdir(input_path_root)
    target_dir_names = os.listdir(target_path_root)
    # Only use directories that exist under both roots
    dir_names = set(input_dir_names) & set(target_dir_names)

    path_pairs = []
    for dir_name in dir_names:
        input_dir = os.path.join(input_path_root, dir_name)
        target_dir = os.path.join(target_path_root, dir_name)

        input_paths = sorted(list_pictures(input_dir))
        target_paths = sorted(list_pictures(target_dir))

        path_pairs += zip(input_paths, target_paths)

    n_batch = math.ceil(len(path_pairs) / batch_size)

    while True:
        for i in range(n_batch):
            batch_path_pairs = path_pairs[i * batch_size:(i + 1) * batch_size]
            # The last batch may be smaller; keep its size in a local variable
            # instead of overwriting batch_size, which would shrink every
            # later batch.
            cur_batch_size = len(batch_path_pairs)

            batch_input_img = np.empty(shape=(cur_batch_size, 256, 256, 1))
            batch_target_img = np.empty(shape=(cur_batch_size, 256, 256, 1))
            for j, path_pair in enumerate(batch_path_pairs):
                # Standardize the input image (zero mean, unit variance)
                x = load_scaled_img(path_pair[0])
                x -= np.mean(x, keepdims=True)
                x /= (np.std(x, keepdims=True) + K.epsilon())
                batch_input_img[j] = x

                batch_target_img[j] = load_scaled_img(path_pair[1])

            yield batch_input_img, batch_target_img
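A minimal usage sketch for this generator, assuming a compiled Keras model with 256x256x1 inputs and outputs; the directory names and step count below are placeholders:

# Hypothetical paths and schedule; adjust to the real dataset.
train_gen = data_gen('data/input', 'data/target', batch_size=8)
model.fit_generator(train_gen, steps_per_epoch=100, epochs=10)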
Example #2
def prepare_train():
    X = []
    Y = []

    # Images of class A
    for picture in list_pictures('./ordinary/'):
        img = img_to_array(load_img(picture, target_size=(224, 224)))
        X.append(img)

        Y.append(0)

    # Images of class B
    for picture in list_pictures('./jiro/'):
        img = img_to_array(load_img(picture, target_size=(224, 224)))
        X.append(img)

        Y.append(1)

    # Convert to arrays
    X = np.asarray(X)
    Y = np.asarray(Y)
    # Scale pixel values to the range [0, 1]
    X = X.astype('float32')
    X = X / 255.0

    # Convert class labels to one-hot vectors
    Y = np_utils.to_categorical(Y, 2)

    # Split into training and test data
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        Y,
                                                        test_size=0.33,
                                                        random_state=111)
    return X_train, X_test, y_train, y_test
Example #3
    def __init__(self,
                 root_dir,
                 classes=None,
                 mean=IMAGENET_MEAN,
                 std=IMAGENET_STD,
                 color_mode="rgb"):
        """ ILSVRC data generator.

        # Arguments:

        - root_dir: Root directory of the ILSVRC dataset, containing directories "ILSVRC2012_img_train" and "ILSVRC2012_img_val", both containing
                    sub-directories with names of synsets and the images for each synset in the corresponding sub-directories.

        - classes: List of synsets to restrict the dataset to. Numeric labels will be assigned to these synsets in ascending order.
                   If set to `None`, all available synsets will be used, enumerated in lexicographical order.
        
        - mean: Channel-wise image mean for normalization (in "RGB" order). If set to `None`, mean and standard deviation will be computed from the images.

        - std: Channel-wise standard deviation for normalization (in "RGB" order). If set to `None`, standard deviation will be computed from the images.

        - color_mode: Image color mode, either "rgb" or "bgr".
        """

        super(ILSVRCGenerator, self).__init__(root_dir,
                                              classes,
                                              default_target_size=256,
                                              randzoom_range=(256, 480),
                                              color_mode=color_mode)
        self.train_dir = os.path.join(self.root_dir, 'ILSVRC2012_img_train')
        self.test_dir = os.path.join(self.root_dir, 'ILSVRC2012_img_val')

        # Search for classes
        if classes is None:
            classes = []
            for subdir in sorted(os.listdir(self.train_dir)):
                if os.path.isdir(os.path.join(self.train_dir, subdir)):
                    classes.append(subdir)
        self.classes = classes
        self.class_indices = dict(zip(self.classes, range(len(self.classes))))

        # Search for images
        for lbl, subdir in enumerate(self.classes):
            cls_files = sorted(
                list_pictures(os.path.join(self.train_dir, subdir),
                              'JPE?G|jpe?g'))
            self.train_img_files += cls_files
            self._train_labels += [lbl] * len(cls_files)
            cls_files = sorted(
                list_pictures(os.path.join(self.test_dir, subdir),
                              'JPE?G|jpe?g'))
            self.test_img_files += cls_files
            self._test_labels += [lbl] * len(cls_files)
        print('Found {} training and {} validation images from {} classes.'.
              format(self.num_train, self.num_test, self.num_classes))

        # Compute mean and standard deviation
        self._compute_stats(mean, std)
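Based on the docstring above, instantiation might look like the following sketch; the root path and the two synset IDs are placeholders:

# Hypothetical root directory and synset subset.
gen = ILSVRCGenerator('/data/ILSVRC',
                      classes=['n01440764', 'n01443537'],
                      color_mode='rgb')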
Example #4
def load_images(img_dir, grayscale=False):
    # List the directory once so the returned paths and arrays stay aligned
    paths = image_utils.list_pictures(img_dir)
    imgs = [
        image_utils.img_to_array(
            image_utils.load_img(p, grayscale=grayscale, target_size=i_shape))
        for p in paths
    ]
    return paths, np.array(imgs)
Example #5
def data_gen(path_dir, batch_size):
    dir_names = sorted(os.listdir(path_dir))
    dir_paths = [os.path.join(path_dir, d) for d in dir_names]

    # Consecutive directories form (encoder, decoder) pairs
    n_pair = len(dir_names) - 1
    n_batches = math.ceil(n_pair / batch_size)

    while True:
        for i in range(n_batches):
            start = i * batch_size
            end = min(start + batch_size, n_pair)

            # Encoder inputs: pages of the current directories
            file_paths_list = [list_pictures(d) for d in dir_paths[start:end]]
            max_page_len = max(len(ps) for ps in file_paths_list)
            enc_inputs = [
                load_img_batch(p, max_page_len) for p in file_paths_list
            ]
            batch_enc_inputs = np.empty(shape=(len(enc_inputs), max_page_len,
                                               256, 256, 1))
            for j, e in enumerate(enc_inputs):
                batch_enc_inputs[j] = e

            # Decoder inputs/outputs: pages of the following directories
            file_paths_list = [
                list_pictures(d) for d in dir_paths[start + 1:end + 1]
            ]
            max_page_len = max(len(ps) for ps in file_paths_list)

            # Prepend the BOS frame to every decoder sequence
            reshaped_bos = BOS.reshape(1, 256, 256, 1)
            dec_inputs = [
                np.vstack((reshaped_bos, load_img_batch(p, max_page_len)))
                for p in file_paths_list
            ]

            batch_dec_inputs = np.empty(shape=(len(dec_inputs), max_page_len,
                                               256, 256, 1))
            batch_dec_outputs = np.empty(shape=(len(dec_inputs), max_page_len,
                                                256, 256, 1))
            for j, d in enumerate(dec_inputs):
                batch_dec_inputs[j] = d[:-1]   # shifted right (teacher forcing)
                batch_dec_outputs[j] = d[1:]   # shifted left (targets)

            yield [batch_enc_inputs, batch_dec_inputs], batch_dec_outputs
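A quick sketch of the yielded structure, assuming one sub-directory of page images per sequence under a hypothetical 'pages/' root:

# Hypothetical usage; encoder pages are paired with shifted decoder pages.
gen = data_gen('pages/', batch_size=4)
[enc_in, dec_in], dec_out = next(gen)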
Example #6
def main():
    X_train = []
    y_train = []
    X_test = []
    y_test = []

    for picture in list_pictures('./roboint/moji/train/pos/'):
        img = img_to_array(
            load_img(picture, target_size=(image_size, image_size)))
        X_train.append(img)
        y_train.append(0)

    for picture in list_pictures('./roboint/moji/train/neg/'):
        img = img_to_array(
            load_img(picture, target_size=(image_size, image_size)))
        X_train.append(img)
        y_train.append(1)

    for picture in list_pictures('./roboint/moji/test/pos/'):
        img = img_to_array(
            load_img(picture, target_size=(image_size, image_size)))
        X_test.append(img)
        y_test.append(0)

    for picture in list_pictures('./roboint/moji/test/neg/'):
        img = img_to_array(
            load_img(picture, target_size=(image_size, image_size)))
        X_test.append(img)
        y_test.append(1)

    X_train = np.array(X_train)
    y_train = np.array(y_train)
    X_test = np.array(X_test)
    y_test = np.array(y_test)

    X_train = X_train.astype('float32')
    X_train = X_train / 255.0
    X_test = X_test.astype('float32')
    X_test = X_test / 255.0
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)

    # Train and evaluate the model
    model = model_train(X_train, y_train)
    model_eval(model, X_test, y_test)

    # Bonus: print predictions for the test images
Example #7
def load_images(image_dir, W, H, image_array_divided_by=1):
    from keras.preprocessing import image

    # Get the list of all files, assuming the directory contains only images
    list_of_files = image.list_pictures(image_dir)
    print("Count of image files read is", len(list_of_files))

    # Load each image, optionally resizing to (W, H)
    list_images = []
    for img_path in list_of_files:
        if W > 0 and H > 0:
            img = image.load_img(img_path, target_size=(W, H))
        else:
            img = image.load_img(img_path)

        x = image.img_to_array(img)
        x /= image_array_divided_by
        list_images.append(x)

    # Stack into one array as required by the network
    train = np.stack(list_images)

    return {"train": train, "list_of_files": list_of_files}
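A usage sketch for the returned dict; the directory name and divisor are assumptions:

# Hypothetical call: scale pixel values into [0, 1].
data = load_images('images/', W=224, H=224, image_array_divided_by=255.0)
print(data['train'].shape, len(data['list_of_files']))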
Example #8
def generator(img_dir, cap_path, batch_size, max_len=30):
    # Read and parse the caption JSON, closing the file when done
    with open(cap_path) as fh:
        data = json.load(fh)

    img_groups = {}
    all_captions = []
    for img_group in data['images']:
        img_groups[img_group['filename']] = []

        for sentence in img_group['sentences']:
            img_groups[img_group['filename']].append(sentence['raw'])
            all_captions.append(sentence['raw'])

    preprocessor = CaptionPreprocessor(rare_words_handling='nothing')
    preprocessor.fit_on_captions(all_captions)

    image_files = list_pictures(img_dir)

    image_processor = ImagePreprocessor(is_training=True, img_size=(299, 299))
    '''
    global img_array
    if os.path.exists('./img_array.npy'):
        img_array = np.load('./img_array.npy')
    else:
        print("Preprocessing images...\n")
        img_array = image_processor.process_images(image_files)
        print("\nImages preprocessed.")
        np.save('./img_array', img_array)
    '''

    return ImgSequence(img_groups, image_files, img_dir, batch_size,
                       (image_processor, preprocessor), max_len)
Example #9
def extract_inception():
    model = InceptionV3(weights='imagenet', include_top=False)
    model.summary()  # summary() prints itself; wrapping it in print() adds a stray 'None'

    X_dirname = '../../411a3/train'
    Y_filename = '../../411a3/train.csv'
    X_filelist = image.list_pictures(X_dirname)
    Y_list = np.loadtxt(Y_filename, dtype='str', delimiter=',')[1:]

    X_inception = np.zeros((train_size, 2048, 8, 8))
    y_inception = Y_list[:, 1].astype('int64').reshape(-1, 1) - 1

    for i in range(train_size):
        img = image.load_img(X_filelist[i], target_size=target_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        inception = model.predict(x)
        X_inception[i, :, :, :] = inception
        print('Read image: ' + X_filelist[i])

    # shuffle inputs and targets
    rnd_idx = np.arange(X_inception.shape[0])
    np.random.shuffle(rnd_idx)
    X_train = X_inception[rnd_idx]
    y_train = y_inception[rnd_idx]

    return X_train, y_train
Example #10
def load_image(nb_test=0):
    X_dirname = '../411a3/train'
    Y_filename = '../411a3/train.csv'
    X_filelist = list_pictures(X_dirname)
    Y_list = np.loadtxt(Y_filename, dtype='str', delimiter=',')[1:]

    X_train = np.zeros((train_size, image_channel, image_size, image_size))
    y_train = Y_list[:, 1].astype('int64').reshape(-1, 1) - 1

    for i in range(train_size):
        img = load_img(X_filelist[i])
        x = img_to_array(img)
        x = np.expand_dims(x, axis=0)
        X_train[i, :, :, :] = x
        print('Read image: ' + X_filelist[i])

    # shuffle inputs and targets
    rnd_idx = np.arange(X_train.shape[0])
    np.random.shuffle(rnd_idx)
    X_train = X_train[rnd_idx]
    y_train = y_train[rnd_idx]

    if K.image_dim_ordering() == 'tf':
        X_train = X_train.transpose(0, 2, 3, 1)

    # print('X_train shape:', X_train.shape)
    # print('y_train shape:', y_train.shape)

    return (X_train[nb_test:], y_train[nb_test:]), (X_train[:nb_test], y_train[:nb_test])
Example #11
def getImage():
    X = []
    for picture in list_pictures('./eroimages/'):
        img = img_to_array(load_img(picture, target_size=(128, 128)))  # target_size is (height, width)
        X.append(img)

    npX = np.array(X)
    return npX
Example #12
    def setUp(self):
        self.motion_detection_test_dir = path.join(temp_data_dir,
                                                   'motion_detection')
        self.images = list_pictures(self.motion_detection_test_dir)
        with open(path.join(self.motion_detection_test_dir,
                            'annotations')) as f:
            self.annotations = [line.split(' ')[1].strip('\n') for line in f]
        self.positive_annotation = 'person_visible'
        self.negative_annotation = 'no_person_visible'
Example #13
def getImage():
    X = []
    for picture in list_pictures('./gazo/'):
        img = img_to_array(load_img(picture, target_size=(96, 96)))  # target_size is (height, width)
        X.append(img)
        if len(X) % 100 == 0:
            print(len(X))

    npX = np.array(X)
    return npX
Example #14
def load_mnist(width, height, channels):
    X = []
    for image in list_pictures('./MNIST/0/'):
        img = img_to_array(
            load_img(image, grayscale=True, target_size=(28, 28)))
        X.append(img)
    X = np.asarray(X)
    X = (X.astype(np.float32) - 127.5) / 127.5
    img_rows = width
    img_cols = height
    img_shape = [img_rows, img_cols, channels]
    return X, img_shape
Example #15
def load_data():
    x_data = []
    y_data = []

    # apple images
    for picture in list_pictures('./dataset/apple/'):
        img = img_to_array(load_img(picture, target_size=img_size))
        x_data.append(img)
        y_data.append(0)

    # banana images
    for picture in list_pictures('./dataset/banana/'):
        img = img_to_array(load_img(picture, target_size=img_size))
        x_data.append(img)
        y_data.append(1)

    # orange images
    for picture in list_pictures('./dataset/orange/'):
        img = img_to_array(load_img(picture, target_size=img_size))
        x_data.append(img)
        y_data.append(2)

    # Convert to arrays
    x_data = np.asarray(x_data)
    y_data = np.asarray(y_data)

    # Scale pixel values to the range [0, 1]
    x_data = x_data.astype('float32')
    x_data = x_data / 255.0

    # Convert class labels to one-hot vectors
    y_data = np_utils.to_categorical(y_data, category_size)

    # Split into training and test data
    x_train, x_test, y_train, y_test = train_test_split(x_data,
                                                        y_data,
                                                        test_size=0.33,
                                                        random_state=123)

    return x_train, x_test, y_train, y_test
Example #16
def load_dataset(directory, crop_size=320, batch_size=32):
    """
    load data from directory
    :param directory: jpg files directory
    :param crop_size: cropped image size
    :param batch_size: batch size
    :return: python generator object, a batch training data, img, y_cls_mask_lable, y_regr_cls_mask_label
    """
    jpg_list = list_pictures(directory, 'jpg')
    generator = img_txtreg_generator(jpg_list, crop_size, scale=1 / 255.0)
    generator = image_ylabel_generator(generator)
    generator = group_by_batch(generator, batch_size)
    return generator
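Assuming a compiled detection model, the returned generator plugs straight into fit_generator; the directory and schedule below are placeholders:

# Hypothetical directory and training schedule.
train_gen = load_dataset('data/train_jpgs', crop_size=320, batch_size=32)
model.fit_generator(train_gen, steps_per_epoch=200, epochs=20)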
Example #17
def load_val():
    X_dirname = '../411a3/val'
    X_filelist = image.list_pictures(X_dirname)
    n_val = 970  # number of validation images

    X_val = np.zeros((n_val, gist_size))

    for i in range(n_val):
        im = Image.open(X_filelist[i])
        descriptors = leargist.color_gist(im)
        X_val[i, :] = descriptors
        print('Load image: ' + X_filelist[i])

    return X_val
Example #18
    def train(self):

        images = list_pictures(self._image_path)

        num_image = len(images)

        total_seq = num_image * self._num_seq_per_image

        steps_per_epoch = total_seq // self._batch_size

        self._build_generator()

        image_captioning_model = ImageCaptioningModel(
            self._max_seq_length,
            rnn_mode=self._rnn_mode,
            drop_rate=self._drop_rate,
            hidden_dim=self._hidden_dim,
            rnn_state_size=self._rnn_state_size,
            embedding_size=self._embedding_size,
            rnn_activation=self._activation,
            cnn_model=self._cnn_model,
            optimizer=self._optimizer,
            initializer=self._initializer,
            learning_rate=self._lr,
            mode=self._mode,
            reg_l1=self._reg_l1,
            reg_l2=self._reg_l2,
            num_word=len(self._generator.caption_processor.word_index) + 1,
            is_trainable=self._is_trainable,
            metrics=self._metrics,
            loss=self._loss)

        image_captioning_model.build_model()
        model = image_captioning_model.image_captioning_model

        save_path = self._model_path

        ckpt_path = self._ckpt_path

        if ckpt_path and os.path.isfile(ckpt_path):
            print("Load Check Point")
            model.load_weights(ckpt_path)

        self._image_captioning_model = model

        model.fit_generator(generator=self._generator,
                            steps_per_epoch=steps_per_epoch,
                            epochs=self._epoch,
                            callbacks=callback(ckpt_path, './logs/'))

        model.save(save_path)
Example #19
def generate_batches(batchq, imdir, num_batches, batch_size, image_size):
    image_paths = list_pictures(imdir)
    if not image_paths:
        print("Error: no images found in {}".format(imdir))
        sys.exit(1)
    for _ in range(num_batches):
        batch_image_paths = sample(image_paths, batch_size)
        batch = np.vstack([
            load_and_preprocess_img(image_path,
                                    image_size,
                                    center_crop=True)
            for image_path in batch_image_paths
        ])
        batchq.put(batch)
Example #20
    def save_images(self, image_dir):
        image_files = list_pictures(image_dir)
        print("Preprocessing images...\n")
        pbar = tqdm(total=len(image_files))
        # h5py cannot build a single dataset from (filename, array) tuples,
        # so store each image array under its own file name instead.
        with h5py.File('../tmp/img_array.h5', 'w') as h5f:
            for image_file in image_files:
                pbar.update()
                array = image_processor.process_image(image_file)
                h5f.create_dataset(image_file.split('/')[-1], data=array)
        print("\nImages preprocessed.")
Example #21
def load_train():
    X_dirname = '../411a3/train'
    Y_filename = '../411a3/train.csv'
    X_filelist = image.list_pictures(X_dirname)
    Y_list = np.loadtxt(Y_filename, dtype='str', delimiter=',')[1:]

    n_train = 7000  # number of training images
    X_train = np.zeros((n_train, gist_size))
    y_train = Y_list[:, 1].astype('int64').reshape(-1, 1) - 1

    for i in range(n_train):
        im = Image.open(X_filelist[i])
        descriptors = leargist.color_gist(im)
        X_train[i, :] = descriptors
        print('Load image: ' + X_filelist[i])

    return X_train, y_train
Example #22
def generate_image(source_directory_path, generate_directory_path, target_size, count_per_image):
    if not os.path.exists(generate_directory_path):
        os.mkdir(generate_directory_path)

    # Build the augmenting generator once, outside the loop
    generator = keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255,
                                                             shear_range=0.2,
                                                             zoom_range=0.2,
                                                             horizontal_flip=True)

    for picture in list_pictures(source_directory_path):
        img = load_img(picture, target_size=target_size)
        x = np.expand_dims(img_to_array(img), axis=0)

        flow = generator.flow(x, batch_size=1, save_to_dir=generate_directory_path,
                              save_prefix='img', save_format='bmp')
        for _ in range(count_per_image):
            next(flow)  # each call writes one augmented image to disk
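For example (both directories are placeholders), the call below writes five augmented BMP files per source image:

# Hypothetical directories; five augmented copies per input image.
generate_image('images/raw', 'images/augmented',
               target_size=(224, 224), count_per_image=5)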
Example #23
def extract_inception_test():
    model = InceptionV3(weights='imagenet', include_top=False)
    model.summary()  # summary() prints itself; wrapping it in print() adds a stray 'None'

    X_dirname = '../../411a3/test'
    X_filelist = image.list_pictures(X_dirname)

    X_inception_test = np.zeros((test_size, 2048, 8, 8))

    for i in range(test_size):
        img = image.load_img(X_filelist[i], target_size=target_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        inception = model.predict(x)
        X_inception_test[i, :, :, :] = inception
        print('Read image: ' + X_filelist[i])

    return X_inception_test
Example #24
def make_generator(filepath, labeldata, batch_size, img_size, test_size):
    """
    Create generators that load the images.
    """
    # Gather the list of data files
    import sys
    from keras.preprocessing.image import list_pictures
    from progressbar import ProgressBar
    from pathlib import Path

    # Total number of images
    num_pics = 0
    list_pics = []
    list_labels = []
    print('count pictures.', file=sys.stderr)
    prog = ProgressBar(0, len(labeldata))
    for index, label in enumerate(labeldata):
        path = Path(filepath).joinpath(label)
        pics = list_pictures(path)
        for p in pics:
            list_pics.append(p)
            list_labels.append(index)
        num_pics += len(pics)
        prog.update(index + 1)
    prog.finish()
    print('{} pictures.'.format(num_pics), file=sys.stderr)

    # Build the data generators
    split = int(num_pics * test_size)
    train_gen = ImageGenerator(data_paths=list_pics[split:],
                               data_classes=list_labels[split:],
                               batch_size=batch_size,
                               width=img_size,
                               height=img_size,
                               num_of_class=len(labeldata))
    val_gen = ImageGenerator(data_paths=list_pics[:split],
                             data_classes=list_labels[:split],
                             batch_size=batch_size,
                             width=img_size,
                             height=img_size,
                             num_of_class=len(labeldata))

    return (train_gen, val_gen)
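A call sketch with hypothetical labels and dataset root; test_size=0.2 routes 20% of the images to the validation generator:

# Hypothetical labels and root directory.
train_gen, val_gen = make_generator('dataset/', ['apple', 'banana'],
                                    batch_size=32, img_size=224,
                                    test_size=0.2)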
Example #25
def extract_vgg16_val():
    model = VGG16(weights='imagenet', include_top=False)
    model.summary()  # summary() prints itself; wrapping it in print() adds a stray 'None'

    X_dirname = '../../411a3/val'
    X_filelist = image.list_pictures(X_dirname)

    X_vgg_val = np.zeros((val_size, 512, 7, 7))

    for i in range(val_size):
        img = image.load_img(X_filelist[i], target_size=target_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        vgg16 = model.predict(x)
        X_vgg_val[i, :, :, :] = vgg16
        print('Read image: ' + X_filelist[i])

    return X_vgg_val
Example #26
def count_pictures(labeldata, datapath):
    """
    Counts the images under each directory named in labeldata inside the directory given by datapath.
    """
    from keras.preprocessing.image import list_pictures
    import sys
    from pathlib import Path
    from progressbar import ProgressBar
    print('count pictures.', file=sys.stderr)
    result = 0
    prog = ProgressBar(0, len(labeldata))
    for index, label in enumerate(labeldata):
        path = Path(datapath).joinpath(label)
        pics = list_pictures(path)
        result += len(pics)
        prog.update(index + 1)
    prog.finish()
    print('{} pictures.'.format(result), file=sys.stderr)

    return result
Example #27
def load_aug_image_train():
    X_file_base = '../411a3/data_classify/train/'
    X_train = np.empty((0, gist_size))
    y_train = np.empty((0, 1))
    for i_class in range(nb_classes):
        X_file_part = X_file_base + str(i_class)
        filelist = list_pictures(X_file_part)
        nb_part = len(filelist)
        X_part = np.zeros((nb_part, gist_size))
        for i in range(nb_part):
            im = Image.open(filelist[i])
            descriptors = leargist.color_gist(im)
            X_part[i, :] = descriptors
            print('Load image: ' + filelist[i])
        X_train = np.concatenate((X_train, X_part), axis=0)
        y_train = np.concatenate(
            (y_train, np.ones((nb_part, 1)) * i_class), axis=0)

    print('X_train shape:', X_train.shape)
    print('y_train shape:', y_train.shape)

    return X_train, y_train
Example #28
def load_val_image():
    X_dirname = '../411a3/val'
    X_filelist = image.list_pictures(X_dirname)
    val_samples = len(X_filelist)
    X_val = np.zeros((val_samples, 3, image_load_size, image_load_size))

    for i in range(val_samples):
        img = image.load_img(X_filelist[i],
                             target_size=(image_load_size, image_load_size))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        X_val[i, :, :, :] = x
        print('Predict image: ' + X_filelist[i])

    if K.image_dim_ordering() == 'tf':
        X_val = X_val.transpose(0, 2, 3, 1)

    # print('X_val shape:', X_val.shape)

    return X_val
Example #29
def get_data_list(data_list, target_size, category_size):
    X = []
    y = []

    for item in data_list:
        for picture in list_pictures(item['directory_path']):
            img = img_to_array(load_img(picture, target_size=target_size))
            X.append(img)
            y.append(item['correct_value'])

    # Convert to arrays
    X = np.asarray(X)
    y = np.asarray(y)

    # Scale pixel values to the range [0, 1]
    X = X.astype('float32')
    X = X / 255.0

    # Convert class labels to one-hot vectors
    y = np_utils.to_categorical(y, category_size)

    return (X, y)
Example #30
def make_dataset_reaction(out_x, out_y, labeldata, datapath, size):
    """
    Resizes the image files under each directory named in labeldata inside datapath to size*size and stores them in out_x and out_y.
    Returns a list of the image paths that failed to load.
    """
    from keras.preprocessing.image import img_to_array, list_pictures, load_img
    import sys
    import numpy as np
    from pathlib import Path
    from progressbar import ProgressBar
    IMG = np.empty((1, size, size, 3), dtype='int8')
    POSITION = 0
    ERROR = []
    for index, label in enumerate(labeldata):
        path = Path(datapath).joinpath(label)
        pics = list_pictures(path)
        prog = ProgressBar(0, len(pics))
        print('[{}/{}] load {} {} pictures.'.format(index + 1, len(labeldata),
                                                    path, len(pics)),
              file=sys.stderr)
        count = 0
        for picture in pics:
            try:
                IMG[0] = img_to_array(
                    load_img(picture, target_size=(size, size)))
                out_x[POSITION + count] = IMG
                prog.update(count + 1)
                count += 1
            except Exception as err:
                # Do not remove items from `pics` while iterating over it;
                # record the failure and skip the file instead.
                print(picture, err)
                ERROR.append(picture)
        out_y[POSITION:POSITION + count] = np.full(count, index, dtype=int)
        POSITION += count
        prog.finish()

    return ERROR
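Because the function writes into preallocated arrays, the caller sizes them first, for instance with count_pictures from Example #26; the labels, path, and image size below are assumptions:

import numpy as np

# Hypothetical labels and path; out_x/out_y must be preallocated to fit.
labels = ['apple', 'banana']
n = count_pictures(labels, 'dataset/')
out_x = np.empty((n, 64, 64, 3), dtype='int8')
out_y = np.empty(n, dtype=int)
failed = make_dataset_reaction(out_x, out_y, labels, 'dataset/', size=64)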