def input_fn(mode):
    if mode == tf.estimator.ModeKeys.TRAIN:
        # Wrap the raw labeled images in a FeatureSet.
        image_set = self.get_raw_image_set(with_label=True)
        feature_set = FeatureSet.image_frame(
            image_set.to_image_frame())
        # Training-time preprocessing: decode, resize, random crop,
        # random horizontal flip, per-channel normalization, then
        # convert each image to an NHWC tensor sample with its label.
        train_transformer = ChainedPreprocessing([
            ImageBytesToMat(),
            ImageResize(256, 256),
            ImageRandomCrop(224, 224),
            ImageRandomPreprocessing(ImageHFlip(), 0.5),
            ImageChannelNormalize(0.485, 0.456, 0.406,
                                  0.229, 0.224, 0.225),
            ImageMatToTensor(to_RGB=True, format="NHWC"),
            ImageSetToSample(input_keys=["imageTensor"],
                             target_keys=["label"])
        ])
        feature_set = feature_set.transform(train_transformer)
        feature_set = feature_set.transform(ImageFeatureToSample())
        # Build a batched TFDataset that TFPark can train on.
        training_dataset = TFDataset.from_feature_set(
            feature_set,
            features=(tf.float32, [224, 224, 3]),
            labels=(tf.int32, [1]),
            batch_size=8)
        return training_dataset
    else:
        raise NotImplementedError
def input_fn(mode):
    if mode == tf.estimator.ModeKeys.TRAIN:
        # demo_small directory structure:
        # demo_small/
        #     cats/
        #         cat images ...
        #     dogs/
        #         dog images ...
        image_set = ImageSet.read("./datasets/cat_dog/demo_small",
                                  sc=sc,
                                  with_label=True,
                                  one_based_label=False)
        train_transformer = ChainedPreprocessing([
            ImageBytesToMat(),
            ImageResize(256, 256),
            ImageRandomCrop(224, 224),
            ImageRandomPreprocessing(ImageHFlip(), 0.5),
            ImageChannelNormalize(0.485, 0.456, 0.406,
                                  0.229, 0.224, 0.225),
            ImageMatToTensor(to_RGB=True, format="NHWC"),
            ImageSetToSample(input_keys=["imageTensor"],
                             target_keys=["label"])
        ])
        feature_set = FeatureSet.image_frame(image_set.to_image_frame())
        feature_set = feature_set.transform(train_transformer)
        dataset = TFDataset.from_feature_set(feature_set,
                                             features=(tf.float32,
                                                       [224, 224, 3]),
                                             labels=(tf.int32, [1]),
                                             batch_size=16)
    else:
        raise NotImplementedError

    return dataset
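Both input_fn variants above only construct the training TFDataset; the code that consumes it is not shown. The following is a minimal, hedged sketch of how such an input_fn is typically handed to TFPark's TFEstimator; the model_fn body, the model_dir path, and the ZooOptimizer wrapping are illustrative assumptions, and the exact API may differ between Analytics Zoo versions.

# Hedged sketch: feeding input_fn to a TFPark estimator. The model_fn,
# model_dir and ZooOptimizer usage below are illustrative assumptions.
import tensorflow as tf
from zoo.tfpark import TFEstimator, ZooOptimizer


def model_fn(features, labels, mode):
    # Tiny placeholder network over the (224, 224, 3) image tensors.
    logits = tf.layers.dense(tf.layers.flatten(features), 2)
    if mode == tf.estimator.ModeKeys.TRAIN:
        # Labels arrive with shape (batch, 1); squeeze to (batch,) for the loss.
        loss = tf.losses.sparse_softmax_cross_entropy(
            labels=tf.squeeze(labels, axis=1), logits=logits)
        train_op = ZooOptimizer(tf.train.AdamOptimizer()).minimize(loss)
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
    return tf.estimator.EstimatorSpec(mode, predictions=logits)


estimator = TFEstimator.from_model_fn(model_fn, model_dir="/tmp/cat_dog_estimator")
estimator.train(input_fn, steps=100)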
Example #3
def test_training_for_feature_set(self):
    # Build a TFDataset directly from a FeatureSet and fit the model on it.
    model = self.create_image_model()
    feature_set = self.create_train_features_Set()
    training_dataset = TFDataset.from_feature_set(
        feature_set,
        features=(tf.float32, [224, 224, 3]),
        labels=(tf.int32, [1]),
        batch_size=8)
    model.fit(training_dataset)
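The helpers this test relies on are defined elsewhere in the test class and are not shown here. The sketch below is an assumption about the shape of create_image_model: a compiled tf.keras model wrapped in TFPark's KerasModel so that fit() accepts a TFDataset; the network itself is a placeholder, not the test's actual model.

# Hedged sketch of a possible create_image_model helper (assumption, not
# the test's actual implementation).
import tensorflow as tf
from zoo.tfpark import KerasModel


def create_image_model():
    net = tf.keras.Sequential([
        tf.keras.layers.Flatten(input_shape=(224, 224, 3)),
        # Assumes zero-based integer labels over a small number of classes.
        tf.keras.layers.Dense(2, activation="softmax"),
    ])
    net.compile(optimizer=tf.keras.optimizers.RMSprop(),
                loss="sparse_categorical_crossentropy",
                metrics=["accuracy"])
    return KerasModel(net)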
Example #4
    val_transformer = ChainedPreprocessing([ImagePixelBytesToMat(),
                                            ImageResize(256, 256),
                                            ImageCenterCrop(image_size, image_size),
                                            ImageChannelNormalize(123.0, 117.0, 104.0),
                                            ImageMatToTensor(format="NHWC", to_RGB=False),
                                            ImageSetToSample(input_keys=["imageTensor"],
                                                             target_keys=["label"])])
    raw_val_data = get_inception_data(options.folder, sc, "val")
    val_data = FeatureSet.image_frame(raw_val_data).transform(val_transformer)
    val_data = val_data.transform(ImageFeatureToSample())

    train_data = train_data.transform(ImageFeatureToSample())

    dataset = TFDataset.from_feature_set(train_data,
                                         features=(tf.float32, [224, 224, 3]),
                                         labels=(tf.int32, [1]),
                                         batch_size=options.batchSize,
                                         validation_dataset=val_data)

    images, labels = dataset.tensors

    # Labels in the sequence files are one-based, so subtract 1 to get
    # zero-based labels for the loss.
    zero_based_label = labels - 1

    is_training = tf.placeholder(dtype=tf.bool, shape=())

    with slim.arg_scope(inception_v1.inception_v1_arg_scope(weight_decay=0.0,
                                                            use_batch_norm=False)):
        logits, end_points = inception_v1.inception_v1(images,
                                                       dropout_keep_prob=0.6,
                                                       num_classes=1000,