Example #1
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator


def data_generator(batch_size):
    # Load CIFAR-10 and scale pixel values to [0, 1]
    (X_train, Y_train), (X_test, Y_test) = keras.datasets.cifar10.load_data()
    X_train = X_train.astype(np.float32) / 255
    X_test = X_test.astype(np.float32) / 255

    # Fit on the training set so featurewise statistics (mean and ZCA
    # whitening matrix) are computed once, then apply them to both splits
    datagen = ImageDataGenerator(featurewise_center=True, zca_whitening=True)
    datagen.fit(X_train)

    X_train = datagen.standardize(X_train)
    X_test = datagen.standardize(X_test)

    return tf.data.Dataset.from_tensor_slices((X_train, Y_train)).batch(batch_size), \
           tf.data.Dataset.from_tensor_slices((X_test, Y_test)).batch(batch_size)
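A minimal usage sketch for the function above; the batch size and the small model architecture are assumptions, not part of the original example:

# Hypothetical consumer of data_generator(); model and batch size are assumptions.
train_ds, test_ds = data_generator(batch_size=64)

model = keras.Sequential([
    keras.layers.Flatten(input_shape=(32, 32, 3)),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(10, activation='softmax'),
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_ds, validation_data=test_ds, epochs=5)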
Example #2
import cv2
import numpy as np
from time import time

# `cap`, `test_datagen`, `custom_resnet_model` and `model_load_time` are assumed
# to be defined earlier (video capture, fitted ImageDataGenerator, loaded model
# and the time it took to load it).
me = []
se = []
ret, _ = cap.read()
cap.set(cv2.CAP_PROP_POS_MSEC, 0)  # rewind to the start of the video
res = []
t = time()
i = 0
while ret:
    print("Frame No: ", i)
    ret, I = cap.read()
    if not ret:
        break

    # Resize the frame to the network input size
    I = cv2.resize(I, (224, 224))

    img = np.reshape(I, [1, 224, 224, 3])
    img = img.astype('float32')
    # Apply the generator's normalization to the single frame
    img_data = test_datagen.standardize(img)
    me.append(img_data.mean())
    se.append(img_data.std())

    f_all = custom_resnet_model.predict(img_data)
    f_all = ["%.5f" % (100 * f) for f in list(f_all[0])]
    res.append(f_all)
    i = i + 1
run_time = time() - t
res = np.array(res)
np.save('predictions.npy', res, allow_pickle=False)
print('Model Load Time = ' + str(model_load_time))
print('Run Time = ' + str(run_time))
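For context, a sketch of the setup this snippet assumes (it would run before the loop above); the model path, video file name and generator settings are assumptions, not part of the original example:

from time import time
import cv2
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator

t0 = time()
custom_resnet_model = keras.models.load_model('custom_resnet.h5')  # hypothetical path
model_load_time = time() - t0

cap = cv2.VideoCapture('input_video.mp4')  # hypothetical video file

# Samplewise normalization needs no fit(); featurewise options would require
# fitting on training data first so standardize() has statistics to use.
test_datagen = ImageDataGenerator(samplewise_center=True,
                                  samplewise_std_normalization=True)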
Example #3
import numpy as np
import tqdm
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Slide, Area_box, DatasetPreparation, TILE_SIZE, TILE_STEP and
# NETWORK_INPUT_SHAPE are assumed to come from the surrounding project code.


class PredictGenerator(keras.utils.Sequence):
    def __init__(self,
                 slide_path: str,
                 batch_size: int,
                 area_to_predict: Area_box = None,
                 tqdm_enabled=True):
        self.batch_size = batch_size
        self.slide = Slide(slide_path)

        self.tqdm_enabled = tqdm_enabled
        self.tqdm = None

        self.area_to_predict = area_to_predict if area_to_predict else (
            0, 0, self.slide.slide.dimensions[0],
            self.slide.slide.dimensions[1])

        self.datagen_for_standartize = ImageDataGenerator(
            samplewise_center=True,
            samplewise_std_normalization=True,
        )

        x = range(self.area_to_predict[0], self.area_to_predict[2] - TILE_SIZE,
                  TILE_STEP)
        y = range(self.area_to_predict[1], self.area_to_predict[3] - TILE_SIZE,
                  TILE_STEP)
        self.coordinates_grid = np.stack(np.meshgrid(x, y), axis=2)

        self.all_coordinates = self.coordinates_grid.reshape(-1, 2)
        self.label_names_to_id = DatasetPreparation.get_label_name_to_label_id_dict()

    def __len__(self):
        return int(np.ceil(len(self.all_coordinates) / self.batch_size))

    def __getitem__(self, index):
        if self.tqdm_enabled:
            self.update_tqdm(index)

        if isinstance(index, slice):
            # Expand the slice into individual batch indices (handles None
            # start/stop/step defaults)
            return [self[i] for i in range(*index.indices(len(self)))]

        addresses = self.all_coordinates[index * self.batch_size:(index + 1) *
                                         self.batch_size]

        X = self.__data_generation(addresses)

        return X, np.zeros(len(X))

    def update_tqdm(self, index):
        if index == 0:
            self.tqdm = tqdm.tqdm(total=len(self))

        self.tqdm.update(1)

        if index == len(self) - 1:
            self.tqdm.close()

    def __data_generation(self, addresses: np.ndarray) -> np.ndarray:
        # Cut a tile at each coordinate, resize it to the network input size
        # and stack everything into a single float batch
        data = np.asarray([
            np.asarray(
                self.slide.cut_tile(*add).resize(
                    NETWORK_INPUT_SHAPE[:2]).convert('RGB'))
            for add in addresses
        ]).astype(np.float32)

        return self.datagen_for_standartize.standardize(data)
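A minimal sketch of how this Sequence might be consumed; the slide path, batch size and model path are assumptions, not part of the original example:

# Hypothetical usage of PredictGenerator with a trained model.
gen = PredictGenerator('slide.svs', batch_size=32)
model = keras.models.load_model('tile_classifier.h5')  # hypothetical path

predictions = model.predict(gen)

# Reshape the flat per-tile predictions back onto the coordinate grid,
# e.g. to build a heatmap over the slide.
grid_h, grid_w = gen.coordinates_grid.shape[:2]
heatmap = predictions.reshape(grid_h, grid_w, -1)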