def train_model(self):
    import os

    from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
    from keras.optimizers import Adam
    from keras.preprocessing.image import ImageDataGenerator
    from keras_video import VideoFrameGenerator
    from keras_video.utils import show_sample

    classes = ['Real', 'Deepfake']
    classes.sort()

    size = (224, 224)
    channels = 3
    nbframe = 10
    bs = 2
    glob_pattern = 'Dataset_Face_Extracted_redo/{classname}/*.mp4'

    # Frame-level data augmentation
    data_aug = ImageDataGenerator(
        zoom_range=.1,
        horizontal_flip=True,
        rotation_range=8,
        width_shift_range=.2,
        height_shift_range=.2)

    # Create the video frame generator; 25% of the videos are held out for
    # validation (older keras-video releases call this argument `split`)
    train_generator = VideoFrameGenerator(
        classes=classes,
        glob_pattern=glob_pattern,
        nb_frames=nbframe,
        split_val=.25,
        shuffle=True,
        batch_size=bs,
        target_shape=size,
        nb_channel=channels,
        transformation=data_aug,
        use_frame_cache=True)
    valid = train_generator.get_validation_generator()
    show_sample(train_generator)

    # Model input: (frames, height, width, channels), e.g. (10, 224, 224, 3)
    input_shape = (nbframe,) + size + (channels,)
    model = self.timeModel(input_shape)
    model.compile(Adam(0.001), 'categorical_crossentropy', metrics=['acc'])

    epochs = 50
    callbacks = [
        ReduceLROnPlateau(verbose=1),
        ModelCheckpoint(
            # model.name avoids the unreadable object repr that
            # str(model) would put into the filename
            filepath=os.path.join(
                'dataLSTM', 'checkpoints',
                model.name + '.{epoch:03d}-{val_loss:.3f}.hdf5'),
            verbose=1,
            save_best_only=True)
    ]
    # fit_generator is deprecated in recent Keras; model.fit accepts the
    # same Sequence generators there
    model.fit_generator(
        train_generator,
        validation_data=valid,
        verbose=1,
        epochs=epochs,
        callbacks=callbacks)
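# The timeModel method referenced above is not shown. A minimal sketch of
# what such a frame-sequence model might look like, assuming a MobileNetV2
# feature extractor; the backbone choice, GRU width and layer layout here
# are assumptions, not the original architecture.
from keras.applications import MobileNetV2
from keras.layers import Dense, GRU, GlobalMaxPool2D, TimeDistributed
from keras.models import Sequential

def timeModel(input_shape):
    # input_shape is (frames, height, width, channels), e.g. (10, 224, 224, 3)
    frames, *frame_shape = input_shape
    backbone = MobileNetV2(include_top=False, input_shape=tuple(frame_shape))
    model = Sequential([
        # apply the CNN to every frame independently
        TimeDistributed(backbone, input_shape=input_shape),
        TimeDistributed(GlobalMaxPool2D()),
        # aggregate the per-frame features over time
        GRU(64),
        # two classes: Real / Deepfake
        Dense(2, activation='softmax'),
    ])
    return model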
def create_train_dataset(self, model, prop_val_dataset=0.33, do_data_aug=True,
                         batch_size=8, preprocess_input=None):
    """
    Retrieve the train and validation datasets.

    model: keras model
        A Keras model created by the buildModel class.
    prop_val_dataset: float, optional
        Fraction between 0 and 1 of the training set held out for
        validation. Default 0.33.
    do_data_aug: boolean, optional
        Whether or not to apply data augmentation to the frames.
        Default True.
    batch_size: int, optional
        Number of video clips per batch. Default 8.
    preprocess_input: callable, optional
        Per-frame preprocessing function. Default None.
    """
    # model.input_shape is (batch, time_step, height, width, channels)
    _, time_step, *size, channels = model.input_shape
    self.size = tuple(size)
    self.channels = channels
    self.time_step = time_step

    # For data augmentation
    if do_data_aug:
        data_aug = keras.preprocessing.image.ImageDataGenerator(
            preprocessing_function=preprocess_input,
            zoom_range=.1,
            horizontal_flip=True,
            rotation_range=8,
            width_shift_range=.2,
            height_shift_range=.2)
    else:
        data_aug = None

    # Create video frame generator
    train_dataset = VideoFrameGenerator(
        classes=self.categories,
        glob_pattern=self.glob_pattern,
        nb_frames=self.time_step,
        split_val=prop_val_dataset,
        shuffle=True,
        batch_size=batch_size,
        target_shape=self.size,
        nb_channel=self.channels,
        transformation=data_aug,
        use_frame_cache=True)

    # Create the validation generator from the same split
    validation_dataset = train_dataset.get_validation_generator()

    return train_dataset, validation_dataset
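# For reference, the starred unpacking above splits a 5-D Keras input shape
# into its parts; a quick sketch with illustrative shape values:
input_shape = (None, 10, 224, 224, 3)  # (batch, time_step, height, width, channels)
_, time_step, *size, channels = input_shape
assert time_step == 10 and tuple(size) == (224, 224) and channels == 3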
def get_frames(filename):
    SIZE = (128, 128)
    CHANNELS = 3
    NBFRAME = 20

    # Only rescale at inference time; no augmentation
    data_aug = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)

    test = VideoFrameGenerator(
        glob_pattern=filename,
        nb_frames=NBFRAME,
        shuffle=False,
        batch_size=1,
        target_shape=SIZE,
        nb_channel=CHANNELS,
        transformation=data_aug,
        use_frame_cache=False)
    return test
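# A sketch of how get_frames might be used at prediction time; the model
# checkpoint path and video file here are hypothetical.
model = tf.keras.models.load_model('checkpoints/deepfake_model.hdf5')  # hypothetical path
test = get_frames('uploads/sample.mp4')                                # hypothetical file
# The generator yields batches of shape (1, NBFRAME, 128, 128, 3); recent
# tf.keras versions accept a Sequence directly in predict().
predictions = model.predict(test)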
def create_video_frames_generator():
    # NBFRAME, SIZE, BS and CHANNELS are module-level constants
    glob_pattern = 'short_videos_2/*.avi'
    gen = VideoFrameGenerator(
        glob_pattern=glob_pattern,
        nb_frames=NBFRAME,
        target_shape=SIZE,
        batch_size=BS,
        nb_channel=CHANNELS,
        use_frame_cache=True)
    return gen
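# A quick sanity check of the generator output; the shapes shown assume the
# module-level constants above, and that class labels are available.
gen = create_video_frames_generator()
batch_frames, batch_labels = gen[0]  # VideoFrameGenerator is a keras Sequence
print(batch_frames.shape)            # expected: (BS, NBFRAME, *SIZE, CHANNELS)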
NBFRAME = 5
BS = 8
EPOCHS = 50

# SIZE, CHANNELS and classes are defined earlier in the script
CONVSHAPE = SIZE + (CHANNELS,)
INSHAPE = (NBFRAME,) + CONVSHAPE

# Pattern to get videos and classes
glob_pattern = '../../data/facerec/{classname}/*.mp4'

# Create video frame generator
train = VideoFrameGenerator(
    classes=classes,
    glob_pattern=glob_pattern,
    nb_frames=NBFRAME,
    split_val=.33,
    shuffle=True,
    batch_size=BS,
    target_shape=SIZE,
    nb_channel=CHANNELS,
    use_frame_cache=False)
valid = train.get_validation_generator()

from keras.layers import Conv2D, BatchNormalization, MaxPool2D, GlobalMaxPool2D

def build_convnet(shape=CONVSHAPE):
    momentum = .9
    model = keras.Sequential()
    model.add(Conv2D(64, (3, 3), input_shape=shape,
                     padding='same', activation='relu'))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(BatchNormalization(momentum=momentum))
    # The original snippet is cut off here; a minimal completion using the
    # layers imported above:
    model.add(MaxPool2D())
    model.add(GlobalMaxPool2D())
    return model
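# The convnet above is typically wrapped per-frame with TimeDistributed and
# fed to a recurrent layer, which is what INSHAPE = (NBFRAME,) + CONVSHAPE
# implies; a minimal sketch (the GRU width and dense head are assumptions,
# not part of the original snippet):
from keras.layers import Dense, Dropout, GRU, TimeDistributed

def build_model(shape=INSHAPE, nb_classes=len(classes)):
    convnet = build_convnet(shape[1:])
    model = keras.Sequential()
    model.add(TimeDistributed(convnet, input_shape=shape))  # CNN on each frame
    model.add(GRU(64))                                      # aggregate over time
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(.5))
    model.add(Dense(nb_classes, activation='softmax'))
    return model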
    mode='auto')  # closes a callback defined above this excerpt

# For data augmentation
data_augmentation = keras.preprocessing.image.ImageDataGenerator(
    zoom_range=.1,
    rotation_range=8,
    width_shift_range=.2,
    height_shift_range=.2)

# Create video frame generator; 40% of the videos are held out for testing
# and 20% for validation
train_set = VideoFrameGenerator(
    classes=class_ids,
    glob_pattern=video_path,
    nb_frames=no_of_frames,
    split_test=.4,
    split_val=.2,
    shuffle=True,
    batch_size=batch_size_value,
    target_shape=(img_width, img_height),
    nb_channel=channels,
    transformation=data_augmentation,
    use_frame_cache=True)
validation_set = train_set.get_validation_generator()
test_set = train_set.get_test_generator()

history = model.fit_generator(
    train_set,
    validation_data=validation_set,
    verbose=1,
    epochs=no_of_epochs,
    callbacks=[checkpoint, early_stopping_criteria])
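# The mode='auto') fragment at the top of this excerpt closes a callback
# defined before it; a plausible sketch of the two callbacks passed to
# fit_generator (the filepath, monitored metric and patience are assumptions):
from keras.callbacks import EarlyStopping, ModelCheckpoint

checkpoint = ModelCheckpoint(
    filepath='checkpoints/weights.{epoch:03d}-{val_loss:.3f}.hdf5',  # assumed path
    monitor='val_loss',
    save_best_only=True,
    verbose=1)
early_stopping_criteria = EarlyStopping(
    monitor='val_loss',
    patience=5,  # assumed patience
    mode='auto')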