Example #1
def train():
    # Download and split data.
    common.split_data(TIME_STEPS)

    # Build and compile the model.
    model = build_model()

    # model.save_weights(CKP_PATH.format(epoch=0))

    # Load last checkpoint if any.
    # model.load_weights(
    #     tf.train.latest_checkpoint(
    #         os.path.dirname(CKP_PATH)
    #     )
    # )

    train_idg = generators.TimeDistributedImageDataGenerator(
        rotation_range=30,
        zoom_range=0.15,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.15,
        horizontal_flip=True,
        rescale=1. / 255,
        time_steps=TIME_STEPS,
    )

    validation_idg = generators.TimeDistributedImageDataGenerator(
        time_steps=TIME_STEPS,
    )

    history = model.fit(
        train_idg.flow_from_directory(
            common.TRAIN_DATA_PATH,
            target_size=(48, 48),
            batch_size=BATCH_SIZE,
            class_mode='sparse',
            shuffle=False,
            color_mode='rgb',
            # classes=['agree_pure', 'agree_considered'],
            # save_to_dir='./data/train'
        ),
        validation_data=validation_idg.flow_from_directory(
            common.VALIDATION_DATA_PATH,
            target_size=(48, 48),
            batch_size=BATCH_SIZE,
            class_mode='sparse',
            shuffle=False,
            color_mode='rgb',
            # classes=['agree_pure', 'agree_considered'],
            # save_to_dir='./data/test'
        ),
        callbacks=CALLBACKS,
        epochs=EPOCHS,
    )

    model.save(SVD_PATH)

    common.plot_acc_loss(history, PLT_PATH)
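
This snippet relies on module-level configuration (TIME_STEPS, BATCH_SIZE, EPOCHS, CALLBACKS and the CKP_PATH/SVD_PATH/PLT_PATH paths) defined elsewhere in the project, plus the common and generators helper modules. A minimal sketch of what that configuration block could look like, with purely illustrative values:

import tensorflow as tf

# Illustrative values only; the project defines the real ones elsewhere.
TIME_STEPS = 12
BATCH_SIZE = 32
EPOCHS = 100

CKP_PATH = './checkpoints/cp-{epoch:04d}.ckpt'
SVD_PATH = './models/model'
PLT_PATH = './models/plot.png'

CALLBACKS = [
    tf.keras.callbacks.ModelCheckpoint(filepath=CKP_PATH, save_weights_only=True, verbose=1),
    tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=1),
]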
Example #2
import pandas as pd

import common


class History:
    """Minimal stand-in for the Keras History object that common.plot_acc_loss expects."""

    def __init__(self):
        self.history = {}


history = History()

# Rebuild the metric curves from a saved log.csv (written by the CSVLogger callback) and re-plot them.
df = pd.read_csv('../models/trial-final/32/12/log.csv')
df = df[['accuracy', 'loss', 'val_accuracy', 'val_loss']]

for column in df.columns:
    history.history[column] = df[column].tolist()

common.plot_acc_loss(history, '../models/trial-final/32/12/plot.png')
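
The History shim above only needs to expose a history dict of metric lists, because that is all common.plot_acc_loss reads. The helper itself lives in the common module; a rough sketch of what it might do, assuming it plots accuracy and loss for train and validation side by side:

import matplotlib.pyplot as plt


def plot_acc_loss(history, path):
    # Draw accuracy and loss curves (train vs. validation) and save the figure.
    fig, (ax_acc, ax_loss) = plt.subplots(1, 2, figsize=(12, 4))
    ax_acc.plot(history.history['accuracy'], label='train')
    ax_acc.plot(history.history['val_accuracy'], label='validation')
    ax_acc.set_title('accuracy')
    ax_acc.legend()
    ax_loss.plot(history.history['loss'], label='train')
    ax_loss.plot(history.history['val_loss'], label='validation')
    ax_loss.set_title('loss')
    ax_loss.legend()
    fig.savefig(path)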
Example #3
def train():
    tf.keras.utils.get_file(
        fname='cec-videos-augmented.tar.gz',
        origin='https://unir-tfm-cec.s3.us-east-2.amazonaws.com/cec-videos-augmented.tar.gz',
        extract=True
    )

    for batch_size in BATCH_SIZE:
        for time_steps in TIME_STEPS:
            path = TRL_PATH + f'/{batch_size}/{time_steps}'
            os.makedirs(path, exist_ok=True)

            # Build and compile the model.
            model = build_model(time_steps, len(CLASSES))

            data_aug = tf.keras.preprocessing.image.ImageDataGenerator(
                preprocessing_function=tf.keras.applications.mobilenet.preprocess_input
            )

            train_idg = SlidingFrameGenerator(
                classes=CLASSES,
                glob_pattern=common.HOME + '/.keras/datasets/cec-videos-augmented/{classname}/*.avi',
                nb_frames=time_steps,
                split_val=.2,
                shuffle=True,
                batch_size=batch_size,
                target_shape=(224, 224),
                nb_channel=3,
                transformation=data_aug,
                use_frame_cache=False
            )

            validation_idg = train_idg.get_validation_generator()

            # Configure callbacks
            callbacks = [
                tf.keras.callbacks.ModelCheckpoint(
                    filepath=path + '/model',
                    monitor='val_accuracy',
                    mode='max',
                    save_best_only=True,
                    verbose=1,
                ),
                tf.keras.callbacks.EarlyStopping(
                    monitor='val_loss',
                    mode='min',
                    verbose=1,
                    patience=int(EPOCHS * .01)
                ),
                tf.keras.callbacks.CSVLogger(
                    filename=path + '/log.csv'
                ),
                tf.keras.callbacks.TensorBoard(
                    log_dir=path + '/tb',
                    histogram_freq=1
                ),
                tf.keras.callbacks.ReduceLROnPlateau(
                    verbose=1
                ),
            ]

            history = model.fit(
                train_idg,
                validation_data=validation_idg,
                callbacks=callbacks,
                epochs=EPOCHS,
            )

            common.plot_acc_loss(history, path + '/plot.png')
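
build_model(time_steps, n_classes) is defined elsewhere in the project. Since the generator feeds sequences of 224x224 RGB frames preprocessed for MobileNet, one plausible shape for it (an assumption, not the author's confirmed architecture) is a TimeDistributed MobileNet feature extractor followed by a recurrent layer:

def build_model(time_steps, n_classes):
    # Frozen MobileNet backbone applied to every frame, then an LSTM over the sequence.
    backbone = tf.keras.applications.MobileNet(
        include_top=False, weights='imagenet',
        input_shape=(224, 224, 3), pooling='avg')
    backbone.trainable = False

    model = tf.keras.Sequential([
        tf.keras.layers.Input(shape=(time_steps, 224, 224, 3)),
        tf.keras.layers.TimeDistributed(backbone),
        tf.keras.layers.LSTM(64),
        tf.keras.layers.Dense(n_classes, activation='softmax'),
    ])
    # SlidingFrameGenerator yields one-hot labels, hence categorical cross-entropy.
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model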
Example #4
def train():
    tf.keras.utils.get_file(
        fname='cec-videos.tar.gz',
        origin='https://unir-tfm-cec.s3.us-east-2.amazonaws.com/cec-videos.tar.gz',
        extract=True,
    )

    # Collect file/sequence counts per configuration for later analysis.
    data = pandas.DataFrame(
        columns=['trial', 'batch_size', 'time_steps', 'cycle', 'files', 'sequences'])

    for batch_size in BATCH_SIZE:
        for time_steps in TIME_STEPS:
            path = TRL_PATH + f'/{batch_size}/{time_steps}'
            os.makedirs(path, exist_ok=True)

            # Build and compile the model.
            model = build_model(time_steps, len(CLASSES))

            data_aug = tf.keras.preprocessing.image.ImageDataGenerator(
                zoom_range=.1,
                horizontal_flip=True,
                rotation_range=8,
                width_shift_range=.2,
                height_shift_range=.2,
                preprocessing_function=tf.keras.applications.mobilenet_v2.preprocess_input,
            )

            train_idg = SlidingFrameGenerator(classes=CLASSES,
                                              glob_pattern=common.VIDEOS_PATH,
                                              nb_frames=time_steps,
                                              split_val=.2,
                                              shuffle=True,
                                              batch_size=batch_size,
                                              target_shape=(224, 224),
                                              nb_channel=3,
                                              transformation=data_aug,
                                              use_frame_cache=False)

            keras_video.utils.show_sample(train_idg)

            validation_idg = train_idg.get_validation_generator()

            # Record how many files and sequences each split sees for this configuration.
            row = {
                'trial': TRIAL,
                'batch_size': batch_size,
                'cycle': 'training',
                'time_steps': time_steps,
                'files': train_idg.files_count,
                'sequences': len(train_idg.vid_info),
            }
            data = pandas.concat([data, pandas.DataFrame([row])], ignore_index=True)

            row = {
                'trial': TRIAL,
                'batch_size': batch_size,
                'cycle': 'validation',
                'time_steps': time_steps,
                'files': validation_idg.files_count,
                'sequences': len(validation_idg.vid_info),
            }
            data = pandas.concat([data, pandas.DataFrame([row])], ignore_index=True)

            # Configure callbacks
            callbacks = [
                tf.keras.callbacks.ModelCheckpoint(
                    filepath=path + '/model',
                    monitor='val_accuracy',
                    mode='max',
                    save_best_only=True,
                    verbose=1,
                ),
                tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                 mode='min',
                                                 verbose=1,
                                                 patience=int(EPOCHS * .01)),
                tf.keras.callbacks.CSVLogger(filename=path + '/log.csv'),
                tf.keras.callbacks.TensorBoard(log_dir=path + '/tb',
                                               histogram_freq=1),
                tf.keras.callbacks.ReduceLROnPlateau(verbose=1),
            ]

            history = model.fit(
                train_idg,
                validation_data=validation_idg,
                callbacks=callbacks,
                epochs=EPOCHS,
            )

            common.plot_acc_loss(history, path + '/plot.png')

    data.to_csv('trial_01.csv')
Example #5
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

history = model.fit(
    train_idg.flow_from_directory(
        common.TRAIN_DATA_PATH,
        target_size=(224, 224),
        batch_size=32,
        class_mode='categorical',
        shuffle=True,
        seed=common.SEED_VALUE,
        # classes=['agree_pure']
        # save_to_dir='./data/train'
    ),
    epochs=50,
    validation_data=validation_idg.flow_from_directory(
        common.TEST_DATA_PATH,
        target_size=(224, 224),
        batch_size=32,
        class_mode='categorical',
        shuffle=True,
        seed=common.SEED_VALUE,
        # classes=['agree_pure']
        # save_to_dir='./data/test'
    ))

model.save('models/01/ResNet50')

common.plot_acc_loss(history, '../models/01/ResNet50/plot.png')
Example #6
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

history = model.fit(
    train_idg.flow_from_directory(
        common.TRAIN_DATA_PATH,
        target_size=(224, 224),
        batch_size=32,
        class_mode='categorical',
        shuffle=True,
        seed=common.SEED_VALUE,
        # classes=['agree_pure']
        # save_to_dir='./data/train'
    ),
    epochs=50,
    validation_data=validation_idg.flow_from_directory(
        common.TEST_DATA_PATH,
        target_size=(224, 224),
        batch_size=32,
        class_mode='categorical',
        shuffle=True,
        seed=common.SEED_VALUE,
        # classes=['agree_pure']
        # save_to_dir='./data/test'
    ))

model.save('models/04/ResNet152')

common.plot_acc_loss(history, '../models/04/ResNet152/plot.png')
Example #7
# Compile call completed to match Examples #5 and #6; the truncated original may have differed.
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)

history = model.fit(
    train_idg.flow_from_directory(
        common.TRAIN_DATA_PATH,
        target_size=(224, 224),
        batch_size=32,
        class_mode='categorical',
        shuffle=True,
        seed=common.SEED_VALUE,
        # classes=['agree_pure']
        # save_to_dir='./data/train'
    ),
    epochs=50,
    validation_data=validation_idg.flow_from_directory(
        common.TEST_DATA_PATH,
        target_size=(224, 224),
        batch_size=32,
        class_mode='categorical',
        shuffle=True,
        seed=common.SEED_VALUE,
        # classes=['agree_pure']
        # save_to_dir='./data/test'
    )
)

model.save('models/03/DenseNet201')

common.plot_acc_loss(history, '../models/03/DenseNet201/plot.png')
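
Examples #5 through #7 assume that train_idg and validation_idg were created earlier as ImageDataGenerator instances. A minimal sketch under that assumption (the augmentation settings are illustrative, not the ones used in the project):

import tensorflow as tf

train_idg = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1. / 255,
    horizontal_flip=True,
    rotation_range=10,
)
validation_idg = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)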