Example #1
def features_2D_predict_generator(sFrameBaseDir: str,
                                  sFeatureBaseDir: str,
                                  keModel: keras.Model,
                                  nFramesNorm: int = 40):
    """
    Used by the MobileNet-LSTM NN architecture.
    The (video) frames (2-dimensional) in sFrameBaseDir are fed into keModel (e.g. MobileNet without top layers)
    and the resulting features are saved to sFeatureBaseDir.
    """

    # do not (partially) overwrite existing feature directory
    #if os.path.exists(sFeatureBaseDir):
    #    warnings.warn("\nFeature folder " + sFeatureBaseDir + " already exists, calculation stopped")
    #    return

    # prepare frame generator - without shuffling!
    _, h, w, c = keModel.input_shape
    genFrames = FramesGenerator(sFrameBaseDir,
                                1,
                                nFramesNorm,
                                h,
                                w,
                                c,
                                liClassesFull=None,
                                bShuffle=False)

    print("Predict features with %s ... " % keModel.name)
    nCount = 0
    # Predict - loop through all samples
    for _, seVideo in genFrames.dfVideos.iterrows():

        # ... sFrameBaseDir / class / videoname=frame-directory
        sVideoName = seVideo.sFrameDir.split("/")[-1]
        sLabel = seVideo.sLabel
        sFeaturePath = sFeatureBaseDir + "/" + sLabel + "/" + sVideoName + ".npy"

        # check if already predicted
        if os.path.exists(sFeaturePath):
            print("Video %5d: features already extracted to %s" %
                  (nCount, sFeaturePath))
            nCount += 1
            continue

        # get normalized frames and predict feature
        arX, _ = genFrames.data_generation(seVideo)
        arFeature = keModel.predict(arX, verbose=0)

        # save to file
        os.makedirs(sFeatureBaseDir + "/" + sLabel, exist_ok=True)
        np.save(sFeaturePath, arFeature)

        print("Video %5d: features %s saved to %s" %
              (nCount, str(arFeature.shape), sFeaturePath))
        nCount += 1

    print("%d features saved to files in %s" % (nCount + 1, sFeatureBaseDir))
    return
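
# --- Hedged usage sketch (not from the source): how features_2D_predict_generator
# --- might be called with a MobileNet backbone stripped of its top layers.
# --- The directory paths below are illustrative placeholders.
keMobileNet = keras.applications.mobilenet.MobileNet(weights="imagenet",
                                                     include_top=False,
                                                     input_shape=(224, 224, 3),
                                                     pooling="avg")
features_2D_predict_generator("data-temp/frames/val",
                              "data-temp/mobilenet-features/val",
                              keMobileNet,
                              nFramesNorm=40)
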
def features_3D_predict_generator(sFrameBaseDir: str,
                                  sFeatureBaseDir: str,
                                  keModel: keras.Model,
                                  nBatchSize: int = 16):
    """
    Used by I3D-top-only model.
    The videos (frames) are fed into keModel (=I3D without top layers) and
    resulting features are saved to disc. 
    (Later these features are used to train a small model containing 
    only the adjusted I3D top layers.)
    """

    # do not (partially) overwrite existing feature directory
    #if os.path.exists(sFeatureBaseDir): 
    #    warnings.warn("\nFeature folder " + sFeatureBaseDir + " already exists, calculation stopped")
    #    return

    # prepare frame generator - without shuffling!
    _, nFramesModel, h, w, c = keModel.input_shape
    genFrames = FramesGenerator(sFrameBaseDir, nBatchSize, nFramesModel, h, w, c,
                                liClassesFull=None, bShuffle=False)

    # Predict
    print("Predict features with %s ... " % keModel.name)

    nCount = 0
    # loop through all samples
    for _, seVideo in genFrames.dfVideos.iterrows():

        # ... sFrameBaseDir / class / videoname=frame-directory
        sVideoName = seVideo.sFrameDir.split("/")[-1]
        sLabel = seVideo.sLabel
        sFeaturePath = sFeatureBaseDir + "/" + sLabel + "/" + sVideoName + ".npy"

        # check if already predicted
        if os.path.exists(sFeaturePath):
            print("Video %5d: features already extracted to %s" % (nCount, sFeaturePath))
            nCount += 1
            continue

        # get normalized frames
        arFrames, _ = genFrames.data_generation(seVideo)

        # predict single sample
        arFeature = keModel.predict(np.expand_dims(arFrames, axis=0))[0]

        # save to file
    os.makedirs(sFeatureBaseDir + "/" + sLabel, exist_ok=True)
        np.save(sFeaturePath, arFeature)

        print("Video %5d: features %s saved to %s" % (nCount, str(arFeature.shape), sFeaturePath))
        nCount += 1

    print("%d features saved to files in %s" % (nCount+1, sFeatureBaseDir))
    
    return
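
# --- Hedged usage sketch (not from the source): caching I3D optical-flow features.
# --- The backbone call mirrors the Inception_Inflated3d usage further below;
# --- the directories are illustrative placeholders.
keI3DFlow = Inception_Inflated3d(include_top=False,
                                 weights='flow_imagenet_and_kinetics',
                                 input_shape=(40, 224, 224, 2))
features_3D_predict_generator("data-temp/oflow/train",
                              "data-temp/oflow-i3d/train",
                              keI3DFlow,
                              nBatchSize=16)
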
def train_I3D_oflow_end2end():
    """
    Trains the Keras I3D optical-flow model.
    :return: None
    """
    sClassFile = "class.csv"
    sOflowDir = "Training_data"
    sModelDir = "model"

    diTrainTop = {"fLearn": 1e-3, "nEpochs": 5}

    diTrainAll = {"fLearn": 1e-4, "nEpochs": 1}

    nBatchSize = 4

    print("\nStarting I3D end2end training ...")
    print(os.getcwd())

    oClasses = VideoClasses(sClassFile)

    # Load training data
    path = os.path.join(sOflowDir, "train")
    genFramesTrain = FramesGenerator(path, nBatchSize, 40, 224, 224, 2,
                                     oClasses.liClasses)
    path = os.path.join(sOflowDir, "val")
    genFramesVal = FramesGenerator(path, nBatchSize, 40, 224, 224, 2,
                                   oClasses.liClasses)

    if genFramesTrain:
        print("Generated training data.")
    if genFramesVal:
        print("Generated validation data.")

    # Load pretrained i3d model and adjust top layer
    print("Load pretrained I3D flow model ...")
    keI3DOflow = Inception_Inflated3d(include_top=False,
                                      weights='flow_imagenet_and_kinetics',
                                      input_shape=(40, 224, 224, 2))
    print("Add top layers with %d output classes ..." % oClasses.nClasses)
    keI3DOflow = layers_freeze(keI3DOflow)
    keI3DOflow = add_i3d_top(keI3DOflow, oClasses.nClasses, dropout_prob=0.5)
    if keI3DOflow:
        print("Model loaded successfully")

    sLog = time.strftime("%Y%m%d-%H%M",
                         time.gmtime()) + "-%s%03d-oflow-i3d" % ("ISL", 105)

    # Save the model
    os.makedirs(sModelDir, exist_ok=True)
    cpTopLast = keras.callbacks.ModelCheckpoint(filepath=sModelDir + "/" +
                                                sLog + "-top-last.h5",
                                                verbose=1,
                                                save_best_only=False,
                                                save_weights_only=False)
    cpTopBest = keras.callbacks.ModelCheckpoint(filepath=sModelDir + "/" +
                                                sLog + "-top-best.h5",
                                                verbose=1,
                                                save_best_only=True,
                                                save_weights_only=False)
    cpAllLast = keras.callbacks.ModelCheckpoint(filepath=sModelDir + "/" +
                                                sLog + "-entire-last.h5",
                                                verbose=1,
                                                save_weights_only=False,
                                                save_best_only=False)
    cpAllBest = keras.callbacks.ModelCheckpoint(filepath=sModelDir + "/" +
                                                sLog + "-entire-best.h5",
                                                verbose=1,
                                                save_best_only=True,
                                                save_weights_only=False)
    cbTensorBoard = keras.callbacks.TensorBoard(log_dir="logs",
                                                histogram_freq=1,
                                                update_freq='batch',
                                                write_graph=True,
                                                write_images=True,
                                                batch_size=32)
    callbacks1 = [cpTopLast, cpTopBest, cbTensorBoard]
    callbacks2 = [cpAllBest, cpAllLast, cbTensorBoard]

    # Fit top layers
    print("Fit I3D top layers with generator: %s" % (diTrainTop))
    optimizer = keras.optimizers.Adam(lr=diTrainTop["fLearn"])
    keI3DOflow.compile(loss='categorical_crossentropy',
                       optimizer=optimizer,
                       metrics=['accuracy'])
    count_params(keI3DOflow)

    keI3DOflow.fit_generator(generator=genFramesTrain,
                             validation_data=genFramesVal,
                             epochs=diTrainTop["nEpochs"],
                             workers=4,
                             use_multiprocessing=False,
                             max_queue_size=8,
                             verbose=1,
                             callbacks=callbacks1)
    """
    Fit entire I3D model
    print("Finetune all I3D layers with generator: %s" % (diTrainAll))
    keI3DOflow = layers_unfreeze(keI3DOflow)
    optimizer = keras.optimizers.Adam(lr = diTrainAll["fLearn"])
    keI3DOflow.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    count_params(keI3DOflow)    
    
    keI3DOflow.fit_generator(
        generator = genFramesTrain,
        validation_data = genFramesVal,
        epochs = diTrainAll["nEpochs"],
        workers = 4,                 
        use_multiprocessing = False,
        max_queue_size = 8, 
        verbose = 1,
        callbacks=callbacks2)
    """

    return
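
# --- layers_freeze() / layers_unfreeze() are project helpers used above but not
# --- shown in this listing. Minimal sketches of the assumed behaviour (not the
# --- project's actual implementation): toggle layer.trainable on every layer and
# --- return the model so it can be re-compiled.
def layers_freeze(keModel: keras.Model) -> keras.Model:
    # Assumption: freeze the pretrained I3D body so only new top layers learn.
    for layer in keModel.layers:
        layer.trainable = False
    return keModel


def layers_unfreeze(keModel: keras.Model) -> keras.Model:
    # Assumption: make every layer trainable again for the fine-tuning phase.
    for layer in keModel.layers:
        layer.trainable = True
    return keModel
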
Example #4
def train_I3D_oflow_end2end():
    """
    Trains the Keras I3D optical-flow top layers, resuming from a saved checkpoint.
    :return: None
    """
    # directories
    sClassFile = "class.csv"
    sOflowDir = "Training_data"
    sModelDir = "model"

    diTrainTop = {
        "fLearn": 1e-6,
        "nEpochs": 5}

    diTrainAll = {
        "fLearn": 1e-4,
        "nEpochs": 1}

    nBatchSize = 4

    print("\nStarting I3D end2end training ...")
    print(os.getcwd())

    # read the ChaLearn classes
    oClasses = VideoClasses(sClassFile)
    # Load training data
    # print(oClasses.liClasses)
    path = os.path.join(sOflowDir, "train")
    genFramesTrain = FramesGenerator(path, nBatchSize, 40, 224, 224, 2, oClasses.liClasses)
    path = os.path.join(sOflowDir, "val")
    genFramesVal = FramesGenerator(path, nBatchSize, 40, 224, 224, 2, oClasses.liClasses)
    if genFramesTrain:
        print("Generated training data.")
    if genFramesVal:
        print("Generated validation data.")

    # Load pretrained i3d model and adjust top layer
    print("Load pretrained I3D flow model ...")
    keI3DOflow = models.load_model("model/20190320-2118-ISL105-oflow-i3d-top-best.h5")

    if keI3DOflow:
        print("Model loaded successfully")
        # print(keI3DOflow.summary())
    # Prep logging
    sLog = time.strftime("%Y%m%d-%H%M", time.gmtime()) + "-%s%03d-oflow-i3d" % ("ISL", 105)

    # Helper: Save results
    csv_logger = keras.callbacks.CSVLogger("log/" + sLog + "-acc.csv", append=True)

    # Helper: Save the model
    os.makedirs(sModelDir, exist_ok=True)
    cpTopLast = keras.callbacks.ModelCheckpoint(filepath=sModelDir + "/" + sLog + "-top-last.h5", verbose=1,
                                                save_best_only=False, save_weights_only=False)
    cpTopBest = keras.callbacks.ModelCheckpoint(filepath=sModelDir + "/" + sLog + "-top-best.h5", verbose=1,
                                                save_best_only=True, save_weights_only=False)
    cpAllLast = keras.callbacks.ModelCheckpoint(filepath=sModelDir + "/" + sLog + "-entire-last.h5", verbose=1,
                                                save_weights_only=False, save_best_only=False)
    cpAllBest = keras.callbacks.ModelCheckpoint(filepath=sModelDir + "/" + sLog + "-entire-best.h5", verbose=1,
                                                save_best_only=True, save_weights_only=False)
    cbTensorBoard = keras.callbacks.TensorBoard(log_dir="logs", histogram_freq=1, update_freq='batch',
                                                write_graph=True, write_images=True, batch_size=32)
    callbacks1 = [cpTopLast, cpTopBest, cbTensorBoard]

    #callbacks2 = [cpAllBest, cpAllLast, cbTensorBoard]

    # Fit top layers
    print("Fit I3D top layers with generator: %s" % (diTrainTop))
    optimizer = keras.optimizers.Adam(lr=diTrainTop["fLearn"], decay=1e-6)
    keI3DOflow.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    count_params(keI3DOflow)

    keI3DOflow.fit_generator(
        generator=genFramesTrain,
        validation_data=genFramesVal,
        epochs=diTrainTop["nEpochs"],
        workers=4,
        use_multiprocessing=False,
        max_queue_size=8,
        verbose=1,
        callbacks=callbacks1)

    '''
    # Fit entire I3D model
    print("Finetune all I3D layers with generator: %s" % (diTrainAll))
    keI3DOflow = layers_unfreeze(keI3DOflow)
    optimizer = keras.optimizers.Adam(lr = diTrainAll["fLearn"])
    keI3DOflow.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    count_params(keI3DOflow)    
    
    keI3DOflow.fit_generator(
        generator = genFramesTrain,
        validation_data = genFramesVal,
        epochs = diTrainAll["nEpochs"],
        workers = 4,                 
        use_multiprocessing = False,
        max_queue_size = 8, 
        verbose = 1,
        callbacks=callbacks2)
        '''

    return
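
# --- count_params() is another project helper called before each fit. A minimal
# --- sketch of the assumed behaviour (not the project's actual code): report how
# --- many weights are trainable versus frozen via keras.backend.count_params.
def count_params(keModel: keras.Model) -> None:
    nTrainable = sum(keras.backend.count_params(w) for w in keModel.trainable_weights)
    nFrozen = sum(keras.backend.count_params(w) for w in keModel.non_trainable_weights)
    print("Trainable params: %d, non-trainable params: %d, total: %d" %
          (nTrainable, nFrozen, nTrainable + nFrozen))
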
Example #5
def train_I3D_oflow_end2end(diVideoSet):
    """ 
    * Loads pretrained I3D model, 
    * reads optical flow data generated from training videos,
    * adjusts top-layers adequately for video data,
    * trains only the new top layers,
    * then fine-tunes entire neural network,
    * saves logs and models to disc.
    """

    # directories
    sFolder = "%03d-%d" % (diVideoSet["nClasses"], diVideoSet["nFramesNorm"])
    #sClassFile       = "data-set/chalearn/249/class.csv"
    sClassFile = "data-set/%s/%03d/class.csv" % (diVideoSet["sName"],
                                                 diVideoSet["nClasses"])
    #sVideoDir        = "data-set/%s/%03d"%(diVideoSet["sName"], diVideoSet["nClasses"])
    #sImageDir        = "data-temp/%s/%s/image"%(diVideoSet["sName"], sFolder)
    #sImageFeatureDir = "data-temp/%s/%s/image-i3d"%(diVideoSet["sName"], sFolder)
    #sOflowDir        = "data-temp/chalearn/249-40/image"
    sOflowDir = "data-temp/%s/%s/oflow" % (diVideoSet["sName"], sFolder)
    #sOflowFeatureDir = "data-temp/%s/%s/oflow-i3d"%(diVideoSet["sName"], sFolder)

    sModelDir = "model"

    diTrainTop = {"fLearn": 1e-3, "nEpochs": 3}

    diTrainAll = {"fLearn": 1e-4, "nEpochs": 17}

    nBatchSize = 4

    print("\nStarting I3D end2end training ...")
    print(os.getcwd())

    # read the ChaLearn classes
    oClasses = VideoClasses(sClassFile)

    # Load training data
    genFramesTrain = FramesGenerator(sOflowDir + "/train", nBatchSize,
                                     diVideoSet["nFramesNorm"], 224, 224, 2,
                                     oClasses.liClasses)
    genFramesVal = FramesGenerator(sOflowDir + "/valid", nBatchSize,
                                   diVideoSet["nFramesNorm"], 224, 224, 2,
                                   oClasses.liClasses)

    # Load pretrained i3d model and adjust top layer
    print("Load pretrained I3D flow model ...")
    keI3DOflow = Inception_Inflated3d(include_top=False,
                                      weights='flow_imagenet_and_kinetics',
                                      input_shape=(diVideoSet["nFramesNorm"],
                                                   224, 224, 2))
    print("Add top layers with %d output classes ..." % oClasses.nClasses)
    keI3DOflow = layers_freeze(keI3DOflow)
    keI3DOflow = add_i3d_top(keI3DOflow, oClasses.nClasses, dropout_prob=0.5)

    # Prep logging
    sLog = time.strftime("%Y%m%d-%H%M", time.gmtime()) + \
        "-%s%03d-oflow-i3d"%(diVideoSet["sName"], diVideoSet["nClasses"])

    # Helper: Save results
    csv_logger = keras.callbacks.CSVLogger("log/" + sLog + "-acc.csv",
                                           append=True)

    # Helper: Save the model
    os.makedirs(sModelDir, exist_ok=True)
    cpTopLast = keras.callbacks.ModelCheckpoint(filepath=sModelDir + "/" +
                                                sLog + "-above-last.h5",
                                                verbose=0)
    cpTopBest = keras.callbacks.ModelCheckpoint(filepath=sModelDir + "/" +
                                                sLog + "-above-best.h5",
                                                verbose=1,
                                                save_best_only=True)
    cpAllLast = keras.callbacks.ModelCheckpoint(filepath=sModelDir + "/" +
                                                sLog + "-entire-last.h5",
                                                verbose=0)
    cpAllBest = keras.callbacks.ModelCheckpoint(filepath=sModelDir + "/" +
                                                sLog + "-entire-best.h5",
                                                verbose=1,
                                                save_best_only=True)

    # Fit top layers
    print("Fit I3D top layers with generator: %s" % (diTrainTop))
    optimizer = keras.optimizers.Adam(lr=diTrainTop["fLearn"])
    keI3DOflow.compile(loss='categorical_crossentropy',
                       optimizer=optimizer,
                       metrics=['accuracy'])
    count_params(keI3DOflow)

    keI3DOflow.fit_generator(generator=genFramesTrain,
                             validation_data=genFramesVal,
                             epochs=diTrainTop["nEpochs"],
                             workers=4,
                             use_multiprocessing=True,
                             max_queue_size=8,
                             verbose=1,
                             callbacks=[csv_logger, cpTopLast, cpTopBest])

    # Fit entire I3D model
    print("Finetune all I3D layers with generator: %s" % (diTrainAll))
    keI3DOflow = layers_unfreeze(keI3DOflow)
    optimizer = keras.optimizers.Adam(lr=diTrainAll["fLearn"])
    keI3DOflow.compile(loss='categorical_crossentropy',
                       optimizer=optimizer,
                       metrics=['accuracy'])
    count_params(keI3DOflow)

    keI3DOflow.fit_generator(generator=genFramesTrain,
                             validation_data=genFramesVal,
                             epochs=diTrainAll["nEpochs"],
                             workers=4,
                             use_multiprocessing=True,
                             max_queue_size=8,
                             verbose=1,
                             callbacks=[csv_logger, cpAllLast, cpAllBest])

    return
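
# --- Hedged usage sketch (not from the source): the diVideoSet keys below are the
# --- ones the function reads; the ChaLearn values echo the commented-out paths
# --- above and are illustrative only.
if __name__ == '__main__':
    diVideoSet = {"sName": "chalearn", "nClasses": 249, "nFramesNorm": 40}
    train_I3D_oflow_end2end(diVideoSet)
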
sModelFile = "model_combined_mirror/115_rgb.h5"
#==== model load
h, w = 224, 224
keI3D = I3D_load(sModelFile, frames_num, (h, w, 2), 63)
#keI3D = I3D_load(sModelFile, frames_num, (h, w, 3), 63)
#keI3D = I3D_load(sModelFile, frames_num, (h, w, 2), 63)
input_type = 'combined_test'

sFolder = "%03d-%d" % (100, frames_num)
sOflowDir = "data-temp/%s/%s/oflow" % ('tsl', sFolder)
sImageDir = "data-temp/%s/%s/image" % ('tsl', sFolder)

genFramesTest_flow = FramesGenerator(sOflowDir + "/test_videos",
                                     1,
                                     frames_num,
                                     224,
                                     224,
                                     2,
                                     bShuffle=False)
genFramesTest_rgb = FramesGenerator(sImageDir + "/test_videos",
                                    1,
                                    frames_num,
                                    224,
                                    224,
                                    3,
                                    bShuffle=False,
                                    test_phase=True)
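
# --- generate_generator_multiple() pairs the RGB and optical-flow generators for
# --- the two-input fusion model; it is not shown in this listing. A minimal
# --- sketch of the assumed behaviour, assuming FramesGenerator is a keras
# --- Sequence and both generators are built with bShuffle=False so their sample
# --- order matches: yield [rgb_batch, flow_batch] plus a single label array.
def generate_generator_multiple(genRgb, genFlow):
    while True:
        for i in range(len(genRgb)):
            arXRgb, arY = genRgb[i]    # (batch, frames, 224, 224, 3), one-hot labels
            arXFlow, _ = genFlow[i]    # (batch, frames, 224, 224, 2)
            yield [arXRgb, arXFlow], arY
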
genFramesTest_combined = generate_generator_multiple(genFramesTest_rgb,
                                                     genFramesTest_flow)
#==== model input generator
select_gen = genFramesTest_combined
Example #7
def model_fine_tune(diVideoSet, method='rgb'):
    """
    Loads a saved two-stream (RGB + optical flow) I3D model and fine-tunes all
    layers on the combined RGB/flow generators.
    """
    # directories
    sFolder = "%03d-%d" % (diVideoSet["nClasses"], diVideoSet["nFramesNorm"])
    sClassFile = "data-set/%s/%03d/class.csv" % (diVideoSet["sName"],
                                                 diVideoSet["nClasses"])
    #sVideoDir        = "data-set/%s/%03d"%(diVideoSet["sName"], diVideoSet["nClasses"])
    if method == 'rgb':
        sImageDir = "data-temp/%s/%s/image" % (diVideoSet["sName"], sFolder)
    else:
        sImageDir = f"data-temp/%s/%s/image_{method}" % (diVideoSet["sName"],
                                                         sFolder)
    #sImageFeatureDir = "data-temp/%s/%s/image-i3d"%(diVideoSet["sName"], sFolder)
    sOflowDir = "data-temp/%s/%s/oflow" % (diVideoSet["sName"], sFolder)
    #sOflowFeatureDir = "data-temp/%s/%s/oflow-i3d"%(diVideoSet["sName"], sFolder)
    sModelDir = "model_combined_mirror"

    diTrainTop = {"fLearn": 1e-3, "nEpochs": 3}

    diTrainAll = {"fLearn": 1e-4, "nEpochs": 5}

    nBatchSize = 1

    print("\nStarting I3D end2end training ...")
    print(os.getcwd())

    # read the ChaLearn classes
    #oClasses = VideoClasses(sClassFile)

    # Load training data
    genFramesTrain_flow = FramesGenerator(sOflowDir + "/train_videos",
                                          nBatchSize,
                                          diVideoSet["nFramesNorm"],
                                          224,
                                          224,
                                          2,
                                          bShuffle=False)
    genFramesVal_flow = FramesGenerator(sOflowDir + "/val_videos",
                                        nBatchSize,
                                        diVideoSet["nFramesNorm"],
                                        224,
                                        224,
                                        2,
                                        bShuffle=False)
    genFramesTrain_rgb = FramesGenerator(sImageDir + "/train_videos",
                                         nBatchSize,
                                         diVideoSet["nFramesNorm"],
                                         224,
                                         224,
                                         3,
                                         bShuffle=False)
    genFramesVal_rgb = FramesGenerator(sImageDir + "/val_videos",
                                       nBatchSize,
                                       diVideoSet["nFramesNorm"],
                                       224,
                                       224,
                                       3,
                                       bShuffle=False)

    # Prep logging
    sLog = time.strftime("%Y%m%d-%H%M", time.gmtime()) + \
        "-%s%03d-%03d-combined-i3d"%(diVideoSet["sName"], diVideoSet["nClasses"], diVideoSet["nFramesNorm"])

    cpAllLast = tf.keras.callbacks.ModelCheckpoint(filepath=sModelDir + "/" +
                                                   sLog + "-entire-last.h5",
                                                   verbose=0)
    cpAllBest = tf.keras.callbacks.ModelCheckpoint(filepath=sModelDir + "/" +
                                                   sLog + "-entire-best.h5",
                                                   verbose=1,
                                                   save_best_only=True)

    keI3Dfusion = load_model(
        'model_combined_mirror/20200723-1559-tsl100-115-combined-i3d-entire-best.h5'
    )
    train_gen = generate_generator_multiple(genFramesTrain_rgb,
                                            genFramesTrain_flow)
    val_gen = generate_generator_multiple(genFramesVal_rgb, genFramesVal_flow)

    print("Finetune all I3D layers with generator: %s" % (diTrainAll))
    csv_logger = tf.keras.callbacks.CSVLogger("log_combined_mirror/" + sLog +
                                              "-acc_entire.csv",
                                              append=True)
    optimizer = keras.optimizers.Adam(lr=diTrainAll["fLearn"])
    keI3Dfusion.compile(loss='categorical_crossentropy',
                        optimizer=optimizer,
                        metrics=['accuracy'])
    count_params(keI3Dfusion)

    keI3Dfusion.fit_generator(generator=train_gen,
                              validation_data=val_gen,
                              epochs=diTrainAll["nEpochs"],
                              workers=4,
                              use_multiprocessing=False,
                              max_queue_size=8,
                              verbose=1,
                              callbacks=[csv_logger, cpAllLast, cpAllBest])

    return
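
# --- Hedged usage sketch (not from the source): the values mirror the checkpoint
# --- name loaded above (tsl, 100 classes, 115 frames) and are illustrative only.
if __name__ == '__main__':
    diVideoSet = {"sName": "tsl", "nClasses": 100, "nFramesNorm": 115}
    model_fine_tune(diVideoSet, method='rgb')
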
Example #8
def train_I3D_combined_end2end(diVideoSet, method='rgb'):
    """ 
    * Loads pretrained I3D model, 
    * reads optical flow data generated from training videos,
    * adjusts top-layers adequately for video data,
    * trains only the new top layers,
    * then fine-tunes entire neural network,
    * saves logs and models to disc.
    """

    # directories
    sFolder = "%03d-%d" % (diVideoSet["nClasses"], diVideoSet["nFramesNorm"])
    sClassFile = "data-set/%s/%03d/class.csv" % (diVideoSet["sName"],
                                                 diVideoSet["nClasses"])
    #sVideoDir        = "data-set/%s/%03d"%(diVideoSet["sName"], diVideoSet["nClasses"])
    if method == 'rgb':
        sImageDir = "data-temp/%s/%s/image" % (diVideoSet["sName"], sFolder)
    else:
        sImageDir = f"data-temp/%s/%s/image_{method}" % (diVideoSet["sName"],
                                                         sFolder)
    #sImageFeatureDir = "data-temp/%s/%s/image-i3d"%(diVideoSet["sName"], sFolder)
    sOflowDir = "data-temp/%s/%s/oflow" % (diVideoSet["sName"], sFolder)
    #sOflowFeatureDir = "data-temp/%s/%s/oflow-i3d"%(diVideoSet["sName"], sFolder)
    sModelDir = "model_combined_mirror"

    diTrainTop = {"fLearn": 1e-3, "nEpochs": 3}

    diTrainAll = {"fLearn": 1e-4, "nEpochs": 17}

    nBatchSize = 1

    print("\nStarting I3D end2end training ...")
    print(os.getcwd())

    # read the ChaLearn classes
    #oClasses = VideoClasses(sClassFile)

    # Load training data
    genFramesTrain_flow = FramesGenerator(sOflowDir + "/train_videos",
                                          nBatchSize,
                                          diVideoSet["nFramesNorm"],
                                          224,
                                          224,
                                          2,
                                          bShuffle=False)
    genFramesVal_flow = FramesGenerator(sOflowDir + "/val_videos",
                                        nBatchSize,
                                        diVideoSet["nFramesNorm"],
                                        224,
                                        224,
                                        2,
                                        bShuffle=False)
    genFramesTrain_rgb = FramesGenerator(sImageDir + "/train_videos",
                                         nBatchSize,
                                         diVideoSet["nFramesNorm"],
                                         224,
                                         224,
                                         3,
                                         bShuffle=False)
    genFramesVal_rgb = FramesGenerator(sImageDir + "/val_videos",
                                       nBatchSize,
                                       diVideoSet["nFramesNorm"],
                                       224,
                                       224,
                                       3,
                                       bShuffle=False)

    # Load pretrained i3d model and adjust top layer
    print("Load pretrained I3D flow model ...")
    keI3DOflow = Inception_Inflated3d(
        include_top=False,
        weights='flow_imagenet_and_kinetics',
        #weights='model/20200704-1221-tsl100-oflow-i3d-entire-best.h5',
        input_shape=(diVideoSet["nFramesNorm"], 224, 224, 2))
    print("Add top layers with %d output classes ..." % 63)
    keI3DOflow = layers_freeze(keI3DOflow)
    keI3DOflow = add_i3d_top(keI3DOflow,
                             63,
                             dropout_prob=0.5,
                             late_fusion=True)

    print("Load pretrained I3D rgb model ...")
    keI3Drgb = Inception_Inflated3d(
        include_top=False,
        weights='rgb_imagenet_and_kinetics',
        #weights='model/20200704-1221-tsl100-oflow-i3d-entire-best.h5',
        input_shape=(diVideoSet["nFramesNorm"], 224, 224, 3),
        layer_name='RGB')
    print("Add top layers with %d output classes ..." % 63)
    keI3Drgb = layers_freeze(keI3Drgb)
    keI3Drgb = add_i3d_top(keI3Drgb,
                           63,
                           dropout_prob=0.5,
                           late_fusion=True,
                           layer_name='RGB')

    keI3Dfusion = model_fusion(keI3Drgb, keI3DOflow)

    # Prep logging
    sLog = time.strftime("%Y%m%d-%H%M", time.gmtime()) + \
        "-%s%03dclass-%03dframe-combined-%s-i3d"%(diVideoSet["sName"], diVideoSet["nClasses"], diVideoSet["nFramesNorm"], method)

    # Helper: Save results
    csv_logger = tf.keras.callbacks.CSVLogger("log_combined_mirror/" + sLog +
                                              "-acc_above.csv",
                                              append=True)

    # Helper: Save the model
    os.makedirs(sModelDir, exist_ok=True)
    cpTopLast = tf.keras.callbacks.ModelCheckpoint(filepath=sModelDir + "/" +
                                                   sLog + "-above-last.h5",
                                                   verbose=0)
    cpTopBest = tf.keras.callbacks.ModelCheckpoint(filepath=sModelDir + "/" +
                                                   sLog + "-above-best.h5",
                                                   verbose=1,
                                                   save_best_only=True)
    cpAllLast = tf.keras.callbacks.ModelCheckpoint(filepath=sModelDir + "/" +
                                                   sLog + "-entire-last.h5",
                                                   verbose=0)
    cpAllBest = tf.keras.callbacks.ModelCheckpoint(filepath=sModelDir + "/" +
                                                   sLog + "-entire-best.h5",
                                                   verbose=1,
                                                   save_best_only=True)

    # Fit top layers
    print("Fit I3D top layers with generator: %s" % (diTrainTop))
    optimizer = keras.optimizers.Adam(lr=diTrainTop["fLearn"])
    keI3Dfusion.compile(loss='categorical_crossentropy',
                        optimizer=optimizer,
                        metrics=['accuracy'])
    count_params(keI3Dfusion)

    train_gen = generate_generator_multiple(genFramesTrain_rgb,
                                            genFramesTrain_flow)
    val_gen = generate_generator_multiple(genFramesVal_rgb, genFramesVal_flow)

    keI3Dfusion.fit_generator(generator=train_gen,
                              validation_data=val_gen,
                              epochs=diTrainTop["nEpochs"],
                              workers=4,
                              use_multiprocessing=False,
                              max_queue_size=8,
                              verbose=1,
                              callbacks=[csv_logger, cpTopLast, cpTopBest])

    # Fit entire I3D model
    print("Finetune all I3D layers with generator: %s" % (diTrainAll))
    csv_logger = tf.keras.callbacks.CSVLogger("log_combined_mirror/" + sLog +
                                              "-acc_entire.csv",
                                              append=True)
    keI3Dfusion = layers_unfreeze(keI3Dfusion)
    optimizer = keras.optimizers.Adam(lr=diTrainAll["fLearn"])
    keI3Dfusion.compile(loss='categorical_crossentropy',
                        optimizer=optimizer,
                        metrics=['accuracy'])
    count_params(keI3Dfusion)

    keI3Dfusion.fit_generator(generator=train_gen,
                              validation_data=val_gen,
                              epochs=diTrainAll["nEpochs"],
                              workers=4,
                              use_multiprocessing=False,
                              max_queue_size=8,
                              verbose=1,
                              callbacks=[csv_logger, cpAllLast, cpAllBest])

    return
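
# --- Hedged usage sketch (not from the source): an illustrative two-stream
# --- training call; the tsl/100-class/115-frame values follow the checkpoint
# --- naming used elsewhere in these examples.
if __name__ == '__main__':
    diVideoSet = {"sName": "tsl", "nClasses": 100, "nFramesNorm": 115}
    train_I3D_combined_end2end(diVideoSet, method='rgb')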