def trainModel(model=None):
    """Train a Keras model on the ilsvrc14 data source with early stopping.

    Args:
        model: optional existing model to continue training; when None a
            fresh network is built via m_v7.prepModel.

    Returns:
        The trained Keras model.
    """
    jitter = 32      # random-crop range in pixels fed to the augmenting sequence
    side = 224       # network input height/width
    source = "ilsvrc14"

    # Augmenting training sequence (random crops within `jitter` pixels).
    train_seq = as_v3.AugSequence(
        target_size=side,
        crop_range=jitter,
        batch_size=128,
        datasrc=source,
        test=False,
        debug=True,
    )

    if model is None:
        model = m_v7.prepModel(
            input_shape=(side, side, 3),
            L1_size_stride_filters=(7, 2, 96), L1MaxPool_size_stride=(3, 2),
            L2_size_stride_filters=(5, 2, 256), L2MaxPool_size_stride=(3, 2),
            L3_size_stride_filters=(3, 1, 384),
            L4_size_stride_filters=(3, 1, 384),
            L5_size_stride_filters=(3, 1, 256), L5MaxPool_size_stride=(3, 2),
            D1_size=4096,
            D2_size=4096,
        )

    max_epochs = 200

    # Validation generator feeds the early-stopping callback.
    valid_seq = dg_v1.prepDataGen(
        target_size=side, test=True, batch_size=128, datasrc=source)
    stopper = EarlyStopping(
        monitor='val_acc', min_delta=0.01, patience=5, verbose=1,
        mode='max', restore_best_weights=True)

    model.fit_generator(
        train_seq,
        steps_per_epoch=len(train_seq),
        epochs=max_epochs,
        verbose=2,
        validation_data=valid_seq,
        validation_steps=len(valid_seq),
        callbacks=[stopper],
    )

    # Final evaluation on the train split, then the validation split.
    e_v3.eval(model, target_size=side, datasrc=source)
    e_v3.eval(model, target_size=side, datasrc=source, test=True)

    return model
# Esempio n. 2
# 0
def trainModel(epochs=1):
    """Train a VGG-preprocessed model on ilsvrc14_100boundingBoxes.

    Args:
        epochs: maximum number of epochs to train (early stopping via
            val_acc may end training sooner).

    Returns:
        The trained Keras model.
    """
    jitter = 32      # random-crop range in pixels
    side = 224       # network input height/width
    source = "ilsvrc14_100boundingBoxes"

    # Augmenting training sequence with horizontal flips and VGG preprocessing.
    train_seq = as_v5.AugSequence(
        target_size=side, crop_range=jitter, allow_hor_flip=True,
        batch_size=32, preprocess="vgg", datasrc=source, test=False)

    model = m_v12.prepModel(D2_dropout=0.5, Softmax_size=100)

    # Validation sequence: single crop, no flips; drives early stopping.
    valid_seq = as_v5.AugSequence(
        target_size=side, crop_range=1, allow_hor_flip=False,
        batch_size=48, preprocess="vgg", datasrc=source, test=True)
    stopper = EarlyStopping(
        monitor='val_acc', min_delta=0.001, patience=20, verbose=1,
        mode='max', restore_best_weights=True)

    model.fit_generator(
        train_seq, steps_per_epoch=len(train_seq), epochs=epochs, verbose=2,
        validation_data=valid_seq, validation_steps=len(valid_seq),
        callbacks=[stopper])

    # Multi-frame evaluations on the validation split.
    print("Evaluation on validation set (1 frame)")
    e_v2.eval(model, target_size=side, datasrc=source, preprocess="vgg", test=True)
    print("Evaluation on validation set (5 frames)")
    e_v3.eval(model, target_size=side, datasrc=source, preprocess="vgg", test=True)
    print("Evaluation on validation set (10 frames)")
    e_v4.eval(model, target_size=side, datasrc=source, preprocess="vgg", test=True)

    return model
def trainModel(model=None, epochs=1):
    """Train an AlexNet-style m_v8 network on ilsvrc14_50classes.

    Args:
        model: optional existing model to continue training; when None a
            new network is built via m_v8.prepModel.
        epochs: maximum number of epochs to train (early stopping via
            val_acc may end training sooner).

    Returns:
        The trained Keras model.
    """
    jitter = 32      # random-crop range in pixels
    side = 224       # network input height/width
    source = "ilsvrc14_50classes"

    # Augmenting training sequence (random crops within `jitter` pixels).
    train_seq = as_v3.AugSequence(
        target_size=side, crop_range=jitter, batch_size=128,
        datasrc=source, test=False, debug=True)

    if model is None:
        model = m_v8.prepModel(
            input_shape=(side, side, 3),
            L1_size_stride_filters=(11, 4, 96), L1MaxPool_size_stride=(3, 2), L1_dropout=0.0,
            L2_size_stride_filters=(5, 1, 256), L2MaxPool_size_stride=(3, 2), L2_dropout=0.0,
            L3_size_stride_filters=(3, 1, 384), L3_dropout=0.0,
            L4_size_stride_filters=(3, 1, 384), L4_dropout=0.0,
            L5_size_stride_filters=(3, 1, 256), L5MaxPool_size_stride=(3, 2), L5_dropout=0.0,
            D1_size=4096, D1_dropout=0.2,
            D2_size=4096, D2_dropout=0.3,
            Softmax_size=50,
            Conv_padding="same")

    # Validation generator feeds the early-stopping callback.
    valid_seq = dg_v1.prepDataGen(
        target_size=side, test=True, batch_size=128, datasrc=source)
    stopper = EarlyStopping(
        monitor='val_acc', min_delta=0.001, patience=20, verbose=1,
        mode='max', restore_best_weights=True)

    model.fit_generator(
        train_seq, steps_per_epoch=len(train_seq), epochs=epochs, verbose=2,
        validation_data=valid_seq, validation_steps=len(valid_seq),
        callbacks=[stopper])

    # Evaluate on train, then on validation with 1/5/10 frames.
    print("Evaluation on train set (1 frame)")
    e_v2.eval(model, target_size=side, datasrc=source)
    print("Evaluation on validation set (1 frame)")
    e_v2.eval(model, target_size=side, datasrc=source, test=True)
    print("Evaluation on validation set (5 frames)")
    e_v3.eval(model, target_size=side, datasrc=source, test=True)
    print("Evaluation on validation set (10 frames)")
    e_v4.eval(model, target_size=side, datasrc=source, test=True)

    return model
# Esempio n. 4
# 0
def trainModel(model=None):
    """Train a Keras model on ilsvrc14 for a fixed number of epochs.

    Unlike the early-stopping variants in this file, this version runs the
    full 200 epochs with no validation callback.

    Args:
        model: optional existing model to continue training; when None a
            fresh network is built via m_v7.prepModel.

    Returns:
        The trained Keras model.
    """
    jitter = 32      # random-crop range in pixels
    side = 224       # network input height/width
    source = "ilsvrc14"

    # Augmenting training sequence (random crops within `jitter` pixels).
    train_seq = as_v3.AugSequence(
        target_size=side,
        crop_range=jitter,
        batch_size=128,
        datasrc=source,
        test=False,
        debug=True,
    )

    if model is None:
        model = m_v7.prepModel(
            input_shape=(side, side, 3),
            L1_size_stride_filters=(7, 2, 96), L1MaxPool_size_stride=(3, 2),
            L2_size_stride_filters=(5, 2, 256), L2MaxPool_size_stride=(3, 2),
            L3_size_stride_filters=(3, 1, 384),
            L4_size_stride_filters=(3, 1, 384),
            L5_size_stride_filters=(3, 1, 256), L5MaxPool_size_stride=(3, 2),
            D1_size=4096,
            D2_size=4096,
        )

    max_epochs = 200

    model.fit_generator(
        train_seq,
        steps_per_epoch=len(train_seq),
        epochs=max_epochs,
        verbose=2,
    )

    # Final evaluation on the train split, then the validation split.
    e_v3.eval(model, target_size=side, datasrc=source)
    e_v3.eval(model, target_size=side, datasrc=source, test=True)

    return model
def trainModel(model=None, epochs=1):
    """Train on ilsvrc14_50classes with RGB-mean subtraction and PCA colour jitter.

    Loads a pre-computed RGB mean plus PCA eigenvectors/eigenvalues from
    disk and feeds them to the augmenting sequence.

    Args:
        model: optional existing model to continue training; when None a
            new network is built via m_v8.prepModel.
        epochs: maximum number of epochs to train (early stopping via
            val_acc may end training sooner).

    Returns:
        The trained Keras model.
    """
    jitter = 32      # random-crop range in pixels
    side = 224       # network input height/width
    source = "ilsvrc14_50classes"

    # Pre-calculated statistics (note: Windows-style relative paths).
    rgb_mean = np.load("..\\rgb_mean.npy")
    eigvecs = np.load("..\\eigenvectors.npy")
    eigvals = np.load("..\\eigenvalues.npy")

    # Training sequence with flips, mean subtraction and PCA colour augmentation.
    train_seq = as_v4.AugSequence(
        target_size=side, crop_range=jitter, allow_hor_flip=True,
        batch_size=128, subtractMean=rgb_mean, pca_eigenvectors=eigvecs,
        pca_eigenvalues=eigvals, datasrc=source, test=False)

    if model is None:
        model = m_v8.prepModel(
            input_shape=(side, side, 3),
            L1_size_stride_filters=(11, 4, 96), L1MaxPool_size_stride=(3, 2), L1_dropout=0.0,
            L2_size_stride_filters=(5, 1, 256), L2MaxPool_size_stride=(3, 2), L2_dropout=0.0,
            L3_size_stride_filters=(3, 1, 384), L3_dropout=0.0,
            L4_size_stride_filters=(3, 1, 384), L4_dropout=0.0,
            L5_size_stride_filters=(3, 1, 256), L5MaxPool_size_stride=(3, 2), L5_dropout=0.0,
            D1_size=4096, D1_dropout=0.5,
            D2_size=4096, D2_dropout=0.55,
            Softmax_size=50,
            Conv_padding="same")

    # Validation: single crop, no flips, mean-subtracted; drives early stopping.
    valid_seq = as_v3.AugSequence(
        target_size=side, crop_range=1, allow_hor_flip=False, batch_size=128,
        subtractMean=rgb_mean, datasrc=source, test=True)
    stopper = EarlyStopping(
        monitor='val_acc', min_delta=0.001, patience=20, verbose=1,
        mode='max', restore_best_weights=True)

    model.fit_generator(
        train_seq, steps_per_epoch=len(train_seq), epochs=epochs, verbose=2,
        validation_data=valid_seq, validation_steps=len(valid_seq),
        callbacks=[stopper])

    # Evaluate on train, then on validation with 1/5/10 frames.
    print("Evaluation on train set (1 frame)")
    e_v2.eval(model, target_size=side, subtractMean=rgb_mean, datasrc=source)
    print("Evaluation on validation set (1 frame)")
    e_v2.eval(model, target_size=side, subtractMean=rgb_mean, datasrc=source,
              test=True)
    print("Evaluation on validation set (5 frames)")
    e_v3.eval(model, target_size=side, subtractMean=rgb_mean, datasrc=source,
              test=True)
    print("Evaluation on validation set (10 frames)")
    e_v4.eval(model, target_size=side, subtractMean=rgb_mean, datasrc=source,
              test=True)

    return model
def trainModel(epochs=1):
    """Train a model on the sco_v3 data source with checkpointing and CSV logging.

    The softmax size is derived from the number of classes the training
    generator discovers; the best model (by val_acc) is checkpointed to
    disk and reloaded before evaluation.

    Args:
        epochs: maximum number of epochs to train (early stopping via
            val_acc may end training sooner).

    Returns:
        The best checkpointed Keras model, reloaded from disk.
    """
    jitter = 1       # crop range of 1 pixel: effectively no crop jitter
    side = 256       # network input height/width
    source = "sco_v3"

    # Training sequence with horizontal flips and divide-by-255 preprocessing.
    train_seq = as_v5.AugSequence(
        target_size=side, crop_range=jitter, allow_hor_flip=True,
        batch_size=32, preprocess="div255", datasrc=source, test=False,
        debug=True)

    # Output layer width follows the class count found by the generator.
    n_classes = len(train_seq.dataGen().class_indices)

    model = m_v13.prepModel(target_size=side, Softmax_size=n_classes)
    model.summary()

    # Validation sequence: no flips; drives early stopping and checkpointing.
    valid_seq = as_v5.AugSequence(
        target_size=side, crop_range=1, allow_hor_flip=False, batch_size=32,
        preprocess="div255", datasrc=source, test=True)
    stopper = EarlyStopping(
        monitor='val_acc', min_delta=0.001, patience=20, verbose=1,
        mode='max')

    # Learning-curve CSV named after today's date and the class count.
    curve_path = lc_file_pattern.format(date.today().strftime("%Y%m%d"),
                                        n_classes)
    csv_logger = CSVLogger(curve_path + '.csv', separator=",", append=False)

    # Checkpoint the best model (by validation accuracy) to disk.
    ckpt_path = model_file_pattern.format(date.today().strftime("%Y%m%d"),
                                          n_classes)
    checkpoint = ModelCheckpoint(ckpt_path, save_best_only=True,
                                 monitor='val_acc', mode='max')

    model.fit_generator(
        train_seq,
        steps_per_epoch=len(train_seq),
        epochs=epochs,
        verbose=2,
        validation_data=valid_seq,
        validation_steps=len(valid_seq),
        callbacks=[stopper, checkpoint, csv_logger])

    # Reload the best checkpoint saved during training.
    model = load_model(ckpt_path)

    # Multi-frame evaluations on the validation split.
    print("Evaluation on validation set (1 frame)")
    e_v2.eval(model, target_size=side, datasrc=source, preprocess="div255",
              test=True)
    print("Evaluation on validation set (5 frames)")
    e_v3.eval(model, target_size=side, datasrc=source, preprocess="div255",
              test=True)
    print("Evaluation on validation set (10 frames)")
    e_v4.eval(model, target_size=side, datasrc=source, preprocess="div255",
              test=True)

    # NOTE(review): 'testtest=True' appears intended to select the held-out
    # test split (vs test=True for validation) — confirm against e_v2.eval.
    e_v2.eval(model, target_size=side, datasrc=source, preprocess="div255",
              testtest=True)

    return model