Example #1
def train(data_type,
          seq_length,
          model,
          saved_model=None,
          class_limit=None,
          image_shape=None,
          load_to_memory=False,
          batch_size=32,
          nb_epoch=100):

    # Helper: Save the model.
    checkpointer = ModelCheckpoint(
        filepath=os.path.join('data', 'checkpoints', model + '-' + data_type + \
            '.{epoch:03d}-{val_loss:.3f}.hdf5'),
        verbose=1,
        save_best_only=True)

    # Helper: TensorBoard.
    tb = TensorBoard(log_dir=os.path.join('data', 'logs', model))

    # Helper: Stop when we stop learning.
    early_stopper = EarlyStopping(patience=5)

    # Helper: Save results.
    timestamp = time.time()
    csv_logger = CSVLogger(os.path.join('data', 'logs', model + '-' + 'training-' + \
        str(timestamp) + '.log'))

    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:
        data = DataSet(seq_length=seq_length,
                       class_limit=class_limit,
                       image_shape=image_shape)

    steps_per_epoch = (len(data.data) * 0.7) // batch_size

    if load_to_memory:
        X, y = data.get_all_sequences_in_memory('train', data_type)
        X_test, y_test = data.get_all_sequences_in_memory('test', data_type)
    else:
        generator = data.frame_generator(batch_size, 'train', data_type)
        val_generator = data.frame_generator(batch_size, 'test', data_type)

    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)

    if load_to_memory:
        rm.model.fit(X,
                     y,
                     batch_size=batch_size,
                     validation_data=(X_test, y_test),
                     verbose=1,
                     callbacks=[tb, early_stopper, csv_logger],
                     epochs=nb_epoch)
    else:
        rm.model.fit_generator(
            generator=generator,
            steps_per_epoch=steps_per_epoch,
            epochs=nb_epoch,
            verbose=1,
            callbacks=[tb, early_stopper, csv_logger, checkpointer],
            validation_data=val_generator,
            validation_steps=40,
            workers=4)
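
A minimal invocation sketch for the function above; the argument values simply echo this function's own defaults, and the 'lstm' + 'features' pairing is one plausible choice, not the only one:

# Hedged usage sketch (assumed entry point; adjust to your setup):
def main():
    train('features', 40, 'lstm',
          saved_model=None,
          class_limit=None,
          image_shape=None,
          load_to_memory=False,
          batch_size=32,
          nb_epoch=100)

if __name__ == '__main__':
    main()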
Example #2
def train(sequence_length,
          image_shape,
          batch_size,
          nb_epoch):
    
    filepath = os.path.join('data', 'checkpoints', 'ConvLSTM.{epoch:03d}-{mse:.5f}.hdf5')
    
    # Helper: save the model.
    checkpointer = ModelCheckpoint(filepath=filepath,
                                   monitor='mse',
                                   verbose=2,
                                   save_best_only=True,
                                   save_weights_only=False,
                                   mode='auto')

    # Helper: stop training when the model converges.
    early_stopper = EarlyStopping(monitor='mse',
                                  min_delta=0,
                                  patience=10,
                                  restore_best_weights=True)
    
    # Get the training data
    data = DataSet(
        sequence_length=sequence_length,
        image_shape=image_shape)
    
    # Get samples per epoch.
    # Multiply by 0.7 to estimate how much data is the train set
    steps_per_epoch = (len(data.data) * 0.70) // batch_size
    # Multiply by 0.3 to estimate how much data is the validation set
    validation_steps = (len(data.data) * 0.30) // batch_size

    # Data generators
    generator = data.frame_generator(batch_size, 'train', augment=True)
    val_generator = data.frame_generator(batch_size, 'test', augment=False)

    # Get the model
    model = lstm_model()

    # Train the model
    history = model.fit_generator(generator=generator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=nb_epoch,
                                  verbose=0,
                                  callbacks=[early_stopper, checkpointer],
                                  validation_data=val_generator,
                                  validation_steps=validation_steps)

    # Close the GPU session (assumes `session` was created earlier and set
    # as the Keras backend session).
    session.close()
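
To make the 70/30 steps arithmetic above concrete, a purely illustrative calculation (the sample count is hypothetical, not from any real dataset):

# Illustrative only: hypothetical dataset of 1000 clips, batch_size 16.
num_samples = 1000
batch_size = 16
steps_per_epoch = int((num_samples * 0.70) // batch_size)   # -> 43
validation_steps = int((num_samples * 0.30) // batch_size)  # -> 18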
Example #3
def train(batch_size, nb_epoch, saved_model=None):
    # model can only be 'lrcn' (for images) or 'lstm' (for features)
    if settings.DATA_TYPE == "images":
        model = 'lrcn'
    else:
        model = 'lstm'

    # Helper: save the model.
    checkpointer = tf.keras.callbacks.ModelCheckpoint(
        #filepath=os.path.join(settings.OUTPUT_CHECKPOINT_FOLDER, model + '.{epoch:03d}-{val_loss:.3f}.hdf5'),
        filepath=os.path.join(settings.OUTPUT_CHECKPOINT_FOLDER, model + '.v1.hdf5'),
        verbose=1)

    # Helper: TensorBoard
    tb = tf.keras.callbacks.TensorBoard(log_dir=os.path.join(settings.OUTPUT_LOG, model))

    # Helper: Stop when we stop learning.
    # early_stopper = EarlyStopping(patience=5)

    # Helper: Save results.
    timestamp = time.time()
    csv_logger = tf.keras.callbacks.CSVLogger(os.path.join(settings.OUTPUT_LOG, model + '-' + 'training-' + str(timestamp) + '.log'))

    data = DataSet(settings.SCRIPT_EXTRACT_SEQ_SPLIT_PATH)

    # Get steps per epoch from the dataset's own train-split size.
    steps_per_epoch = data.len_train_data() // batch_size

    generator = data.frame_generator(batch_size, 'train')
    val_generator = data.frame_generator(batch_size, 'valid')

    # Enable mixed precision; the policy must be set before the model is built.
    from tensorflow.keras.mixed_precision import experimental as mixed_precision
    policy = mixed_precision.Policy('mixed_float16')
    mixed_precision.set_policy(policy)

    # Get the model.
    rm = RNNModels_2_1(len(settings.VIDEO_CATEGORIES_ADD), model, settings.SEQ_LEN,
                       saved_model, settings.CNN_FEATURE_LEN)

    rm.model.fit_generator(
        generator=generator,
        steps_per_epoch=steps_per_epoch,
        epochs=1,
        verbose=1,
        callbacks=[tb, csv_logger, checkpointer],
        validation_data=val_generator,
        validation_steps=100)
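
The mixed-precision API used above was the experimental one; a sketch of the stabilized TF2 equivalent, assuming TensorFlow 2.4 or later:

# Stabilized mixed-precision setup in TensorFlow 2.4+ (for reference):
import tensorflow as tf

tf.keras.mixed_precision.set_global_policy('mixed_float16')
# Build the model after setting the policy; keep the final classification
# layer in float32 for numeric stability, e.g.:
#   outputs = tf.keras.layers.Activation('softmax', dtype='float32')(x)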
Example #4
def validate(data_type,
             model,
             seq_length=40,
             saved_model=None,
             class_limit=None,
             image_shape=None):
    test_data_num = 1084  # number of test samples (hard-coded)
    batch_size = 32

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:
        data = DataSet(seq_length=seq_length,
                       class_limit=class_limit,
                       image_shape=image_shape)

    test_generator = data.frame_generator(batch_size, 'test', data_type)

    # Load the trained model directly from disk (note: this rebinds the
    # `model` argument, which held the model name).
    #rm = ResearchModels(len(data.classes), model, seq_length, saved_model)

    model = load_model(saved_model)

    # Evaluate!
    #results = rm.model.evaluate_generator(
    #   generator=val_generator,
    #  val_samples=3200)
    results = model.evaluate_generator(generator=test_generator,
                                       steps=test_data_num // batch_size)
    print(results)
    print(model.metrics)
    print(model.metrics_names)
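
The three prints above leave the reader to line results up with names; a small helper sketch in plain Python, with no assumptions beyond standard Keras attributes:

# Pair evaluation results with metric names for readable output:
def print_results(model, results):
    if not isinstance(results, (list, tuple)):
        results = [results]  # Keras returns a scalar for a single metric
    for name, value in zip(model.metrics_names, results):
        print('%s: %.4f' % (name, value))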
Example #5
def validate(data_type,
             model,
             seq_length=40,
             saved_model=None,
             class_limit=None,
             image_shape=None):
    batch_size = 8

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:
        data = DataSet(seq_length=seq_length,
                       class_limit=class_limit,
                       image_shape=image_shape)

    val_generator = data.frame_generator(batch_size, 'test', data_type)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)

    # Evaluate! (`val_samples` is the Keras 1 argument name; Keras 2's
    # evaluate_generator takes `steps` instead.)
    results = rm.model.evaluate_generator(generator=val_generator,
                                          val_samples=3200)

    print(results)
    print(rm.model.metrics_names)
Example #6
def validate(data_type,
             model,
             seq_length=125,
             saved_model=None,
             concat=False,
             class_limit=None,
             image_shape=None):
    batch_size = 1

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:
        data = DataSet(seq_length=seq_length,
                       class_limit=class_limit,
                       image_shape=image_shape)

    val_generator = data.frame_generator(batch_size, 'test', data_type, concat)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)

    # Evaluate! (`val_samples` is the Keras 1 name; use `steps` on Keras 2.)
    prediction = rm.model.predict_generator(
        generator=val_generator,
        val_samples=4)  # set this to the number of test files
    prediction = prediction.tolist()
    print(prediction)
    print("===========================")
    # Persist predictions (DataFrame.to_csv returns None, so no assignment).
    pd.DataFrame(prediction).to_csv('prediction.csv')
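
A hedged follow-up that labels each raw prediction row, assuming data.classes is the index-ordered label list used in training:

# Label each softmax row with its most likely class (data.classes assumed
# to be the index-ordered list of class names):
import numpy as np

def top_class(row, classes):
    idx = int(np.argmax(row))
    return classes[idx], float(row[idx])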
Example #7
def validate(data_type, model, seq_length=40, saved_model=None,
             class_limit=None, image_shape=None):
    batch_size = 32

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit
        )
    else:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit,
            image_shape=image_shape
        )

    val_generator = data.frame_generator(batch_size, 'test', data_type)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)

    # Evaluate! (`val_samples` is the Keras 1 name; use `steps` on Keras 2.)
    results = rm.model.evaluate_generator(
        generator=val_generator,
        val_samples=3200)

    print(results)
    print(rm.model.metrics_names)
Example #8
def validate(data_type, model, seq_length=40, saved_model=None,
             class_limit=None, image_shape=None):
    batch_size = 463  # presumably sized to cover the whole test split in one batch

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit
        )
    else:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit,
            image_shape=image_shape
        )

    val_generator = data.frame_generator(batch_size, 'test', data_type)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)

    # # Evaluate!
    # results = rm.model.evaluate_generator(
    #    generator=val_generator,
    #    steps=10)
    #
    # print(results)
    # print(rm.model.metrics_names)

    print('Classification metrics for the testing phase\n')
    metric_calculation(val_generator, rm.model, 0)
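
metric_calculation is defined elsewhere; a minimal hedged stub inferred from the call site above (the body and the meaning of the third argument are assumptions, not the author's code):

# Hypothetical stub for metric_calculation (signature inferred from the
# call above; implementation is an assumption):
import numpy as np
from sklearn.metrics import classification_report

def metric_calculation(generator, model, verbose, steps=1):
    y_true, y_pred = [], []
    for _ in range(steps):
        X, y = next(generator)
        y_true.extend(np.argmax(y, axis=1))
        y_pred.extend(np.argmax(model.predict(X), axis=1))
    print(classification_report(y_true, y_pred))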
Example #9
def predict(data_type, seq_length, saved_model, image_shape, video_name, class_limit):
    model = load_model(saved_model)

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:
        data = DataSet(seq_length=seq_length, image_shape=image_shape,
            class_limit=class_limit)
    
    # Extract the sample from the data.
    #sample = data.get_frames_by_filename(video_name, data_type)

    # frame_generator yields forever; break out manually once enough
    # batches have been inspected.
    for X, y in data.frame_generator(2, 'test', "images"):
        # Predict!
        prediction = model.predict(X)
        print(prediction)
        data.print_class_from_prediction(prediction[0])
        print()
        data.print_class_from_prediction(prediction[1])
        print('-------------------------')
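
The commented-out get_frames_by_filename line above hints at single-video prediction; a hedged sketch of that path, assuming the method returns one sequence of frames or features:

# Hedged sketch, not the author's code: predict one named video.
import numpy as np

def predict_one(data, model, video_name, data_type):
    sample = data.get_frames_by_filename(video_name, data_type)
    prediction = model.predict(np.expand_dims(sample, axis=0))
    data.print_class_from_prediction(np.squeeze(prediction, axis=0))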
Example #10
def train(data_type,
          seq_length,
          model,
          saved_model=None,
          class_limit=None,
          image_shape=None,
          config=None):

    if config is not None:
        load_to_memory = config.videoLoadToMemory
        batch_size = config.videoBatchSize
        nb_epoch = config.videoEpochs
        repo_dir = config.repoDir
        feature_file_path = config.featureFileName
        work_dir = config.workDir
        lr = config.videoLearningRate
        decay = config.videoDecay
        classlist = config.classes
    else:
        load_to_memory = False
        batch_size = 32
        nb_epoch = 100
        repo_dir = ''
        feature_file_path = 'data/data_file.csv'
        work_dir = 'data'
        lr = 1e-5
        decay = 1e-6
        classlist = []

    # Helper: Save the model.
    checkpointpath = os.path.join(work_dir, 'checkpoints')
    if not os.path.exists(checkpointpath):
        print("Creating checkpoint folder [%s]" % checkpointpath)
        os.makedirs(checkpointpath)
    checkpointer = ModelCheckpoint(
        filepath=os.path.join(work_dir, 'checkpoints', model + '-' + data_type + \
            '.{epoch:03d}-{val_loss:.3f}.hdf5'),
        verbose=1,
        save_best_only=True)

    # Helper: TensorBoard
    logpath = os.path.join(work_dir, 'logs')
    if not os.path.exists(logpath):
        print("Creating log folder [%s]" % logpath)
        os.makedirs(logpath)
    tb = TensorBoard(log_dir=os.path.join(work_dir, 'logs', model))

    # Helper: Stop when we stop learning.
    early_stopper = EarlyStopping(patience=5)

    # Helper: Save results.
    timestamp = time.time()
    csv_logger = CSVLogger(os.path.join(logpath, model + '-' + 'training-' + \
        str(timestamp) + '.log'))

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(seq_length=seq_length,
                       class_limit=class_limit,
                       repo_dir=repo_dir,
                       feature_file_path=feature_file_path,
                       work_dir=work_dir,
                       classlist=classlist)
    else:
        data = DataSet(seq_length=seq_length,
                       class_limit=class_limit,
                       image_shape=image_shape,
                       repo_dir=repo_dir,
                       feature_file_path=feature_file_path,
                       work_dir=work_dir,
                       classlist=classlist)
    # Bail out early if there is not enough data for this batch size.
    if not data.check_data(batch_size):
        print("Insufficient data")
        sys.exit(1)

    # Get samples per epoch.
    # Multiply by 0.7 to attempt to guess how much of data.data is the train set.
    steps_per_epoch = (len(data.data) * 0.7) // batch_size

    if load_to_memory:
        # Get data.
        X, y = data.get_all_sequences_in_memory('train', data_type)
        X_test, y_test = data.get_all_sequences_in_memory('test', data_type)
    else:
        # Get generators.
        generator = data.frame_generator(batch_size, 'train', data_type)
        val_generator = data.frame_generator(batch_size, 'test', data_type)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model, lr,
                        decay)

    # Fit!
    if load_to_memory:
        # Use standard fit.
        rm.model.fit(X,
                     y,
                     batch_size=batch_size,
                     validation_data=(X_test, y_test),
                     verbose=1,
                     callbacks=[tb, early_stopper, csv_logger],
                     epochs=nb_epoch)
    else:
        # Use fit generator.
        rm.model.fit_generator(
            generator=generator,
            steps_per_epoch=steps_per_epoch,
            epochs=nb_epoch,
            verbose=1,
            callbacks=[tb, early_stopper, csv_logger, checkpointer],
            validation_data=val_generator,
            validation_steps=40,
            workers=4)
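
A minimal stand-in for the config object consumed above; the attribute names come from the code itself, and the values simply echo its else-branch defaults:

# Minimal config stand-in (attribute names taken from the code above,
# values copied from its fallback branch):
class TrainConfig:
    videoLoadToMemory = False
    videoBatchSize = 32
    videoEpochs = 100
    repoDir = ''
    featureFileName = 'data/data_file.csv'
    workDir = 'data'
    videoLearningRate = 1e-5
    videoDecay = 1e-6
    classes = []

# train('features', 40, 'lstm', config=TrainConfig())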
Example #11
def train(data_type,
          seq_length,
          model,
          saved_model=None,
          concat=False,
          class_limit=None,
          image_shape=None,
          load_to_memory=False):
    # Set variables. (seq_length is fixed here, overriding the argument.)
    nb_epoch = 1000
    batch_size = 8
    seq_length = 125

    # Helper: Save the model.
    checkpointer = ModelCheckpoint(
        filepath=os.getcwd()+'\\data\\checkpoints\\' + model + '-' + data_type + \
            '.{epoch:03d}-{val_loss:.3f}.hdf5',
        verbose=2,
        save_best_only=True)
    lrScheduler = ReduceLROnPlateau(monitor='val_loss',
                                    factor=0.5,
                                    patience=1,
                                    cooldown=1,
                                    verbose=2)

    # Helper: TensorBoard
    tb = TensorBoard(log_dir=os.getcwd() + '\\data\\logs')

    # Helper: Stop when we stop learning.
    early_stopper = EarlyStopping(patience=5)

    # Helper: Save results.
    timestamp = time.time()
    csv_logger = CSVLogger(os.getcwd()+'\\data\\logs\\' + model + '-' + 'training-' + \
        str(timestamp) + '.log')

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:
        data = DataSet(seq_length=seq_length,
                       class_limit=class_limit,
                       image_shape=image_shape)

    # Steps per epoch: this variant walks the full sample count rather than
    # estimating the 0.7 train fraction used elsewhere.
    steps_per_epoch = (len(data.data))

    if load_to_memory:
        # Get data.
        X, y = data.get_all_sequences_in_memory(batch_size, 'train', data_type,
                                                concat)
        X_test, y_test = data.get_all_sequences_in_memory(
            batch_size, 'test', data_type, concat)


        # Optionally dump the labels for offline inspection:
        ##pathy = os.getcwd()+'/y.npy'
        ##numpy.save(pathy,y)
        ##pathyt = os.getcwd()+'/y_test.npy'
        ##numpy.save(pathyt,y_test)

    else:
        # Get generators.
        generator = data.frame_generator(batch_size, 'train', data_type,
                                         concat)
        val_generator = data.frame_generator(batch_size, 'test', data_type,
                                             concat)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)
    print("research model")
    print(rm.model.summary())
    # Fit!
    if load_to_memory:
        # Use standard fit.
        rm.model.fit(X,
                     y,
                     batch_size=batch_size,
                     validation_data=(X_test, y_test),
                     shuffle=True,
                     verbose=2,
                     callbacks=[
                         checkpointer, tb, early_stopper, csv_logger,
                         lrScheduler
                     ],
                     epochs=nb_epoch)
        print("from load to memory")
    else:
        # Use fit generator.
        rm.model.fit_generator(generator=generator,
                               steps_per_epoch=steps_per_epoch,
                               epochs=nb_epoch,
                               verbose=2,
                               shuffle=True,
                               callbacks=[
                                   checkpointer, tb, early_stopper, csv_logger,
                                   lrScheduler
                               ],
                               validation_data=val_generator,
                               validation_steps=10)
        print("from generator")
Example #12
def train(data_type, seq_length, model, saved_model=None,
          class_limit=None, image_shape=None,
          load_to_memory=False, batch_size=32, nb_epoch=100):
    # Helper: Save the model.
    checkpointer = ModelCheckpoint(
        filepath=os.path.join('data', 'checkpoints', model + '-' + data_type + \
            '.best2.hdf5'),
        verbose=1,
        save_best_only=True)

    # Helper: TensorBoard
    tb = TensorBoard(log_dir=os.path.join('data', 'logs', model))

    # Helper: Stop when we stop learning.
    early_stopper = EarlyStopping(patience=5)

    # Helper: Save results.
    timestamp = time.time()
    csv_logger = CSVLogger(os.path.join('data', 'logs', model + '-' + 'training-' + \
        str(timestamp) + '.log'))

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit
        )
    else:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit,
            image_shape=image_shape
        )

    # Get samples per epoch.
    # Multiply by 0.7 to attempt to guess how much of data.data is the train set.
    steps_per_epoch = (len(data.data) * 0.7) // batch_size

    if load_to_memory:
        # Get data.
        X, y = data.get_all_sequences_in_memory('train', data_type)
        X_test, y_test = data.get_all_sequences_in_memory('test', data_type)
    else:
        # Get generators.
        generator = data.frame_generator(batch_size, 'train', data_type)
        val_generator = data.frame_generator(batch_size, 'test', data_type)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)

    # Balance the class weights! (Requires the in-memory labels, so this
    # assumes load_to_memory=True.)
    print("setting weights!:")
    flashing = 0
    not_flashing = 0
    unknown = 0
    for label in y:
        if label[0]:
            flashing += 1
        elif label[1]:
            not_flashing += 1
        else:
            unknown += 1
    raw = [flashing, not_flashing, unknown]
    dist = [sum(raw) / float(i) for i in raw]
    # Keras class_weight keys are 0-based class indices.
    class_weights = {0: dist[0], 1: dist[1], 2: dist[2]}
    print(class_weights)

    # Use custom metrics because acc is garbage
    print("setting metrics!")
    metrics = Metrics()

    # Fit!
    if load_to_memory:
        # Use standard fit.
        rm.model.fit(
            X,
            y,
            batch_size=batch_size,
            validation_data=(X_test, y_test),
            verbose=1,
            callbacks=[tb, metrics],
            class_weight=class_weights,
            epochs=nb_epoch)
    else:
        # Use fit generator.
        rm.model.fit_generator(
            generator=generator,
            steps_per_epoch=steps_per_epoch,
            epochs=nb_epoch,
            verbose=1,
            callbacks=[tb, early_stopper, csv_logger, checkpointer],
            validation_data=val_generator,
            validation_steps=40,
            workers=4)
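
The manual counting above has a scikit-learn equivalent; a hedged alternative sketch (it needs the same in-memory one-hot labels y):

# Equivalent class weights via scikit-learn (alternative to the manual
# counts above):
import numpy as np
from sklearn.utils.class_weight import compute_class_weight

labels = np.argmax(y, axis=1)
weights = compute_class_weight('balanced', classes=np.unique(labels), y=labels)
class_weights = dict(enumerate(weights))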
Example #13
# (Fragment: the tail of a plot_confusion_matrix helper, followed by a
# standalone prediction script.)
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j,
                 i,
                 format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
    plt.show()


data = DataSet(seq_length=36, class_limit=None)

val_generator = data.frame_generator(300, 'test', "features")
# Pull a single batch of 300 test sequences from the generator.
X, y = next(val_generator)

model = load_model("data/checkpoints/lstm-features.012-0.180.hdf5")

predictions = model.predict(X)
predicted_classes = np.argmax(predictions, axis=1)
print(predicted_classes)
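
The plotting loop at the top of this example is the tail of the classic plot_confusion_matrix recipe; a hedged reconstruction of the missing head (the standard matplotlib version, not necessarily the author's exact code):

# Hedged reconstruction of the head of plot_confusion_matrix:
import itertools
import numpy as np
import matplotlib.pyplot as plt

def plot_confusion_matrix(cm, classes, normalize=False,
                          title='Confusion matrix', cmap=plt.cm.Blues):
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    # ...the for-loop shown at the top of this example continues from here.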
Example #14
def validate(data_type, model, seq_length=60, saved_model=None,
             class_limit=None, image_shape=None):
    batch_size = 16

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit
        )
    else:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit,
            image_shape=image_shape
        )

    val_generator = data.frame_generator(batch_size, 'test', data_type)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)

    # Evaluate! (`val_samples` is the Keras 1 name; Keras 2 uses `steps`.)
    results = rm.model.evaluate_generator(
        generator=val_generator,
        val_samples=15,
        use_multiprocessing=True,
        workers=1)

    print(results)
    print(rm.model.metrics_names)
    
    
    #val_generator = data.frame_generator(1, 'test', data_type)
    # Get the model again for a per-sample evaluation pass.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)
    # Evaluate sample by sample, tracking per-class hits (14 classes).
    scores = np.zeros([14])
    total = np.zeros([14])

    val_trues = []
    val_preds = []
    for X, y in data.gen_test('test', data_type):
        results = rm.model.predict(X)
        predicted = np.argmax(results, axis=-1)
        idx = np.where(np.array(y) == 1)
        true_label = idx[1]
        print(true_label)

        total[true_label] = total[true_label] + 1

        print(len(predicted))
        print(len(true_label))
        if predicted[0] == true_label[0]:
            scores[true_label] = scores[true_label] + 1

        # Accumulate every sample so the report below covers the full split.
        val_trues.extend(true_label)
        val_preds.extend(predicted)
    print('Confusion Matrix')
    # Unpacking tn/fp/fn/tp via ravel() only fits binary tasks; with 14
    # classes, print the full matrix.
    cm = confusion_matrix(val_trues, val_preds)
    print(cm)
    print('\n****************')

    print(classification_report(val_trues, val_preds))
    print(scores)
    print('\n****************')
    print(total)
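
A short follow-up that turns the scores/total counters above into per-class accuracies:

# Per-class accuracy from the counters above (guards against classes
# that never appeared in the test split):
per_class_acc = np.divide(scores, total, out=np.zeros_like(scores),
                          where=total > 0)
print(per_class_acc)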
Example #15
def train(data_type,
          seq_length,
          model,
          saved_model=None,
          class_limit=None,
          image_shape=None,
          load_to_memory=False,
          batch_size=32,
          nb_epoch=100):
    # Helper: Save the model.
    checkpointer = ModelCheckpoint(
        filepath=os.path.join('data', 'checkpoints', model + '-' + data_type + \
            '.{epoch:03d}-{val_loss:.3f}.hdf5'),
        verbose=1,
        save_best_only=True)

    # Helper: TensorBoard
    tb = TensorBoard(log_dir=os.path.join('data', 'logs', model))

    # Helper: Stop when we stop learning.
    early_stopper = EarlyStopping(patience=5)

    # Helper: Save results.
    timestamp = time.time()
    csv_logger = CSVLogger(os.path.join('data', 'logs', model + '-' + 'training-' + \
        str(timestamp) + '.log'))

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:
        data = DataSet(seq_length=seq_length,
                       class_limit=class_limit,
                       image_shape=image_shape)

    # Get samples per epoch.
    # Multiply by 0.7 to attempt to guess how much of data.data is the train set.
    steps_per_epoch = (len(data.data) * 0.7) // batch_size

    if load_to_memory:
        # Get data.
        X, y = data.get_all_sequences_in_memory('train', data_type)
        X_test, y_test = data.get_all_sequences_in_memory('test', data_type)
    else:
        # Get generators.
        generator = data.frame_generator(batch_size, 'train', data_type)
        val_generator = data.frame_generator(batch_size, 'test', data_type)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)

    # Fit!
    if load_to_memory:
        # Use standard fit.
        rm.model.fit(X,
                     y,
                     batch_size=batch_size,
                     validation_data=(X_test, y_test),
                     verbose=1,
                     callbacks=[tb, early_stopper, csv_logger],
                     epochs=nb_epoch)
    else:
        # Use fit generator.
        rm.model.fit_generator(
            generator=generator,
            steps_per_epoch=steps_per_epoch,
            epochs=nb_epoch,
            verbose=1,
            callbacks=[tb, early_stopper, csv_logger, checkpointer],
            validation_data=val_generator,
            validation_steps=40,
            workers=4)
Example #16
def train(data_type, seq_length, model, saved_model_extractnet=None,saved_model_lstm=None,
          concat=False, class_limit=None, image_shape=None,
          load_to_memory=False):
    # Set variables.
    nb_epoch = 1000
    batch_size = 32

    # Helper: Save the model.
    checkpointer = ModelCheckpoint(
        filepath='./data/checkpoints/' + model + '16-40-conv-lstm-mixed-' + data_type + \
            '.{epoch:03d}-{val_loss:.3f}.hdf5',
        verbose=1,
        save_best_only=True)

    # Helper: TensorBoard
    tb = TensorBoard(log_dir='./data/logs')

    # Helper: Stop when we stop learning.
    early_stopper = EarlyStopping(patience=10)

    # Helper: Save results.
    timestamp = time.time()
    csv_logger = CSVLogger('./data/logs/' + model + '-' + 'training-' + \
        str(timestamp) + '.log')

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit
        )
    else:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit,
            image_shape=image_shape
        )

    # Get samples per epoch.
    # Multiply by 0.7 to attempt to guess how much of data.data is the train set.
    steps_per_epoch = (len(data.data) * 0.7) // batch_size

    if load_to_memory:
        # Get data.
        X, y = data.get_all_sequences_in_memory(batch_size, 'train', data_type)
        print(X.shape)
        X_test, y_test = data.get_all_sequences_in_memory(batch_size, 'test', data_type)
    else:
        # Get generators.
        generator = data.frame_generator(batch_size, 'train', data_type, concat)
        val_generator = data.test_frame(batch_size, 'test', data_type, concat)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model_extractnet=saved_model_extractnet,
                         saved_model_lstm=saved_model_lstm)

    # Fit!
    if load_to_memory:
        # Use standard fit.
        rm.model.fit(X, y,
            batch_size=batch_size,
            validation_data=(X_test, y_test),
            shuffle=False,
            verbose=1,
            callbacks=[checkpointer, tb, early_stopper, csv_logger],
            epochs=nb_epoch)
    else:
        # Use fit generator.
        rm.model.fit_generator(
            generator=generator,
            steps_per_epoch=steps_per_epoch,
            epochs=nb_epoch,
            verbose=1,
            callbacks=[checkpointer, tb, early_stopper, csv_logger],
            validation_data=val_generator,
            validation_steps=20)
Example #17
print("Initializing Datasets and Dataloaders...")

# seq_length, batch_size, model, criterion, optimizer, scheduler and
# num_epochs are assumed to be defined earlier in the script.
data = DataSet(seq_length=seq_length)

steps_per_epoch = (len(data.data) * 0.7) // batch_size
val_steps = 40  # assumed: validation batches per epoch

load_to_memory = False
data_type = 'features'
if load_to_memory:
    # Get data.
    X, y = data.get_all_sequences_in_memory(batch_size, 'train', data_type)
    X_test, y_test = data.get_all_sequences_in_memory(batch_size, 'test',
                                                      data_type)
else:
    # Get generators.
    generator = data.frame_generator(batch_size, 'train', data_type)
    val_generator = data.frame_generator(batch_size, 'test', data_type)

dataloaders_dict = {'train': generator, 'val': val_generator}

model_ft, hist = train_model(
    model,
    dataloaders_dict,
    criterion,
    optimizer,
    scheduler,
    num_epochs=num_epochs,
    steps_per_epoch=steps_per_epoch,
    val_steps=val_steps,
)
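
train_model is external to this snippet; a hedged stub of the interface it is called with (names inferred from the call site, body omitted):

# Hypothetical stub matching the train_model call above (PyTorch-style
# trainer; an assumption, not the author's implementation):
def train_model(model, dataloaders, criterion, optimizer, scheduler,
                num_epochs=25, steps_per_epoch=None, val_steps=None):
    history = []
    # ...train on dataloaders['train'], validate on dataloaders['val']...
    return model, history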
Example #18
def train(data_type,
          seq_length,
          model,
          saved_model=None,
          concat=False,
          class_limit=None,
          image_shape=None,
          load_to_memory=False):
    # Set variables.
    nb_epoch = 1000
    # 32 for LSTM; 8 for CRNN; 5 for CNN-3d
    if model == 'conv_3d' or model == 'c3d':
        batch_size = 5
    elif model == 'crnn' or model == 'lrcn':
        batch_size = 8
    else:
        batch_size = 32

    # Helper: Save the model.
    checkpointer = ModelCheckpoint(
        filepath='/hdd/hpc/Projects/Weather/121201_Vi/data/checkpoints/' + model + '-' + data_type + \
            '.{epoch:03d}-{val_loss:.3f}.hdf5',
        verbose=1,
        save_best_only=True)

    # Helper: TensorBoard
    tb = TensorBoard(log_dir='/hdd/hpc/Projects/Weather/121201_Vi/data/logs')

    # Helper: Stop when we stop learning.
    early_stopper = EarlyStopping(patience=10)

    # Helper: Save results.
    timestamp = time.time()
    csv_logger = CSVLogger('/hdd/hpc/Projects/Weather/121201_Vi/data/logs/' + model + '-' + 'training-' + \
        str(timestamp) + '.log')

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:
        data = DataSet(seq_length=seq_length,
                       class_limit=class_limit,
                       image_shape=image_shape)

    # Get samples per epoch from the actual split sizes rather than the
    # 0.7 guess used elsewhere. (Note: `train` and `test` shadow the
    # module-level function names here.)
    #steps_per_epoch = (len(data.data) * 0.7) // batch_size
    train, test, validation = data.split_train_test()
    steps_per_epoch = len(train) // batch_size

    if load_to_memory:
        # Get data.
        # X, y = data.get_all_sequences_in_memory(batch_size, 'train', data_type, concat)
        # X_val, y_val = data.get_all_sequences_in_memory(batch_size, 'validation', data_type, concat)
        # X_test, y_test = data.get_all_sequences_in_memory(batch_size, 'test', data_type, concat)
        X, y = data.get_all_sequences_in_memory('train', data_type, concat)
        X_val, y_val = data.get_all_sequences_in_memory(
            'validation', data_type, concat)
        X_test, y_test = data.get_all_sequences_in_memory(
            'test', data_type, concat)
    else:
        # Get generators.
        generator = data.frame_generator(batch_size, 'train', data_type,
                                         concat)
        val_generator = data.frame_generator(batch_size, 'validation',
                                             data_type, concat)
        test_generator = data.frame_generator(batch_size, 'test', data_type,
                                              concat)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)

    # Fit!
    if load_to_memory:
        # Use standard fit.
        rm.model.fit(
            X,
            y,
            batch_size=batch_size,
            validation_data=(X_val, y_val),
            verbose=1,
            callbacks=[checkpointer, tb, early_stopper, csv_logger],
            # callbacks=[checkpointer, tb, csv_logger], #early_stopper, csv_logger],
            epochs=nb_epoch)

        # evaluate model
        results = rm.model.evaluate(X_test,
                                    y_test,
                                    batch_size=batch_size,
                                    verbose=1)
        print()
        print('Evaluation results on test data is:')
        print(results)
        print(rm.model.metrics_names)

    else:
        # Use fit generator.
        rm.model.fit_generator(
            generator=generator,
            steps_per_epoch=steps_per_epoch,
            epochs=nb_epoch,
            verbose=1,
            callbacks=[checkpointer, tb, early_stopper, csv_logger],
            # callbacks=[checkpointer, tb, csv_logger], #early_stopper, csv_logger],
            validation_data=val_generator,
            validation_steps=10)

        # Evaluate!
        results = rm.model.evaluate_generator(generator=test_generator,
                                              steps=len(test) // batch_size)
        print()
        print('Evaluation results on test data is:')
        print(results)
        print(rm.model.metrics_names)
Example #19
def train(data_type, seq_length, model, saved_model=None,
          class_limit=None, image_shape=None,
          load_to_memory=False, batch_size=32, nb_epoch=100):
    # Helper: Save the model.
    checkpointer = ModelCheckpoint(
        filepath=os.path.join('data', 'checkpoints', model + '-' + data_type + \
            '.{epoch:03d}-{val_loss:.3f}.hdf5'),
        verbose=1,
        save_best_only=True)

    # Helper: TensorBoard
    tb = TensorBoard(log_dir=os.path.join('data', 'logs', model))

    # Helper: Stop when we stop learning.
    early_stopper = EarlyStopping(patience=5)

    # Helper: Save results.
    timestamp = time.time()
    csv_logger = CSVLogger(os.path.join('data', 'logs', model + '-' + 'training-' + \
        str(timestamp) + '.log'))

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit
        )
    else:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit,
            image_shape=image_shape
        )

    # Get samples per epoch.
    # Multiply by 0.7 to attempt to guess how much of data.data is the train set.
    steps_per_epoch = (len(data.data) * 0.7) // batch_size

    if load_to_memory:
        # Get data.
        X, y = data.get_all_sequences_in_memory('train', data_type)
        X_test, y_test = data.get_all_sequences_in_memory('test', data_type)
    else:
        # Get generators.
        generator = data.frame_generator(batch_size, 'train', data_type)
        val_generator = data.frame_generator(batch_size, 'test', data_type)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)

    # Fit!
    if load_to_memory:
        # Use standard fit.
        rm.model.fit(
            X,
            y,
            batch_size=batch_size,
            validation_data=(X_test, y_test),
            verbose=1,
            callbacks=[tb, early_stopper, csv_logger],
            epochs=nb_epoch)
    else:
        # Use fit generator.
        rm.model.fit_generator(
            generator=generator,
            steps_per_epoch=steps_per_epoch,
            epochs=nb_epoch,
            verbose=1,
            callbacks=[tb, early_stopper, csv_logger, checkpointer],
            validation_data=val_generator,
            validation_steps=40,
            workers=4)
Example #20
def train(data_type,
          seq_length,
          model,
          saved_model=None,
          concat=False,
          class_limit=None,
          image_shape=None,
          load_to_memory=False):

    #Video-Classification-CNN-and-LSTM--master
    #callbacks = [ EarlyStopping(monitor='val_loss', patience=10, verbose=0),
    #ModelCheckpoint('video_1_LSTM_1_1024.h5', monitor='val_loss', save_best_only=True, verbose=0) ]
    # nb_epoch = 500

    # Set variables.
    nb_epoch = 1000
    batch_size = 32

    checkpointer = ModelCheckpoint(
        filepath='./data/checkpoints/' + model + '-' + data_type +  \
            '.{epoch:03d}-{val_loss:.3f}.hdf5',
        verbose=1,
        save_best_only=True)

    # filepath='./data/checkpoints/try.hdf5',

    # Helper: TensorBoard
    tb = TensorBoard(log_dir='./data/logs')

    # Helper: Stop when we stop learning.
    early_stopper = EarlyStopping(patience=200)

    # Helper: Save results.
    timestamp = time.time()
    csv_logger = CSVLogger('./data/logs/' + model + '-' + 'training-' + \
        str(timestamp) + '.log')

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:
        data = DataSet(seq_length=seq_length,
                       class_limit=class_limit,
                       image_shape=image_shape)

    # Get samples per epoch.
    # Multiply by 0.7 to attempt to guess how much of data.data is the train set.
    steps_per_epoch = (len(data.data) * 0.7) // batch_size

    if load_to_memory:
        # Get data.
        X, y = data.get_all_sequences_in_memory(batch_size, 'train', data_type,
                                                concat)
        X_test, y_test = data.get_all_sequences_in_memory(
            batch_size, 'test', data_type, concat)
    else:
        # Get generators.
        generator = data.frame_generator(batch_size, 'train', data_type,
                                         concat)
        val_generator = data.frame_generator(batch_size, 'test', data_type,
                                             concat)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)

    #model.fit(train_data,train_labels,validation_data=(validation_data,validation_labels),
    #batch_size=batch_size,nb_epoch=nb_epoch,callbacks=callbacks,shuffle=True,verbose=1)

    # Fit!
    if load_to_memory:
        # Use standard fit.
        rm.model.fit(X,
                     y,
                     batch_size=batch_size,
                     validation_data=(X_test, y_test),
                     verbose=1,
                     callbacks=[checkpointer, tb, early_stopper, csv_logger],
                     epochs=nb_epoch)
    else:
        # Use fit generator.
        rm.model.fit_generator(
            generator=generator,
            steps_per_epoch=steps_per_epoch,
            epochs=nb_epoch,
            verbose=1,
            callbacks=[checkpointer, tb, early_stopper, csv_logger],
            validation_data=val_generator,
            validation_steps=10)
Example #21
def train(data_type,
          seq_length,
          model,
          saved_model=None,
          class_limit=None,
          image_shape=None,
          load_to_memory=False,
          batch_size=32,
          nb_epoch=100):
    #Save the Model
    checkpointer = ModelCheckpoint(
        filepath=os.path.join('data', 'savedmodels', model + '-' + data_type + \
            '.{epoch:03d}-{val_loss:.3f}.hdf5'),
        verbose=1,
        save_best_only=True)

    #TensorBoard
    tb = TensorBoard(log_dir=os.path.join('data', 'logs', model))

    #Stop after 5 epochs when there is no progress in Learning
    early_stopper = EarlyStopping(patience=5)

    #Save Results in csv format
    timestamp = time.time()
    csv_logger = CSVLogger(os.path.join('data', 'logs', model + '-' + 'training-' + \
        str(timestamp) + '.log'))

    #Process the Data
    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:
        data = DataSet(seq_length=seq_length,
                       class_limit=class_limit,
                       image_shape=image_shape)

    #Get Steps per epoch
    #Guess how much of data.data is Train data by multiplying with 0.7
    steps_per_epoch = (len(data.data) * 0.7) // batch_size

    if load_to_memory:
        #Get Data
        X, y = data.get_all_sequences_in_memory('train', data_type)
        X_test, y_test = data.get_all_sequences_in_memory('test', data_type)
    else:
        #Get Generators
        generator = data.frame_generator(batch_size, 'train', data_type)
        val_generator = data.frame_generator(batch_size, 'test', data_type)

    #Get Model
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)

    #Fit by using Standard fit
    if load_to_memory:
        rm.model.fit(X,
                     y,
                     batch_size=batch_size,
                     validation_data=(X_test, y_test),
                     verbose=1,
                     callbacks=[tb, early_stopper, csv_logger],
                     epochs=nb_epoch)
    else:
        rm.model.fit_generator(
            generator=generator,
            steps_per_epoch=steps_per_epoch,
            epochs=nb_epoch,
            verbose=1,
            callbacks=[tb, early_stopper, csv_logger, checkpointer],
            validation_data=val_generator,
            validation_steps=40,
            workers=4)
Example #22
def train(data_type,
          seq_length,
          model,
          saved_model=None,
          concat=False,
          class_limit=None,
          image_shape=None,
          load_to_memory=False):
    # Set variables.
    nb_epoch = 1000
    batch_size = 16

    # Helper: Save the model.
    checkpointer = ModelCheckpoint(
        filepath='./data/checkpoints/' + model + '-' + data_type + \
                 '.{epoch:03d}-{val_loss:.3f}.hdf5',
        verbose=1,
        save_best_only=True)

    incepcheck = ModelCheckpoint(
        filepath='./data/checkpoints/' + model + '-' + data_type + \
                 '.{epoch:03d}-{val_loss:.3f}.hdf5',
        verbose=1,
        save_best_only=True,
        save_weights_only=True)

    # Helper: TensorBoard
    tb = TensorBoard(log_dir='./data/logs')

    # Helper: Stop when we stop learning.
    early_stopper = EarlyStopping(patience=10)

    # Helper: Save results.
    timestamp = time.time()
    csv_logger = CSVLogger('./data/logs/' + model + '-' + 'training-' + \
                           str(timestamp) + '.log')

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:
        data = DataSet(seq_length=seq_length,
                       class_limit=class_limit,
                       image_shape=image_shape)

    # Get samples per epoch.
    # Multiply by 0.7 to attempt to guess how much of data.data is the train set.
    steps_per_epoch = (len(data.data) * 0.7) // batch_size

    if load_to_memory:
        # Get data.
        X, y = data.get_all_sequences_in_memory(batch_size, 'train', data_type,
                                                concat)
        X_test, y_test = data.get_all_sequences_in_memory(
            batch_size, 'test', data_type, concat)

    elif model == 'div_crnn':
        generator = data.frame_generator2(batch_size, 'train', data_type,
                                          concat)
        val_generator = data.frame_generator2(batch_size, 'test', data_type,
                                              concat)

    else:
        # Get generators.
        generator = data.frame_generator(batch_size, 'train', data_type,
                                         concat)
        val_generator = data.frame_generator(batch_size, 'test', data_type,
                                             concat)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)

    # model_json_str = rm.model.to_json()
    # open('/home/takubuntu/PycharmProjects/DL/Wake_detect/IR_classification/data/checkpoints/json_model.json','w').write(model_json_str)

    # Fit!
    if load_to_memory:
        # Use standard fit.
        rm.model.fit(X,
                     y,
                     batch_size=batch_size,
                     validation_data=(X_test, y_test),
                     verbose=1,
                     callbacks=[checkpointer, tb, csv_logger],
                     epochs=nb_epoch)
    # elif model == 'inception*':
    #     rm.model.fit_generator(
    #         generator=generator,
    #         steps_per_epoch=steps_per_epoch,
    #         epochs=nb_epoch,
    #         verbose=1,
    #         callbacks=[incepcheck, tb, csv_logger],
    #         validation_data=val_generator,
    #         validation_steps=10)
    else:
        # Use fit generator.
        rm.model.fit_generator(generator=generator,
                               steps_per_epoch=steps_per_epoch,
                               epochs=nb_epoch,
                               verbose=1,
                               callbacks=[checkpointer, tb, csv_logger],
                               validation_data=val_generator,
                               validation_steps=10)
Example #23
def train(data_type, seq_length, model, learning_rate, learning_decay,
          saved_model=None, class_limit=None, image_shape=None,
          load_to_memory=False, batch_size=32, nb_epoch=100):
    # training_num is a module-level constant holding the train-set size.
    print('training_num is ', training_num)
    if model == 'lstm_regression':
        regression = 1
        sequence_len = 20
        monitor_par = 'val_loss'
    else:
        regression = 0
        sequence_len = seq_length
        monitor_par = 'val_acc'

    # Helper: Save the model.
    checkpointer = ModelCheckpoint(
        filepath=os.path.join(main_folder, 'checkpoints',model+'2', model + '-{epoch:03d}.hdf5'),
        #filepath=os.path.join(main_folder, 'checkpoints',model, model + '-' + data_type + \
            #'.{epoch:03d}-{val_loss:.3f}.hdf5'),
        monitor=monitor_par,
        verbose=1,
        save_best_only=True)

    # # Helper: TensorBoard
    # tb = TensorBoard(log_dir=os.path.join('data', 'logs', model))

    # # Helper: Stop when we stop learning.
    # early_stopper = EarlyStopping(patience=5)

    # Helper: Save results.
    #timestamp = time.time()
    csv_logger = CSVLogger(os.path.join(main_folder, 'logs', model +'2'+'-' + 'training-log' + '.csv'))

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit
        )
    else:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit,
            image_shape=image_shape
        )

    # Get samples per epoch.
    # Multiply by 0.7 to attempt to guess how much of data.data is the train set.
    #steps_per_epoch = (len(data.data) * 0.7) // batch_size
    steps_per_epoch = training_num // batch_size
    print('step is: %d'%steps_per_epoch)

    if load_to_memory:
        # Get data.
        X, y = data.get_all_sequences_in_memory('train', data_type)
        X_test, y_test = data.get_all_sequences_in_memory('test', data_type)
    else:
        # Get generators.
        generator = data.frame_generator(batch_size, 'train', data_type, regression)
        val_generator = data.frame_generator(batch_size, 'test', data_type, regression)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, sequence_len, learning_rate,
                        learning_decay, saved_model)

    # Fit!
    if load_to_memory:
        # Use standard fit.
        hist = rm.model.fit(
            X,
            y,
            batch_size=batch_size,
            validation_data=(X_test, y_test),
            verbose=1,
            callbacks=[csv_logger],
            epochs=nb_epoch)
    else:
        # Use fit generator.
        
        hist = rm.model.fit_generator(
            generator=generator,
            steps_per_epoch=steps_per_epoch, # in each epoch all the training data are evaluated
            epochs=nb_epoch,
            verbose=1,
            callbacks=[csv_logger, checkpointer],
            validation_data=val_generator,
            validation_steps=40,
            workers=4)  # if the GPU idles waiting for batches, try increasing workers
    return hist
Example #24
def train(data_type,
          seq_length,
          model,
          saved_model=None,
          class_limit=None,
          image_shape=None,
          load_to_memory=False,
          batch_size=32,
          nb_epoch=100,
          dens_kernel_reg_l1=0.1,
          dens_kernel_reg_l2=0.1,
          dens_activity_reg_l1=0.1,
          dens_activity_reg_l2=0.1,
          conv3d_w_reg_l1=0.1,
          conv3d_w_reg_l2=0.1,
          conv3d_b_reg_l1=0.1,
          conv3d_b_reg_l2=0.1,
          conv3d_activity_reg_l1=0.1,
          conv3d_activity_reg_l2=0.1):
    # str of time
    current_datetime = datetime.datetime.now()
    str_datetime = current_datetime.strftime("%Y-%m-%d_%H-%M-%S")

    # Helper: Save the model.
    checkpoint_path = os.path.join('data', 'checkpoints',
                                   EXP_NAME + '-' + model + '-' + data_type + \
                                   '.{epoch:03d}-{val_loss:.3f}' + '-' + str_datetime + '.hdf5')
    checkpointer = ModelCheckpoint(filepath=checkpoint_path,
                                   verbose=1,
                                   save_best_only=True)

    # Helper: Schedule learning rate decay
    def step_decay(epoch):
        initial_lr = INIT_LEARNING_RATE
        lr_drop_ratio = LR_DROP_RATIO
        epochs_drop = EPOCHS_DROP
        lr = initial_lr * math.pow(lr_drop_ratio,
                                   math.floor((1 + epoch) / epochs_drop))
        print(lr)
        return lr

    learning_rate = LearningRateScheduler(step_decay)
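
    # Illustrative schedule (assuming INIT_LEARNING_RATE=1e-3,
    # LR_DROP_RATIO=0.5, EPOCHS_DROP=10): epochs 0-8 keep 1e-3,
    # epochs 9-18 drop to 5e-4, epochs 19-28 to 2.5e-4, and so on.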

    # Helper: TensorBoard
    # tb = TensorBoard(log_dir=os.path.join('data', 'logs', EXP_NAME + str_datetime))
    tb = LRTensorBoard(log_dir=os.path.join('data', 'logs', EXP_NAME +
                                            str_datetime))

    # Helper: Save results.
    log_path = os.path.join(
        'data', 'logs', EXP_NAME + '-' + 'training-' + str_datetime + '.log')
    csv_logger = CSVLogger(log_path)

    # Helper: Stop when we stop learning.
    early_stopper = EarlyStopping(patience=PATIENTS)

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:
        data = DataSet(seq_length=seq_length,
                       class_limit=class_limit,
                       image_shape=image_shape)

    # Get samples per epoch.
    # Multiply by 0.7 to attempt to guess how much of data.data is the train set.
    steps_per_epoch = (len(data.data) * 0.7) // batch_size

    if load_to_memory:
        # Get data.
        X, y = data.get_all_sequences_in_memory('train', data_type)
        X_test, y_test = data.get_all_sequences_in_memory('test', data_type)
    else:
        # Get generators.
        generator = data.frame_generator(batch_size, 'train', data_type)
        val_generator = data.frame_generator(batch_size, 'test', data_type)

    # Get the model.
    rm = ResearchModels(len(data.classes),
                        model,
                        seq_length,
                        saved_model,
                        dens_kernel_reg_l1=dens_kernel_reg_l1,
                        dens_kernel_reg_l2=dens_kernel_reg_l2,
                        dens_activity_reg_l1=dens_activity_reg_l1,
                        dens_activity_reg_l2=dens_activity_reg_l2,
                        conv3d_w_reg_l1=conv3d_w_reg_l1,
                        conv3d_w_reg_l2=conv3d_w_reg_l2,
                        conv3d_b_reg_l1=conv3d_b_reg_l1,
                        conv3d_b_reg_l2=conv3d_b_reg_l2,
                        conv3d_activity_reg_l1=conv3d_activity_reg_l1,
                        conv3d_activity_reg_l2=conv3d_activity_reg_l2)

    # Get the optimizer:
    if OPTIMIZER == 'SGD':
        optimizer = keras.optimizers.SGD(lr=INIT_LEARNING_RATE,
                                         momentum=MOMENTUM,
                                         nesterov=False)
    elif OPTIMIZER == 'RMSProp':
        optimizer = keras.optimizers.RMSprop(lr=INIT_LEARNING_RATE,
                                             epsilon=None)
    elif OPTIMIZER == 'Adam':
        optimizer = keras.optimizers.Adam(lr=INIT_LEARNING_RATE,
                                          beta_1=0.9,
                                          beta_2=0.999,
                                          epsilon=None,
                                          amsgrad=False)
    else:
        raise ValueError('Unsupported OPTIMIZER: %s' % OPTIMIZER)

    rm.model.compile(loss=LOSS_FUNCTION,
                     optimizer=optimizer,
                     metrics=['accuracy'])

    # Fit!
    if load_to_memory:
        # Use standard fit.
        rm.model.fit(X,
                     y,
                     batch_size=batch_size,
                     validation_data=(X_test, y_test),
                     verbose=1,
                     callbacks=[tb, early_stopper, csv_logger, learning_rate],
                     epochs=nb_epoch)
    else:
        # Use fit generator.
        rm.model.fit_generator(generator=generator,
                               steps_per_epoch=steps_per_epoch,
                               epochs=nb_epoch,
                               verbose=1,
                               callbacks=[
                                   tb, early_stopper, csv_logger, checkpointer,
                                   learning_rate
                               ],
                               validation_data=val_generator,
                               validation_steps=40,
                               workers=4)
Example #25
def train(data_type,
          seq_length,
          model,
          saved_model=None,
          concat=False,
          class_limit=None,
          image_shape=None,
          load_to_memory=True):
    # Set variables.
    nb_epoch = 1000000
    batch_size = 32

    # Helper: Save the model.
    checkpointer = ModelCheckpoint(  # writes out model checkpoints
        filepath='./data/checkpoints/' + model + '-' + data_type + \
            '.{epoch:03d}-{val_loss:.3f}.hdf5',
        verbose=1,
        save_best_only=True)

    # Helper: TensorBoard
    tb = TensorBoard(log_dir='./data/logs')

    # Helper: Stop when we stop learning.
    early_stopper = EarlyStopping(
        patience=100000)  # number of epochs with no improvement before stopping

    # Helper: Save results.
    timestamp = time.time()
    csv_logger = CSVLogger('./data/logs/' + model + '-' + 'training-' + \
        str(timestamp) + '.log')

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:
        data = DataSet(seq_length=seq_length,
                       class_limit=class_limit,
                       image_shape=image_shape)

    # Get samples per epoch.
    # Multiply by 0.7 to attempt to guess how much of data.data is the train set.

    steps_per_epoch = (len(data.data) * 0.7) // batch_size
    print("Iterations per epoach", steps_per_epoch)

    if load_to_memory:
        # Get data.

        X, y = data.get_all_sequences_in_memory('train', data_type, concat)
        X_test, y_test = data.get_all_sequences_in_memory(
            'test', data_type, concat)
    else:
        # Get generators.
        generator = data.frame_generator(batch_size, 'train', data_type,
                                         concat)
        val_generator = data.frame_generator(batch_size, 'test', data_type,
                                             concat)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length,
                        saved_model)  #object for the architecture we need
    print(rm)
    # Fit!
    if load_to_memory:
        # Use standard fit.
        rm.model.fit(X,
                     y,
                     batch_size=batch_size,
                     validation_data=(X_test, y_test),
                     verbose=1,
                     callbacks=[tb, early_stopper, csv_logger],
                     epochs=nb_epoch)