Example #1
def getCallbacks(patience_es, patience_rlr):
    """
    FUNCTION:
        Returns a list of callbacks configured with the provided patience values
    
    PARAMS:
        patience_es: int
            Number of epochs to wait before EarlyStopping is triggered
        patience_rlr: int
            Number of epochs to wait before ReduceLearningRate is triggered
    
    RETURNS:
        List of callbacks
    """
    return modelFuncs.getBasicCallbacks(patience_es=patience_es, patience_rlr=patience_rlr)
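A minimal usage sketch (not from the library): it assumes getCallbacks returns standard Keras callbacks that can be passed straight to model.fit, and the toy model and random data below are placeholders created only to show where the callback list plugs in.

import numpy as np
import tensorflow as tf

# Placeholder data and model for illustration only.
X = np.random.rand(256, 8)
y = np.random.rand(256, 1)

model = tf.keras.Sequential([
    tf.keras.Input(shape=(8,)),
    tf.keras.layers.Dense(16, activation='relu'),
    tf.keras.layers.Dense(1),
])
model.compile(loss='mean_absolute_error', optimizer='adam')

# The returned callbacks are assumed to handle early stopping and
# learning-rate reduction, so a large epoch count is safe here.
model.fit(
    X, y,
    epochs=500,
    batch_size=256,
    validation_split=0.2,
    callbacks=getCallbacks(patience_es=60, patience_rlr=40),
)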
Example #2
def setLSTMCallbacks(patience_es, patience_rlr):
    """
    FUNCTION:
        Redefines the default LSTM callbacks
        NB: only affects the current runtime state (the module-level _default_LSTM_args)
    
    PARAMS:
        patience_es: int
            Number of epochs to wait before EarlyStopping is triggered
        patience_rlr: int
            Number of epochs to wait before ReduceLearningRate is triggered
    
    RETURNS:
        None
    """
    global _default_LSTM_args
    _default_LSTM_args['callbacks'] = modelFuncs.getBasicCallbacks(patience_es=patience_es, patience_rlr=patience_rlr)
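A minimal sketch of calling setLSTMCallbacks, assuming the module-level _default_LSTM_args dictionary shown in the later examples; the patience values here are arbitrary.

# Illustrative only: rebuild the default LSTM callbacks with shorter patience
# before starting a new training run.
setLSTMCallbacks(patience_es=30, patience_rlr=20)
print(_default_LSTM_args['callbacks'])  # the freshly rebuilt callback list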
Example #3
_X_train = None
_y_train = None
_X_test = None
_y_test = None
_maxEnrolWindow = None
_indexColumn = None

_default_MLP_args = {
    'activation': 'relu',
    'loss': 'mean_absolute_error',
    'optimizer': 'adam',
    'metrics': ['mean_absolute_error'],
    'epochs': 500,
    'batchSize': 128 * 2,
    'verbose': 1,
    'callbacks': modelFuncs.getBasicCallbacks(patience_es=60, patience_rlr=40),
    'enrolWindow': 0,
    'validationSize': 0.2,
    'testSize': 0.2,
}

_default_LSTM_args = {
    'activation': 'tanh',
    'loss': 'mean_absolute_error',
    'optimizer': 'adam',
    'metrics': ['mean_absolute_error'],
    'epochs': 500,
    'batchSize': 128 * 2,
    'verbose': 1,
    'callbacks': modelFuncs.getBasicCallbacks(patience_es=60, patience_rlr=40),
    'enrolWindow': 12,
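Both default dictionaries take their 'callbacks' entry from modelFuncs.getBasicCallbacks. As a point of reference only, a plausible plain-Keras equivalent, assuming the two patience values map onto EarlyStopping and ReduceLROnPlateau monitoring the validation loss, might look like this (a sketch, not the library's actual implementation):

import tensorflow as tf

def basic_callbacks_sketch(patience_es=60, patience_rlr=40):
    # Assumed behaviour: stop training once the validation loss has stalled for
    # patience_es epochs, and halve the learning rate after patience_rlr epochs
    # without improvement.
    return [
        tf.keras.callbacks.EarlyStopping(
            monitor='val_loss',
            patience=patience_es,
            restore_best_weights=True,
        ),
        tf.keras.callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            patience=patience_rlr,
            factor=0.5,
        ),
    ]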
Example #4
from howiml.utils import plots
from howiml.utils import prints
from howiml.utils import analysis
from howiml.utils import modelFuncs  # assumed: modelFuncs is imported from the same howiml.utils package

import numpy as np
import tensorflow as tf

_default_MLP_args = {
    'activation': 'relu',
    'loss': 'mean_squared_error',
    'optimizer': 'adam',
    'metrics': ['mean_squared_error'],
    'epochs': 2000,
    'batchSize': 64,
    'verbose': 1,
    'callbacks': modelFuncs.getBasicCallbacks(patience_es=300, patience_rlr=150),
    'enrolWindow': 0,
    'validationSize': 0.2,
    'testSize': 0.2,
}

_default_LSTM_args = {
    'activation': 'tanh',
    'loss': 'mean_squared_error',
    'optimizer': 'adam',
    'metrics': ['mean_squared_error'],
    'epochs': 500,
    'batchSize': 128,
    'verbose': 1,
    'callbacks': modelFuncs.getBasicCallbacks(patience_es=75, patience_rlr=50),
    'enrolWindow': 32,
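The key names in these dictionaries suggest a direct mapping onto Keras compile/fit parameters. A hedged sketch of how _default_MLP_args might be consumed; the toy model and data are placeholders and not part of howiml:

import numpy as np
import tensorflow as tf

args = _default_MLP_args  # defined above

# Placeholder data and model, used only to show where each key would plug in.
X = np.random.rand(512, 10)
y = np.random.rand(512, 1)

model = tf.keras.Sequential([
    tf.keras.Input(shape=(10,)),
    tf.keras.layers.Dense(64, activation=args['activation']),
    tf.keras.layers.Dense(1),
])
model.compile(loss=args['loss'], optimizer=args['optimizer'], metrics=args['metrics'])
model.fit(
    X, y,
    epochs=args['epochs'],
    batch_size=args['batchSize'],
    verbose=args['verbose'],
    validation_split=args['validationSize'],
    callbacks=args['callbacks'],
)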