Example No. 1
    def compute(self, config, budget, **kwargs):
        """
        Evaluates the configuration on the defined budget and returns the validation performance.

        Args:
            config: dictionary containing the sampled configurations by the optimizer
            budget: (float) amount of time/epochs/etc. the model can use to train
        Returns:
            dictionary with mandatory fields:
                'loss' (scalar)
                'info' (dict)
        """
        lr = config["learning_rate"]
        num_filters = config["num_filters"]
        batch_size = config["batch_size"]
        filter_size = config["filter_size"]

        epochs = int(budget)  # the budget arrives as a float; Keras expects an integer epoch count

        # TODO: train and validate your convolutional neural networks here
        # <JAB>

        # Define the model
        model = Sequential()
        model.add(
            Conv2D(num_filters,
                   kernel_size=filter_size,
                   activation='relu',
                   input_shape=(28, 28, 1),
                   padding='same'))
        model.add(MaxPooling2D(pool_size=2))
        model.add(
            Conv2D(num_filters,
                   kernel_size=filter_size,
                   activation='relu',
                   padding='same'))
        model.add(MaxPooling2D(pool_size=2))

        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dense(10, activation='softmax'))

        optimizer = SGD(learning_rate=lr)

        model.compile(optimizer=optimizer,
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        model.summary()

        # Train the Model
        print('\n\n*Starting Training:')
        train_his = model.fit(self.x_train,
                              self.y_train,
                              batch_size=batch_size,
                              epochs=epochs,
                              validation_data=(self.x_test, self.y_test),
                              verbose=2,
                              use_multiprocessing=True)

        #print('\n\n*Training Evaluation:')
        #train_score = model.evaluate(self.x_train, self.y_train, verbose=0)
        print('\n\n*Validation Evaluation:')
        val_score = model.evaluate(self.x_valid, self.y_valid, verbose=0)
        print('\n\n*Test Evaluation:')
        test_score = model.evaluate(self.x_test, self.y_test, verbose=0)

        # </JAB>
        # TODO: We minimize so make sure you return the validation error here

        from tensorflow.keras.initializers import glorot_uniform  # Or your initializer of choice

        # Re-initialize the weights so the next evaluation starts from scratch.
        # (.eval() only works inside a TF1 session; .numpy() is the eager TF2
        # equivalent.)
        initial_weights = model.get_weights()
        new_weights = [
            glorot_uniform()(w.shape).numpy() for w in initial_weights
        ]
        model.set_weights(new_weights)

        return ({
            'loss':
            val_score[0],  # this is a mandatory field to run hyperband
            'info': {
                'fit_lc': train_his.history,
                'test_score': test_score
            }  # can be used for any user-defined information - also mandatory
        })
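The compute() method above appears to follow the hpbandster Worker interface. A minimal sketch of how such a method is typically driven (the KerasWorker class name and the sampled config values below are illustrative assumptions, not part of the original):

worker = KerasWorker(run_id='example')  # hypothetical Worker subclass holding compute()
sample_config = {
    'learning_rate': 1e-3,
    'num_filters': 32,
    'batch_size': 64,
    'filter_size': 3,
}
result = worker.compute(config=sample_config, budget=3)
print(result['loss'], result['info']['test_score'])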
Example No. 2
def Model_RNN3(input_shape,
               classes,
               n_pool='average',
               n_l2=0.001,
               n_init='he_normal',
               **kwargs):
    name = kwargs.get("name", 'AtzoriNet2')
    base_channel = kwargs.get("base_channel", 64)

    if n_init == 'glorot_normal':
        kernel_init = initializers.glorot_normal(seed=0)
    elif n_init == 'glorot_uniform':
        kernel_init = initializers.glorot_uniform(seed=0)
    elif n_init == 'he_normal':
        kernel_init = initializers.he_normal(seed=0)
    elif n_init == 'he_uniform':
        kernel_init = initializers.he_uniform(seed=0)
    elif n_init == 'normal':
        kernel_init = initializers.normal(seed=0)
    elif n_init == 'uniform':
        kernel_init = initializers.uniform(seed=0)
    # kernel_init = n_init
    kernel_regl = regularizers.l2(n_l2)

    ## Block 0 [Input]
    X_input = Input(input_shape, name='b0_input')
    X = X_input
    chanel = base_channel
    ################################################################
    # X=AveragePooling2D((3,1),strides=(1,1),padding='same')(X)
    X = ZeroPadding2D((0, 1))(X)
    X = Conv2D(chanel, (1, 3),
               padding='valid',
               kernel_regularizer=kernel_regl,
               kernel_initializer=kernel_init,
               name='b{}_conv2d_1_3x3'.format(1))(X)
    X = LayerNormalization()(X)
    X = Activation('relu', name='b{}_relu1'.format(1))(X)
    chanel = chanel * 2
    X = ZeroPadding2D((0, 1))(X)  # (8,8)
    X = Conv2D(chanel, (1, 3),
               strides=(1, 2),
               padding='valid',
               kernel_regularizer=kernel_regl,
               kernel_initializer=kernel_init,
               name='b{}_conv2d_2_3x3'.format(1))(X)
    X = LayerNormalization()(X)
    X = Activation('relu', name='b{}_relu2'.format(1))(X)
    ################################################################
    # CNN to RNN
    inner = Reshape(target_shape=(60, chanel * 4),
                    name='reshape')(X)  # (None, 60, chanel * 4)
    inner = Dense(512,
                  activation='relu',
                  kernel_initializer='he_normal',
                  name='dense1')(inner)  # (None, 60, 512)
    ################################################################
    # RNN layer
    gru_1 = LSTM(256,
                 return_sequences=True,
                 kernel_initializer='he_normal',
                 name='gru1')(inner)  # (None, 60, 256)
    gru_1b = LSTM(256,
                  return_sequences=True,
                  go_backwards=True,
                  kernel_initializer='he_normal',
                  name='gru1_b')(inner)
    # reversed_gru_1b = Lambda(lambda inputTensor: tf.reverse(inputTensor, axes=1))(gru_1b)
    reversed_gru_1b = tf.reverse(gru_1b, axis=[1])
    gru1_merged = add([gru_1, reversed_gru_1b])  # (None, 60, 256)
    gru1_merged = LayerNormalization()(gru1_merged)

    gru_2 = LSTM(256,
                 return_sequences=True,
                 kernel_initializer='he_normal',
                 name='gru2')(gru1_merged)
    gru_2b = LSTM(256,
                  return_sequences=True,
                  go_backwards=True,
                  kernel_initializer='he_normal',
                  name='gru2_b')(gru1_merged)
    # reversed_gru_2b = Lambda(lambda inputTensor: tf.reverse(inputTensor, axes=1))(gru_2b)
    reversed_gru_2b = tf.reverse(gru_2b, axis=[1])
    gru2_merged = concatenate([gru_2, reversed_gru_2b])  # (None, 60, 512)
    X = gru2_merged
    ################################################################
    # X = BatchNormalization()(X)
    X = GlobalAveragePooling1D()(X)
    X = Dropout(0.5)(X)
    ## Block 5 [Pad -> Conv -> Softmax]
    X = Dense(classes, activation="softmax")(X)
    model = Model(
        inputs=X_input,
        outputs=X,
        name=name,
    )

    return model

Example No. 3
#scaler = StandardScaler()
#scaler1 = scaler.fit(y1_train)
#y1_train = scaler.transform(y1_train)
#y1_test = scaler.transform(y1_test)
#sc = StandardScaler()
#sc = scaler.fit(y2_train)
#y2_train = sc.transform(y2_train)
#y2_test = sc.transform(y2_test)
my_init = glorot_uniform(seed=42)



def MT_model(perm):

    # def funx1(i, maxL):
    #     if i < maxL:
    #         return Input(shape=(fpSize,), name=f"MRg_{i}")
    #     elif i < 2*maxL - 3 and i != maxL + 2:
    #     elif maxL <= i < 2*maxL:
    #         return Input(shape=(FpLen,), name=f"Ml_{i}")
    #     else:
    #         return Input(shape=(FpLen1,), name=f"Sq_{i}")
Example No. 4
def convolutional_block(X, f, filters, stage, block, s=2):
    """
    Implementation of the convolutional block as defined in Figure 4

    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- integer, specifying the shape of the middle CONV's window for the main path
    filters -- python list of integers, defining the number of filters in the CONV layers of the main path
    stage -- integer, used to name the layers, depending on their position in the network
    block -- string/character, used to name the layers, depending on their position in the network
    s -- integer, specifying the stride to be used

    Returns:
    X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C)
    """

    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Retrieve Filters
    F1, F2, F3 = filters

    # Save the input value
    X_shortcut = X

    ##### MAIN PATH #####
    # First component of main path
    X = Conv2D(F1, (1, 1),
               strides=(s, s),
               name=conv_name_base + '2a',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    # Second component of main path (≈3 lines)
    X = Conv2D(filters=F2,
               kernel_size=(f, f),
               strides=(1, 1),
               padding='same',
               name=conv_name_base + '2b',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    # Third component of main path (≈2 lines)
    X = Conv2D(filters=F3,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid',
               name=conv_name_base + '2c',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    ##### SHORTCUT PATH #### (≈2 lines)
    X_shortcut = Conv2D(filters=F3,
                        kernel_size=(1, 1),
                        strides=(s, s),
                        padding='valid',
                        name=conv_name_base + '1',
                        kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
    X_shortcut = BatchNormalization(axis=3,
                                    name=bn_name_base + '1')(X_shortcut)

    # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X
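A quick shape check for convolutional_block() (a sketch; the input size and filter counts are arbitrary assumptions):

import tensorflow as tf

X_in = tf.keras.Input(shape=(32, 32, 256))
out = convolutional_block(X_in, f=3, filters=[64, 64, 256], stage=2, block='a', s=2)
print(out.shape)  # s=2 halves the spatial dims: (None, 16, 16, 256)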
Example No. 5
#==================================================================
#********************  Learning  **********************************
#==================================================================

# Importing the Keras libraries and packages
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras import losses
from tensorflow.keras import optimizers
import tensorflow.keras.initializers as init

# Initialising the ANN
reg = Sequential()
reg.add(
    Dense(units=128,
          kernel_initializer=init.glorot_uniform(),
          activation='relu',
          input_dim=5))
reg.add(
    Dense(units=256,
          kernel_initializer=init.glorot_uniform(),
          activation='relu'))
reg.add(
    Dense(units=512,
          kernel_initializer=init.glorot_uniform(),
          activation='relu'))
reg.add(
    Dense(units=128,
          kernel_initializer=init.glorot_uniform(),
          activation='relu'))
reg.add(
    Dense(units=1,
          kernel_initializer=init.glorot_uniform()))  # the snippet is truncated here; a single output unit is an assumed completion
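The snippet also ends before the model is compiled; a plausible continuation for a regression network like this one (the loss and optimizer choices are assumptions):

reg.compile(optimizer=optimizers.Adam(), loss=losses.mean_squared_error)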
Example No. 6
def STN(model_params, input_shape=(53, 52, 63, 53), output_shape=1):
    '''Build the CNN model
    model_params: dict with the architecture parameters
        convFilt: list with number of filters in each layer
        dropout: dropout before the last dense layer
        l2: weights decay regularization parameter (L2)
        optlr: learning rate
        optType: 0 = Nadam, 1 = Adamax, 2 = SGD, 3 = Adam, 4 = Nadam
        optMom: momentum rate
    input_shape: tuple, the shape of the input volume
    output_shape: integer, # of output neurons
    '''
    def convolutional_block(X, filters, stage, s=2, weight_decay=0.001):
        # defining name basis
        conv_name_base = 'conv_' + stage
        bn_name_base = 'bn_' + stage
        # First component of main path
        X = Conv3D(filters, (3, 3, 3),
                   strides=(1, 1, 1),
                   name=conv_name_base + '_1',
                   kernel_initializer=glorot_uniform(seed=0),
                   kernel_regularizer=regularizers.l2(weight_decay))(X)
        X = BatchNormalization(name=bn_name_base + '_1')(X)
        X = Activation('relu')(X)
        X = Conv3D(filters, (3, 3, 3),
                   strides=(s, s, s),
                   name=conv_name_base + '_2',
                   kernel_initializer=glorot_uniform(seed=0),
                   kernel_regularizer=regularizers.l2(weight_decay))(X)
        X = BatchNormalization(name=bn_name_base + '_2')(X)
        X = Activation('relu')(X)
        return X

    filters = model_params.get('convFilt')
    weight_decay = model_params.get('l2')

    optType, optlr, optMom = model_params.get('optType'), model_params.get(
        'optlr'), model_params.get('optMom')
    Optm = OptimTypeFunc(optType, optlr, optMom)
    X_input = Input(shape=input_shape)
    X = Conv3D(filters[0], (1, 1, 1),
               name='conv3D_first_reduce_channels',
               kernel_initializer=glorot_uniform(seed=0),
               kernel_regularizer=regularizers.l2(weight_decay))(X_input)
    X = BatchNormalization(name='bn_f_reduce_channels')(X)
    X = Activation('relu')(X)
    X = convolutional_block(X,
                            filters=filters[1],
                            stage='a',
                            weight_decay=weight_decay)
    X = convolutional_block(X,
                            filters=filters[2],
                            stage='b',
                            weight_decay=weight_decay)
    X = Conv3D(filters[4], (1, 1, 1),
               name='conv3D_second_reduce_channels',
               kernel_initializer=glorot_uniform(seed=0),
               kernel_regularizer=regularizers.l2(weight_decay))(X)
    X = BatchNormalization(name='bn_s_reduce_channels')(X)
    X = Activation('relu')(X)
    X = Flatten()(X)
    X = Dropout(model_params.get('dropout'))(X)
    final_pred = Dense(output_shape)(X)
    model = Model(inputs=X_input, outputs=final_pred, name='SFCN')
    model.compile(loss=['mse'],
                  optimizer=Optm,
                  metrics=['mean_absolute_error', 'mse'])
    model.summary()
    return model
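STN() relies on an OptimTypeFunc() helper that is not included in this snippet. A plausible implementation following the optType mapping in the docstring (an assumption, not the original helper):

from tensorflow.keras import optimizers

def OptimTypeFunc(optType, lr, mom):
    # 2 = SGD, the only option that uses the momentum argument
    if optType == 2:
        return optimizers.SGD(learning_rate=lr, momentum=mom)
    if optType == 1:
        return optimizers.Adamax(learning_rate=lr)
    if optType == 3:
        return optimizers.Adam(learning_rate=lr)
    return optimizers.Nadam(learning_rate=lr)  # optType 0 and 4 per the docstring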
Example No. 7
def res_block(X, filter, stage):

    # Convolutional_block
    X_copy = X

    f1, f2, f3 = filter

    # Main Path
    X = Conv2D(f1, (1, 1),
               strides=(1, 1),
               name='res_' + str(stage) + '_conv_a',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = MaxPool2D((2, 2))(X)
    X = BatchNormalization(axis=3, name='bn_' + str(stage) + '_conv_a')(X)
    X = Activation('relu')(X)

    X = Conv2D(f2,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               name='res_' + str(stage) + '_conv_b',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name='bn_' + str(stage) + '_conv_b')(X)
    X = Activation('relu')(X)

    X = Conv2D(f3,
               kernel_size=(1, 1),
               strides=(1, 1),
               name='res_' + str(stage) + '_conv_c',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name='bn_' + str(stage) + '_conv_c')(X)

    # Short path
    X_copy = Conv2D(f3,
                    kernel_size=(1, 1),
                    strides=(1, 1),
                    name='res_' + str(stage) + '_conv_copy',
                    kernel_initializer=glorot_uniform(seed=0))(X_copy)
    X_copy = MaxPool2D((2, 2))(X_copy)
    X_copy = BatchNormalization(axis=3,
                                name='bn_' + str(stage) + '_conv_copy')(X_copy)

    # ADD
    X = Add()([X, X_copy])
    X = Activation('relu')(X)

    # Identity Block 1
    X_copy = X

    # Main Path
    X = Conv2D(f1, (1, 1),
               strides=(1, 1),
               name='res_' + str(stage) + '_identity_1_a',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3,
                           name='bn_' + str(stage) + '_identity_1_a')(X)
    X = Activation('relu')(X)

    X = Conv2D(f2,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               name='res_' + str(stage) + '_identity_1_b',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3,
                           name='bn_' + str(stage) + '_identity_1_b')(X)
    X = Activation('relu')(X)

    X = Conv2D(f3,
               kernel_size=(1, 1),
               strides=(1, 1),
               name='res_' + str(stage) + '_identity_1_c',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3,
                           name='bn_' + str(stage) + '_identity_1_c')(X)

    # ADD
    X = Add()([X, X_copy])
    X = Activation('relu')(X)

    # Identity Block 2
    X_copy = X

    # Main Path
    X = Conv2D(f1, (1, 1),
               strides=(1, 1),
               name='res_' + str(stage) + '_identity_2_a',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3,
                           name='bn_' + str(stage) + '_identity_2_a')(X)
    X = Activation('relu')(X)

    X = Conv2D(f2,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               name='res_' + str(stage) + '_identity_2_b',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3,
                           name='bn_' + str(stage) + '_identity_2_b')(X)
    X = Activation('relu')(X)

    X = Conv2D(f3,
               kernel_size=(1, 1),
               strides=(1, 1),
               name='res_' + str(stage) + '_identity_2_c',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3,
                           name='bn_' + str(stage) + '_identity_2_c')(X)

    # ADD
    X = Add()([X, X_copy])
    X = Activation('relu')(X)

    return X
Example No. 8
def ResNet(input_shape, classes):
    _input = Input(shape=input_shape)
    res = ZeroPadding2D([3, 3])(_input)

    # stage 1
    res = Conv2D(filters=64,
                 kernel_size=(7, 7),
                 strides=(2, 2),
                 name="res_stage_1_conv",
                 kernel_initializer=glorot_uniform(seed=0))(res)
    res = BatchNormalization(axis=3, name="res_stage_1_BN")(res)
    res = Activation('relu')(res)
    res = MaxPooling2D((3, 3), strides=(2, 2))(res)

    # stage 2
    res = conv_block(input_tensor=res,
                     kernel_size=3,
                     filters=[64, 64, 256],
                     stride=1,
                     stage="2a")

    res = id_block(input_tensor=res,
                   kernel_size=3,
                   filters=[64, 64, 256],
                   stage="2b")
    res = id_block(input_tensor=res,
                   kernel_size=3,
                   filters=[64, 64, 256],
                   stage="2c")
    # stage 3
    res = conv_block(input_tensor=res,
                     kernel_size=3,
                     filters=[128, 128, 512],
                     stride=2,
                     stage="3a")
    res = id_block(input_tensor=res,
                   kernel_size=3,
                   filters=[128, 128, 512],
                   stage="3b")
    res = id_block(input_tensor=res,
                   kernel_size=3,
                   filters=[128, 128, 512],
                   stage="3c")
    res = id_block(input_tensor=res,
                   kernel_size=3,
                   filters=[128, 128, 512],
                   stage="3d")

    # stage 4
    res = conv_block(input_tensor=res,
                     kernel_size=3,
                     filters=[256, 256, 1024],
                     stride=2,
                     stage="4a")
    res = id_block(input_tensor=res,
                   kernel_size=3,
                   filters=[256, 256, 1024],
                   stage="4b")
    res = id_block(input_tensor=res,
                   kernel_size=3,
                   filters=[256, 256, 1024],
                   stage="4c")
    res = id_block(input_tensor=res,
                   kernel_size=3,
                   filters=[256, 256, 1024],
                   stage="4d")
    res = id_block(input_tensor=res,
                   kernel_size=3,
                   filters=[256, 256, 1024],
                   stage="4e")
    res = id_block(input_tensor=res,
                   kernel_size=3,
                   filters=[256, 256, 1024],
                   stage="4f")

    # stage 5
    res = conv_block(input_tensor=res,
                     kernel_size=3,
                     filters=[512, 512, 2048],
                     stride=2,
                     stage="5a")
    res = id_block(input_tensor=res,
                   kernel_size=3,
                   filters=[512, 512, 2048],
                   stage="5b")
    res = id_block(input_tensor=res,
                   kernel_size=3,
                   filters=[512, 512, 2048],
                   stage="5c")
    res = AveragePooling2D(pool_size=(2, 2), padding='same')(res)

    res = Flatten()(res)
    res = Dense(classes,
                activation='softmax',
                name='output_fc_layer',
                kernel_initializer=glorot_uniform(seed=0))(res)

    model = Model(inputs=_input, outputs=res, name='ResNet')

    return model
Example No. 9
def create_model(x_train, y_train, x_test, y_test):
    """
    Function to create the ML model
    """

    # Define input size for first layer
    in_size = (64, 64, 1)

    # Define number of classes predicted
    out_classes = 2

    # Define a sequential model so we can quickly make a model by adding layers to the API
    model = tf.keras.Sequential()

    # Convolve input once
    L1 = layers.Conv2D(
        filters=32,
        kernel_size=(4, 4),
        strides=[2, 2],
        input_shape=in_size,
        activation='relu',
        kernel_initializer=initializers.glorot_uniform(seed=None),
        padding='same',
        kernel_regularizer=regularizers.l2(0.01))
    model.add(L1)

    # Convolve input again
    L2 = layers.Conv2D(
        filters=16,
        kernel_size=(2, 2),
        strides=[2, 2],
        input_shape=in_size,
        activation='relu',
        kernel_initializer=initializers.glorot_uniform(seed=None),
        padding='same',
        kernel_regularizer=regularizers.l2(0.01))
    model.add(L2)

    # Pool the convolutions and extract important parts
    P1 = layers.MaxPooling2D(pool_size=3,
                             strides=2,
                             padding='valid',
                             name="P1")
    model.add(P1)

    # Flatten the pooled layer
    F = layers.Flatten(name="flatten")
    model.add(F)

    # Add dropout to the flattened layers to generalize better
    dO = layers.Dropout(0.01)
    model.add(dO)

    # First dense layer
    D1 = layers.Dense(
        256,
        activation='relu',
        name='D1',
        kernel_initializer=initializers.glorot_uniform(seed=None),
        kernel_regularizer=regularizers.l2(0.01))
    model.add(D1)

    # Output layer
    D2 = layers.Dense(out_classes, activation='softmax', name='D2')
    model.add(D2)

    # Output the structure of the model
    model.summary()

    model.compile(loss='categorical_crossentropy',
                  metrics=['accuracy'],
                  optimizer=optimizers.Adam(learning_rate=0.00001))

    # 1000 epochs is a lot, but since the initialization differs between runs,
    # 1000 effectively guarantees convergence
    result = model.fit(x_train,
                       y_train,
                       epochs=1000,
                       verbose=2,
                       validation_data=(x_test, y_test))

    # Store model after training according to the time it was made
    filepath = "./models/model-" + str(time.time()) + ".h5"
    tf.keras.models.save_model(model,
                               filepath,
                               overwrite=True,
                               include_optimizer=True)
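Models saved to .h5 as above sometimes fail to reload across Keras versions because 'GlorotUniform' cannot be resolved by name; a sketch of the usual workaround (the path is illustrative):

from tensorflow.keras.initializers import glorot_uniform
from tensorflow.keras.utils import CustomObjectScope

with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
    restored = tf.keras.models.load_model('./models/model-example.h5')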
Example No. 10
ANN_top10 = []
cmap = plt.set_cmap('cividis')
cNorm = mpl.colors.Normalize(vmin=-7, vmax=-1)

for m in modellist:
    #read in model files
    ANN_list = []

    foldername = m.split('/')[-1]
    for i in range(fold):
        json_file = open(m + '/' + 'model_' + str(i) + '.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        ANN = model_from_json(
            loaded_model_json,
            custom_objects={'GlorotUniform': glorot_uniform()})
        ANN.load_weights(workinghome + '/' + foldername + '/' + 'model_' +
                         str(i) + '.h5')
        ANN_test_predicted = ANN.predict(x_test_scale)
        ANN_list.append(ANN)
#        setup_curves_compare(site, scaler, workinghome, foldername, siteparams, ANN_list, vref, pre_scatter = np.asarray([0]), obs_scatter = np.asarray([0]), dist_scatter = np.asarray([0]), mag_scatter = np.asarray([0]))
    ANN_top10.append(ANN_list)  #the 5 fold models

    mlist = np.linspace(2.8, 5.0, 200)
    Rlist = np.linspace(10., 225., 200)
    X, Y = np.meshgrid(Rlist, mlist)
    Z = np.zeros((len(X), len(Y)))

    for i in range(len(X)):
        if site == '5coeff':
            d = {'mw': Y[i][0], 'R': X[i]}
Example No. 11
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.metrics import Recall
from keras_preprocessing.image import ImageDataGenerator

from configuration import image_directory, augmented_image_directory, \
    training_images_list_filename, training_augmented_sample_list_filename, \
    validation_images_list_filename, \
    class_map, num_classes, model_filename_first

#%% ---------------------------------------- Set-Up --------------------------------------------------------------------
SEED = 42
os.environ['PYTHONHASHSEED'] = str(SEED)
random.seed(SEED)
np.random.seed(SEED)
tf.random.set_seed(SEED)
weight_init = glorot_uniform(seed=SEED)

#%%
train_df = pd.read_csv(training_images_list_filename)
test_df = pd.read_csv(validation_images_list_filename)

datagen = ImageDataGenerator(rescale=1. / 255., validation_split=0.25)

train_generator = datagen.flow_from_dataframe(dataframe=train_df,
                                              directory=None,
                                              x_col="name",
                                              y_col="class",
                                              subset="training",
                                              batch_size=32,
                                              seed=42,
                                              shuffle=True,
Example No. 12
def result():
    
    '''
    Load model and vectorizer
    '''
    print("JUMP TO PREDICT!!!")
    
    getfile = "file.wav"
    juice = sr.AudioFile(getfile)
    with juice as source:
        audio = r.record(source)
        text = r.recognize_google(audio)
        print("TRANSCRIPTION IS: ", text)
    
    # load VAD text models
    model_val = joblib.load('model_text_valence_iemocap.pkl')
    model_act = joblib.load('model_text_activation_iemocap.pkl')
    model_dom = joblib.load('model_text_dominance_iemocap.pkl')
    vect_file = joblib.load('vect_obj_iemocap.pkl')
    
    # munge text
    message = clean_lemma(text)
    message = vect_file.transform(message).toarray()

    # Text predictions
    predictions_V = model_val.predict(message)
    predictions_A = model_act.predict(message)
    predictions_D = model_dom.predict(message)
      
    # trigger functions to read wav, munge it and predict VAD from audio
    
    # List to store lpms matrices
    wav_samples = []
    
    # get datapoints and sample rate of file and load it
    samples, sar = lib.load(getfile, sr=None)
    silence_stripped = strip_silence(samples)
    lpms_ified = convert_to_lpms(silence_stripped)
    chunk_scale_lpms_matrix(lpms_ified, wav_samples)

    # model_audio = keras.models.load_model('model_audio_iemocap_v2.h5')
    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        model_audio = load_model('model_audio_iemocap_v2.h5')

    print("Loaded model from disk")
    
    # As wav_samples is a list I can't use array indexing on it
    # convert to ndarray
    wav_samples = np.array(wav_samples)
    print("wav_samples length: ", len(wav_samples))
    print("wav_samples type: ", type(wav_samples))
    
    nRows, nCols, nDims = 20, 25, 1
    wav_samples = wav_samples.reshape(wav_samples.shape[0], nRows, nCols, nDims)
    print("RESHAPED wav_samples: ", wav_samples.shape)
    
    # Step through each 0.4 sec chunk and make a prediction, store it    
    audio_predictions = model_audio.predict(wav_samples, batch_size=32, verbose=2)
    
    print("Predictions list length: ", len(audio_predictions))
    print("Predictions slot[0] length: ", len(audio_predictions[0]))
    
    # Calculate the mean of each prediction
    audio_pred_val = audio_predictions[:, 0].mean()
    audio_pred_act = audio_predictions[:, 1].mean()
    audio_pred_dom = audio_predictions[:, 2].mean()
    
    print("Length of frame data: ", len(audio.frame_data))
    print("File sample_rate: ", audio.sample_rate)
    print(predictions_V, audio_pred_val)
    print(predictions_A, audio_pred_act)
    print(predictions_D, audio_pred_dom)
    
    text_ = [str(text)]
    
    # Provide predictions to results page
    return render_template('result.html',  # was result.html
                           pred_words=text_,
                           pred_V=predictions_V,
                           pred_A=predictions_A,
                           pred_D=predictions_D,
                           pred_Vaud=audio_pred_val,
                           pred_Aaud=audio_pred_act,
                           pred_Daud=audio_pred_dom)
Example No. 13
def train_classifier_nn():
    feature_set = pickle.load(open(FSETDIR, "rb"))
    label_set = pickle.load(open(LSETDIR, "rb"))

    num_of_classes = len(CATEGORIES)

    #label_set = to_categorical(label_set)

    # Scale (normalize) data
    feature_set = feature_set/255.0

    # Build CNN model
    model = Sequential()
    # Conv2D, 4 filters, 3x3 filter size, same input size as images
    model.add(Conv2D(4, (3,3), input_shape = feature_set.shape[1:]))
    # Activation layer, rectify linear activation
    model.add(Activation("relu"))
    # Pooling layer, max pooling2D
    model.add(MaxPooling2D(pool_size=(3,3), strides=(2,2)))

    model.add(Conv2D(4, (3,3), input_shape = feature_set.shape[1:]))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(3,3), strides=(2,2)))

    # Dense layer, requires 1D input so flatten the dataset first
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation("relu"))

    # Output layer, no activation function
    model.add(Dense(num_of_classes))

    model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"])

    best_accuracy = 0.0

    for i in range(0,10):

        # First of all, randomize weights (reinitialize the model)
        initial_weights = model.get_weights()
        backend_name = keras_backend.backend()
        if backend_name == 'tensorflow': 
            k_eval = lambda placeholder: placeholder.eval(session=keras_backend.get_session())
        elif backend_name == 'theano': 
            k_eval = lambda placeholder: placeholder.eval()
        else: 
            raise ValueError("Unsupported backend")

        new_weights = [k_eval(glorot_uniform()(w.shape)) for w in initial_weights]
        model.set_weights(new_weights)

        # Fit the model to the training data
        # Note: model will converge nicely after 10 epochs, use that or more in the final program
        result = model.fit(feature_set, label_set, batch_size=32, epochs=15, validation_split=0.1)

        accuracy = result.history["acc"][-1]

        if accuracy > best_accuracy:
            # Save model if we beat our best accuracy
            model.save("{}classifier-CNN.model".format(MODELDIR))
            best_accuracy = accuracy

    print("Save classification model with best accuracy: {}".format(best_accuracy))
Example No. 14
def Model_beta1(input_shape,
                classes,
                n_pool='average',
                n_l2=0.001,
                n_init='glorot_normal',
                **kwargs):
    """
    Arguments:
        input_shape -- tuple, dimensions of the input in the form (height, width, channels)
        classes -- integer, number of classes to be classified, defines the dimension of the softmax unit
        n_pool -- string, pool method to be used {'max', 'average'}
        n_dropout -- float, rate of dropping units
        n_l2 -- float, amount of weight decay regularization
        n_init -- string, type of kernel initializer {'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform', 'normal', 'uniform'}
        batch_norm -- boolean, whether BatchNormalization is applied to the input

    Returns:
        model -- keras.models.Model (https://keras.io)
    """
    with_NL = kwargs.get("with_NL", False)
    name = kwargs.get("name", 'AtzoriNet2')
    base_channel = kwargs.get("base_channel", 64)

    activation = kwargs.get("activation", "relu")

    if n_init == 'glorot_normal':
        kernel_init = initializers.glorot_normal(seed=0)
    elif n_init == 'glorot_uniform':
        kernel_init = initializers.glorot_uniform(seed=0)
    elif n_init == 'he_normal':
        kernel_init = initializers.he_normal(seed=0)
    elif n_init == 'he_uniform':
        kernel_init = initializers.he_uniform(seed=0)
    elif n_init == 'normal':
        kernel_init = initializers.normal(seed=0)
    elif n_init == 'uniform':
        kernel_init = initializers.uniform(seed=0)
    # kernel_init = n_init
    kernel_regl = regularizers.l2(n_l2)
    # kernel_regl = regularizers.l1(n_l2)
    ## Block 0 [Input]
    X_input = Input(input_shape, name='b0_input')
    X = X_input
    chanel = base_channel
    ################################################################
    X = ZeroPadding2D((1, 1))(X)
    X = Conv2D(chanel, (1, 5),
               padding='valid',
               kernel_regularizer=kernel_regl,
               kernel_initializer=kernel_init,
               name='b{}_conv2d_1_3x3'.format(1))(X)
    X = BatchNormalization()(X)
    X = Activation(activation, name='b{}_relu1'.format(1))(X)

    chanel = chanel * 2
    X = ZeroPadding2D((1, 1))(X)  # (8,8)
    X = Conv2D(chanel, (1, 5),
               strides=(1, 2),
               padding='valid',
               kernel_regularizer=kernel_regl,
               kernel_initializer=kernel_init,
               name='b{}_conv2d_2_3x3'.format(1))(X)
    X = BatchNormalization()(X)
    X = Activation(activation, name='b{}_relu2'.format(1))(X)

    chanel = chanel * 2
    ################################################################
    X = ZeroPadding2D((1, 1))(X)
    X = Conv2D(chanel, (5, 3),
               padding='valid',
               kernel_regularizer=kernel_regl,
               kernel_initializer=kernel_init,
               name='b{}_conv2d_1_3x3'.format(2))(X)
    X = BatchNormalization()(X)
    X = Activation(activation, name='b{}_relu1'.format(2))(X)

    chanel = chanel * 2
    X = ZeroPadding2D((1, 1))(X)  # (8,8)
    X = Conv2D(chanel, (5, 3),
               strides=(2, 2),
               padding='valid',
               kernel_regularizer=kernel_regl,
               kernel_initializer=kernel_init,
               name='b{}_conv2d_2_3x3'.format(2))(X)

    X = BatchNormalization()(X)
    X = Activation(activation, name='b{}_relu2'.format(2))(X)

    X = GlobalAveragePooling2D()(X)

    X = Dropout(0.5)(X)
    ## Block 5 [Pad -> Conv -> Softmax]
    X = Dense(classes, activation="softmax")(X)

    model = Model(
        inputs=X_input,
        outputs=X,
        name=name,
    )

    return model
Example No. 15
def model(datagen, X_train, y_train, X_val, y_val):
    
    inputs = Input(shape=(128,128,3))
    x = Convolution2D(48, (3, 3), padding='same', activation='relu')(inputs)
    x = MaxPooling2D(pool_size=(2,2))(x)
    x = Dropout({{uniform(0, 0.3)}})(x)
    
    x = Convolution2D({{choice([64,128])}}, {{choice([3, 5])}}, padding='same', activation='relu')(x)                      
    x = MaxPooling2D(pool_size=(2,2))(x)
    x = Dropout({{uniform(0, 0.5)}})(x)
    
    x = Convolution2D({{choice([128, 256])}}, {{choice([3, 5])}}, padding='same', activation='relu')(x)
    x = MaxPooling2D(pool_size=(2,2))(x)
    x = Dropout({{uniform(0, 0.7)}})(x)
    
    model_choice = {{choice(['three', 'four'])}}
    if model_choice == 'four':
    
        x = Convolution2D({{choice([128, 256])}}, {{choice([3, 5])}}, padding='same', activation='relu')(x)
        x = MaxPooling2D(pool_size=(2,2))(x)
        x = Dropout({{uniform(0, 0.8)}})(x)
     
    x = Flatten()(x)
    x = Dense(128, activation='relu',kernel_initializer=({{choice([he_normal(seed = 33),glorot_uniform(seed = 33)])}}))(x)
    x = Dense({{choice([256, 512, 1024])}}, activation='relu',kernel_initializer=({{choice([he_normal(seed = 33),glorot_uniform(seed = 33)])}}))(x)
    x = Dense(5, activation='softmax')(x)
    opt = optimizers.Adam(lr=0.001)
    model = Model(inputs=inputs, outputs=x)
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])

    # fit the model on the batches generated by datagen.flow()
    model.fit_generator(datagen.flow(X_train, y_train, batch_size=128),
                        epochs=100,
                        validation_data=(X_val, y_val))

    score, acc = model.evaluate(X_val, y_val, verbose=0)
  
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example No. 16
    def build(input_shape, classes):
        model = Sequential()
        filter_num = ['None',32,64,128,256]
        kernel_size = ['None',8,8,8,8]
        conv_stride_size = ['None',1,1,1,1]
        pool_stride_size = ['None',4,4,4,4]
        pool_size = ['None',8,8,8,8]

        model.add(Conv1D(filters=filter_num[1], kernel_size=kernel_size[1],input_shape=input_shape,
                         strides=conv_stride_size[1],padding='same',
                          name='block1_conv1'))
        model.add(BatchNormalization(axis=-1))
        model.add(ELU(alpha=1.0, name='block1_adv_act1'))
        model.add(Conv1D(filters=filter_num[1], kernel_size=kernel_size[1],
                         strides=conv_stride_size[1], padding='same',
                          name='block1_conv2'))
        model.add(BatchNormalization(axis=-1))
        model.add(ELU(alpha=1.0, name='block1_adv_act2'))
        model.add(MaxPooling1D(pool_size=pool_size[1], strides=pool_stride_size[1],
                               padding= 'same', name='block1_pool'))
        model.add(Dropout(0.2, name='block1_dropout'))

        model.add(Conv1D(filters=filter_num[2], kernel_size=kernel_size[2],
                         strides=conv_stride_size[2], padding='same',
                         name='block2_conv1'))
        model.add(BatchNormalization())
        model.add(Activation('relu', name='block2_act1'))

        model.add(Conv1D(filters=filter_num[2], kernel_size=kernel_size[2],
                         strides=conv_stride_size[2], padding='same',
                          name='block2_conv2'))
        model.add(BatchNormalization())
        model.add(Activation('relu', name='block2_act2'))
        model.add(MaxPooling1D(pool_size=pool_size[2], strides=pool_stride_size[2],
                               padding='same', name='block2_pool'))
        model.add(Dropout(0.2, name='block2_dropout'))

        model.add(Conv1D(filters=filter_num[3], kernel_size=kernel_size[3],
                         strides=conv_stride_size[3], padding='same',
                          name='block3_conv1'))
        model.add(BatchNormalization())
        model.add(Activation('relu', name='block3_act1'))
        model.add(Conv1D(filters=filter_num[3], kernel_size=kernel_size[3],
                         strides=conv_stride_size[3], padding='same',
                          name='block3_conv2'))
        model.add(BatchNormalization())
        model.add(Activation('relu', name='block3_act2'))
        model.add(MaxPooling1D(pool_size=pool_size[3], strides=pool_stride_size[3],
                               padding='same', name='block3_pool'))
        model.add(Dropout(0.2, name='block3_dropout'))

        model.add(Conv1D(filters=filter_num[4], kernel_size=kernel_size[4],
                         strides=conv_stride_size[4], padding='same',
                          name='block4_conv1'))
        model.add(BatchNormalization())
        model.add(Activation('relu', name='block4_act1'))
        model.add(Conv1D(filters=filter_num[4], kernel_size=kernel_size[4],
                         strides=conv_stride_size[4], padding='same',
                         name='block4_conv2'))
        model.add(BatchNormalization())
        model.add(Activation('relu', name='block4_act2'))
        model.add(MaxPooling1D(pool_size=pool_size[4], strides=pool_stride_size[4],
                               padding='same', name='block4_pool'))
        model.add(Dropout(0.2, name='block4_dropout'))


        model.add(Flatten(name='flatten'))
        model.add(Dense(512, kernel_initializer=glorot_uniform(seed=0), name='fc1'))
        model.add(BatchNormalization())
        model.add(Activation('relu', name='fc1_act'))

        model.add(Dropout(0.7, name='fc1_dropout'))

        model.add(Dense(512, kernel_initializer=glorot_uniform(seed=0), name='fc2'))
        model.add(BatchNormalization())
        model.add(Activation('relu', name='fc2_act'))

        model.add(Dropout(0.5, name='fc2_dropout'))

        model.add(Dense(classes, kernel_initializer=glorot_uniform(seed=0), name='fc3'))
        model.add(Activation('softmax', name="softmax"))
        return model
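A sketch of instantiating the builder above (it assumes build is reachable as a static method or standalone function; the 1D input length and class count are arbitrary assumptions):

model = build(input_shape=(5000, 1), classes=10)
model.summary()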
Example No. 17
def custom_mlp_with_mse(input_shape,
                        depth,
                        af='relu',
                        num_classes=1,
                        init_type='None'):
    """ mlp model with mse loss
    # Returns
        model (Model): Keras model instance
    """
    inputs = tf.keras.Input(shape=input_shape)

    def make_initializer():
        # 'xia' (Xavier) selects Glorot uniform; anything else falls back to
        # a truncated normal
        if init_type == 'xia':
            return glorot_uniform()
        return TruncatedNormal(mean=0.0, stddev=0.5, seed=1)

    def hidden_layer(x_in):
        # one 10-unit hidden layer with the activation requested by `af`
        x_out = Dense(10,
                      kernel_initializer=make_initializer(),
                      bias_initializer='zeros')(x_in)
        if af == 'LeakyReLU':
            return Activation(LeakyReLU(alpha=0.1))(x_out)
        if af == 'PReLU':
            return Activation(PReLU())(x_out)
        if af == 'ELU':
            return Activation(ELU(alpha=0.1))(x_out)
        return Activation(af)(x_out)

    x = hidden_layer(inputs)
    for _ in range(depth):
        x = hidden_layer(x)

    outputs = Dense(num_classes,
                    activation='sigmoid',
                    kernel_initializer='he_normal')(x)

    # Instantiate model.
    model = Model(inputs=inputs, outputs=outputs)
    return model
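The function returns an uncompiled model despite the mse in its name; a sketch of compiling and inspecting it (the input width, depth, and optimizer are illustrative assumptions):

mlp = custom_mlp_with_mse(input_shape=(8,), depth=3, af='ELU', init_type='xia')
mlp.compile(optimizer='adam', loss='mse')
mlp.summary()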
Example No. 18
def identity_block(X, f, filters, stage, block):
    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Retrieve Filters
    F1, F2, F3 = filters

    # Save the input value. You'll need this later to add back to the main path.
    X_shortcut = X

    # First component of main path
    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    # Second component of main path (≈3 lines)
    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    # Third component of main path (≈2 lines)
    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X
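Because the shortcut is added back without a projection, identity_block() only works when F3 matches the input channel count. A shape-check sketch (input sizes are arbitrary assumptions):

X_in = tf.keras.Input(shape=(14, 14, 256))
out = identity_block(X_in, f=3, filters=[64, 64, 256], stage=3, block='b')
print(out.shape)  # (None, 14, 14, 256): spatial dims and channels unchanged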
Example No. 19
def runANN_k(workinghome, foldername, hlayers, hunits_list, site, plot, epochs, fold, train_model):
#    AIClist = []
#    sigmalist = []
#    for i in range(len(foldername_list)):
    act = 'tanh'
    hunits = hunits_list
    
    if site == 'vs30':
        vref = 760.
#        vref = 519.
    elif site == 'kappa':
        vref = 0.06
    else:
        vref = 760.
    
    lr = 0.01
    
    if not os.path.exists(workinghome):
        os.mkdir(workinghome)
    
    if not os.path.exists(workinghome + '/' + foldername):
        os.mkdir(workinghome + '/' + foldername)
        os.mkdir(workinghome + '/' + foldername + '/testing')
        os.mkdir(workinghome + '/' + foldername + '/training')
        os.mkdir(workinghome + '/' + foldername + '/validation')
        os.mkdir(workinghome + '/' + foldername + '/curves')
    if not os.path.exists(workinghome + '/' + foldername+ '/curves'):
        os.mkdir(workinghome + '/' + foldername + '/curves')

    
    if site == 'vs30' or site == 'none':
        db = pickle.load(open('/Users/aklimase/Documents/GMM_ML/database_vs30.pckl', 'rb'))
    else:
        db = pickle.load(open('/Users/aklimase/Documents/GMM_ML/database_kappa.pckl', 'rb'))
    
    if site == '5coeff':
        d = {'mw': db.mw,'R': db.r,'sta': db.sta, 'pga': np.log(db.pga/9.81), 'elat': db.elat, 'elon': db.elon,'stlat': db.stlat,'stlon': db.stlon}
#        d = {'mw': db.mw,'R': db.r,'mw2': db.mw**2.,'logR': np.log(db.r),'sta': db.sta, 'pga': np.log(db.pga/9.81)}
    
    else:
        d = {'mw': db.mw,'R': db.r,'sta': db.sta,'vs30': db.vs30.flatten(), 'pga': np.log(db.pga/9.81), 'elat': db.elat, 'elon': db.elon,'stlat': db.stlat,'stlon': db.stlon}
    
    df = pd.DataFrame(data=d)
    
    train, test_valid = train_test_split(df, test_size=0.4, random_state = seed)
    valid, test = train_test_split(test_valid, test_size=1/2., random_state = seed)
    
    y_train = train['pga']
    x_train_sta = train['sta']
    x_train_coor = train[['elat', 'elon', 'stlat', 'stlon']]
    x_train =  train.drop(['pga','sta','elat', 'elon', 'stlat', 'stlon'], axis = 1)
    
    y_test = test['pga']
    x_test_sta = test['sta']
    x_test_coor = test[['elat', 'elon', 'stlat', 'stlon']]
    x_test =  test.drop(['pga','sta','elat', 'elon', 'stlat', 'stlon'], axis = 1)
    
    y_valid = valid['pga']
    x_valid_sta = valid['sta']
    x_valid_coor = valid[['elat', 'elon', 'stlat', 'stlon']]
    x_valid =  valid.drop(['pga','sta','elat', 'elon', 'stlat', 'stlon'], axis = 1)
    
    scaler = StandardScaler()
    scaler.fit(x_train)
    x_train_scale = scaler.transform(x_train)
    x_test_scale = scaler.transform(x_test)
    x_valid_scale = scaler.transform(x_valid)
    
    kfold = KFold(n_splits=fold, shuffle=True)
    split = 0
    
    ann_predicted_total_test = []
    ann_predicted_total_valid = []
    ann_predicted_total_train = []
    test_std_kfold = []
    ANN_list = []
    

    
    for train_index, test_index in kfold.split(x_train_scale):
        n = split
        X_traink, X_validk = x_train_scale[train_index], x_train_scale[test_index]
        y_traink, y_validk = np.asarray(y_train)[train_index], np.asarray(y_train)[test_index]


        if train_model == True:
            ANN = fitANN(act, hlayers, hunits, lr, epochs, X_traink, y_traink, X_validk, y_validk, foldername, workinghome)
            model_json = ANN.to_json()
            with open(workinghome + '/' +  foldername  + '/'+ 'model_' + str(n) + '.json', "w") as json_file:
                json_file.write(model_json)
                # serialize weights to HDF5
                ANN.save_weights(workinghome + '/' +  foldername  + '/'+ 'model_' + str(n) + '.h5')
                    
        #else the model is trained and saved already
        else:
            json_file = open(workinghome + '/' +  foldername   + '/' + 'model_' + str(n) + '.json', 'r')
            loaded_model_json = json_file.read()
            json_file.close()
            ANN = model_from_json(loaded_model_json, custom_objects={'GlorotUniform': glorot_uniform()})
            ANN.load_weights(workinghome + '/' +  foldername   + '/' + 'model_' + str(n) + '.h5')    
        
        ANN_list.append(ANN)

        ANN_test_predicted = ANN.predict(x_test_scale)
        ANN_test_predicted = ANN_test_predicted.flatten()
        test_std = np.std(y_test - ANN_test_predicted)
        print('ANN  ' + str(split) + ' test_std: ' + str(test_std))
        test_std_kfold.append(test_std)
        ann_predicted_total_test.append(ANN_test_predicted)
        
        ANN_valid_predicted = ANN.predict(x_valid_scale)
        ANN_valid_predicted = ANN_valid_predicted.flatten()
        valid_std = np.std(y_valid - ANN_valid_predicted)
        print('ANN  ' + str(split) + ' valid_std: ' + str(valid_std))
        ann_predicted_total_valid.append(ANN_valid_predicted)
    
#            ANN_train_predictedk = ANN.predict(X_traink)
#            ANN_train_predictedk = ANN_train_predictedk.flatten()
#            train_std = np.std(y_traink - ANN_train_predictedk)   
        ANN_train_predicted = ANN.predict(x_train_scale)
        ANN_train_predicted = ANN_train_predicted.flatten()
        train_std = np.std(y_train - ANN_train_predicted)
        print('ANN  ' + str(split) + ' train_std: ' + str(train_std))
        ann_predicted_total_train.append(ANN_train_predicted)
        
#        setup_test_curves(site, scaler, workinghome, foldername, siteparams, [ANN], ndir = n)
#        residual_histo(y_traink, y_valid, y_test, ANN_train_predictedk, ANN_valid_predicted, ANN_test_predicted, workinghome, foldername, n=n)
        
        split+=1
        plt.close('all')

    #overall prediction is average of k folds  
    average_pre_test = np.average(ann_predicted_total_test, axis = 0)
    total_std = np.std(y_test - average_pre_test)
    average_pre_valid = np.average(ann_predicted_total_valid, axis = 0)
    valid_std = np.std(y_valid - average_pre_valid)
    average_pre_train = np.average(ann_predicted_total_train, axis = 0)
    train_std = np.std(y_train - average_pre_train)

    
    residual_histo(y_train, y_valid, y_test, average_pre_train, average_pre_valid, average_pre_test, workinghome, foldername, n='')
Example No. 20
def ResNet50_manual(input_shape=(228, 304, 3), dropoutRate=0.3):
    """
    Implementation of the popular ResNet50 the following architecture:
    CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3
    -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER

    Arguments:
    input_shape -- shape of the images of the dataset
    classes -- integer, number of classes

    Returns:
    model -- a Model() instance in Keras
    """

    # Define the input as a tensor with shape input_shape
    X_input = Input(input_shape)

    # Zero-Padding
    X = ZeroPadding2D((3, 3))(X_input)

    # Stage 1
    X = Conv2D(64, (7, 7),
               strides=(2, 2),
               name='conv1',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name='bn_conv1')(X)
    X = Activation('relu')(X)
    #X = Dropout(rate=dropoutRate)(X)
    X = MaxPooling2D((3, 3), strides=(2, 2))(X)

    # Stage 2
    X = convolutional_block(X,
                            f=3,
                            filters=[64, 64, 256],
                            stage=2,
                            block='a',
                            s=1)
    X = identity_block(X, 3, [64, 64, 256], stage=2, block='b')
    X = identity_block(X, 3, [64, 64, 256], stage=2, block='c')
    X = Dropout(rate=dropoutRate)(X)

    # Stage 3 (≈4 lines)
    X = convolutional_block(X,
                            f=3,
                            filters=[128, 128, 512],
                            stage=3,
                            block='a',
                            s=2)
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='b')
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='c')
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='d')
    X = Dropout(rate=dropoutRate)(X)

    # Stage 4 (≈6 lines)
    X = convolutional_block(X,
                            f=3,
                            filters=[256, 256, 1024],
                            stage=4,
                            block='a',
                            s=2)
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f')
    X = Dropout(rate=dropoutRate)(X)

    # Stage 5 (≈3 lines)
    X = convolutional_block(X,
                            f=3,
                            filters=[512, 512, 2048],
                            stage=5,
                            block='a',
                            s=2)
    X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b')
    X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c')
    X = Dropout(rate=dropoutRate)(X)

    # =============================================================================
    #     # AVGPOOL (≈1 line). Use "X = AveragePooling2D(...)(X)"
    #     X = AveragePooling2D(pool_size=(2, 2))(X)
    #
    #
    #     # output layer
    #     X = Flatten()(X)
    #     X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)
    # =============================================================================

    # Stage 6
    X = Conv2D(1024, (1, 1),
               name='conv6',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = upConv_block(X, 512, stage=6, block=1)
    X = upConv_block(X, 256, stage=6, block=2)
    X = upConv_block(X, 128, stage=6, block=3)
    X = upConv_block(X, 64, stage=6, block=4)
    X = upConv_block(X, 32, stage=6, block=5)

    # Stage 7
    X = Conv2D(1, (3, 3),
               name='conv7',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = Activation('relu')(X)

    # Create model
    model = Model(inputs=X_input, outputs=X, name='ResNet50')

    return model
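A quick smoke test for the encoder-decoder above; this is a sketch only, assuming the convolutional_block, identity_block, and upConv_block helpers and the Keras layers used above are already in scope:

# Sketch only: build the model and inspect the decoder output shape.
model = ResNet50_manual(input_shape=(228, 304, 3), dropoutRate=0.3)
model.summary()
print(model.output_shape)  # single-channel map; the unpadded (3, 3) conv7 trims the border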
Example no. 21
def ResNet50(input_shape=(150, 150, 3), classes=2):
    """
    Implementation of the popular ResNet50 the following architecture:
    CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3
    -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER

    Arguments:
    input_shape -- shape of the images of the dataset
    classes -- integer, number of classes

    Returns:
    modelResNet -- a modelResNet() instance in Keras
    """

    # Define the input as a tensor with shape input_shape
    X_input = Input(input_shape)

    # Zero-Padding
    X = ZeroPadding2D((3, 3))(X_input)

    # Stage 1
    X = Conv2D(64, (7, 7),
               strides=(2, 2),
               name='conv1',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name='bn_conv1')(X)
    X = Activation('relu')(X)
    X = MaxPooling2D((3, 3), strides=(2, 2))(X)

    # Stage 2
    X = convolutional_block(X,
                            f=3,
                            filters=[64, 64, 256],
                            stage=2,
                            block='a',
                            s=1)
    X = identity_block(X, 3, [64, 64, 256], stage=2, block='b')
    X = identity_block(X, 3, [64, 64, 256], stage=2, block='c')

    # Stage 3
    X = convolutional_block(X,
                            f=3,
                            filters=[128, 128, 512],
                            stage=3,
                            block='a',
                            s=2)
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='b')
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='c')
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='d')

    # Stage 4
    X = convolutional_block(X,
                            f=3,
                            filters=[256, 256, 1024],
                            stage=4,
                            block='a',
                            s=2)
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f')

    # Stage 5
    X = convolutional_block(X,
                            f=3,
                            filters=[512, 512, 2048],
                            stage=5,
                            block='a',
                            s=2)
    X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b')
    X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c')

    # AVGPOOL
    X = AveragePooling2D((2, 2), name="avg_pool")(X)

    # output layer
    X = Flatten()(X)
    X = Dense(classes,
              activation='softmax',
              name='fc' + str(classes),
              kernel_initializer=glorot_uniform(seed=0))(X)

    # Create modelResNet
    modelResNet = Model(inputs=X_input, outputs=X, name='ResNet50')

    return modelResNet
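A hedged usage sketch for the classifier (again assuming the block helpers above are defined): build the network and compile it for two-class training.

# Sketch only: instantiate and compile the 2-class classifier.
modelResNet = ResNet50(input_shape=(150, 150, 3), classes=2)
modelResNet.compile(optimizer='adam',
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])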
Example no. 22
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import CustomObjectScope
from tensorflow.keras.initializers import glorot_uniform


app = Flask(__name__)
bootstrap = Bootstrap(app)
SECRET_KEY = os.urandom(32)
app.config['SECRET_KEY'] = SECRET_KEY  # use the generated random key rather than a hard-coded string
app.config['WTF_CSRF_CHECK_DEFAULT'] = False
CsrfProtect(app)
modelConfiguration = r'darknet-yolo/obj.cfg'
modelWeights = r'darknet-yolo/obj_60000.weights'


with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
    charModel = load_model(r'charRecognition/model.h5')


UPLOAD_FOLDER = r'static/images'
net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

for file in os.listdir(UPLOAD_FOLDER):
    os.remove(os.path.join(UPLOAD_FOLDER, file))

@app.route('/', methods=['GET', 'POST'])
def home():
    output = ''
    form = uploadImage()
Example no. 23
    def build(self, input_shape):
        super(ScoreLayer, self).build(input_shape)
        if self.use_global:
            self.global_bias = self.add_weight(shape=(1,),
                                               initializer=glorot_uniform(self.seed))
Example no. 24
  def RNN_model(self, train, test, train_size, data, look_back=1):
    """
    Recurrent Neural Network model.

    Args:
    -----
    train : array_like
    test : array_like
    train_size : int
    data : ndarray or DataFrame
    look_back : int
        Window size.

    Returns:
    --------
    Plot of the time series prediction.
    Test score RMSE and normalized RMSE.
    """
    plt.style.use('default')

    # %%
    l2 = regularizers.l2
    dataset = data.values  # .as_matrix() is deprecated; switched to .values
    scaler = MinMaxScaler(feature_range=(0, 1))
    dataset = scaler.fit_transform(dataset) #fit scaler
    # reshape into X=t and Y=t+1
    # lookback is the window size
    trainX, trainY = create_dataset(train, look_back)
    testX, testY = create_dataset(test, look_back)
    print(data)

    # %%
    # reshape input to be [samples, time steps, features]
    trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
    testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))

    # %%
    ## Train Model

    # %%
    # numEpoch: number of passes through the data

    numEpoch = 100
    vl_splt = 730.0 / 5475.0  # ~0.133; apparently two years of a 15-year daily series
    initBias = initializers.glorot_uniform(seed=0)
    initKernel = initializers.Orthogonal(gain=1.0, seed=0)


    # %%
    # flatten the extra dimension (leftover from a dense-ANN experiment; unused below)
    flat_TrainX = np.reshape(trainX, (len(trainX), look_back))
    flat_TestX = np.reshape(testX, (len(testX), look_back))

    # %%
    # create and fit the simpleRNN
    simpleRNN = Sequential()
    simpleRNN.add(SimpleRNN(8,
                            input_shape=(1, look_back),
                            kernel_initializer=initKernel,
                            bias_initializer=initBias,
                            # kernel_regularizer=l2(0.00001),
                            # recurrent_regularizer=l2(0.0001),
                            bias_regularizer=l2(0.00001),
                            dropout=0.01,
                            # recurrent_dropout=0.01
                            ))


    simpleRNN.add(Dense(1,
                        kernel_initializer=initKernel,
                        bias_initializer=initBias,
                        # kernel_regularizer=l2(0.001),
                        # bias_regularizer=l2(0.001)
                        ))

    simpleRNN.compile(loss='mean_squared_error', optimizer='adam')
    simpleRNN.fit(trainX, trainY, epochs=numEpoch,
                  batch_size=1, verbose=2, validation_split=vl_splt)

    # %%
    ## Test Model

    # %%
    trainY = scaler.inverse_transform([trainY])
    testY = scaler.inverse_transform([testY])

    # %%
    # make predictions
    RNN_trainPredict = simpleRNN.predict(trainX)
    RNN_testPredict = simpleRNN.predict(testX)
    # invert predictions
    RNN_trainPredict = scaler.inverse_transform(RNN_trainPredict)
    RNN_testPredict = scaler.inverse_transform(RNN_testPredict)

    # calculate root mean squared error
    RNN_trainScore = math.sqrt(mean_squared_error(trainY[0], RNN_trainPredict[:,0]))
    #print('Train Score: %.2f RMSE' % (RNN_trainScore))
    RNN_testScore = math.sqrt(mean_squared_error(testY[0], RNN_testPredict[:,0]))
    print('Test Score: %.2f RMSE' % (RNN_testScore))

    # normalized RMSE
    y_min, y_max = min(trainY[0]), max(trainY[0])
    y_range = y_max - y_min
    print("Normalized RMSE: %s" % (RNN_testScore / y_range))

    # %%
    # shift train predictions for plotting
    RNN_trainPredictPlot = np.empty_like(dataset)
    RNN_trainPredictPlot[:, :] = np.nan
    RNN_trainPredictPlot[look_back:len(RNN_trainPredict)+look_back, :] = RNN_trainPredict
    # shift test predictions for plotting
    RNN_testPredictPlot = np.empty_like(dataset)
    RNN_testPredictPlot[:, :] = np.nan
    RNN_testPredictPlot[len(RNN_trainPredict)+
                        (look_back*2)+1:len(dataset)-1, :] = RNN_testPredict

    # plot baseline and predictions
    plt.figure(figsize=(18,8))
    #plt.plot(scaler.inverse_transform(dataset))
    #plt.plot(RNN_trainPredictPlot)


    dates = data.index[train_size:-1].values

    RNN_testPredictPlot = [RNN_testPredictPlot[i][0] for i in range(train_size+1, len(RNN_testPredictPlot))]
    pred = pd.DataFrame({"Date": dates, "Volume": RNN_testPredictPlot})
    pred.Date = pd.to_datetime(pred.Date)
    pred.set_index("Date", inplace=True)

    plt.title("Water Volume (seasoned predictions)")
    plt.plot(data)
    plt.plot(pred)
    plt.show()
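RNN_model calls a create_dataset helper that is not shown in this example. A minimal sketch consistent with how it is used above (a sliding window of look_back values predicting the next value of a single-feature array) could be:

import numpy as np

def create_dataset(dataset, look_back=1):
    # Sketch of the assumed helper: X is a window of `look_back` values,
    # y is the value immediately after the window.
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        dataX.append(dataset[i:(i + look_back), 0])
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)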
Example no. 25
#------------------------------------------------------------------------------

# load data
labels_rand_train = np.load(dir_anfiles + "/labels_train_sounds.npy")
labels_rand_test = np.load(dir_anfiles + "/labels_test_sounds.npy")
an_l_rand_train = np.load(dir_anfiles + "/an_l_train_sounds.npy")
an_l_rand_test = np.load(dir_anfiles + "/an_l_test_sounds.npy")
an_r_rand_train = np.load(dir_anfiles + "/an_r_train_sounds.npy")
an_r_rand_test = np.load(dir_anfiles + "/an_r_test_sounds.npy")
print("Loading arrays completed")

# load model
#t.tic()
mymodel = load_model(dir_mofiles + "/A_" + modelname + ".h5",
                     custom_objects={
                         'GlorotUniform': glorot_uniform(),
                         "cust_mean_squared_error": cust_mean_squared_error,
                         "cos_distmet_2D_angular": cos_distmet_2D_angular
                     })
mymodel.summary()
#t.toc("loading the model took ")
print("Loading model completed")

#------------------------------------------------------------------------------
# Training
#------------------------------------------------------------------------------

# train the model
#t.tic()
history = mymodel.fit([an_l_rand_train, an_r_rand_train],
                      labels_rand_train,
Example no. 26
def identity_block(X, f, filters, stage, block):
    """
    Shamelessly copied from Coursera.
    Implementation of the identity block as defined in Figure 4
    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- integer, specifying the shape of the middle CONV's window for the main
     path
    filters -- python list of integers, defining the number of filters in the
    CONV layers of the main path
    stage -- integer, used to name the layers, depending on their position in
    the network
    block -- string/character, used to name the layers, depending on their
    position in the network
    Returns:
    X -- output of the identity block, tensor of shape (n_H, n_W, n_C)
    """
    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    # Retrieve Filters
    F1, F2, F3 = filters

    X_shortcut = X

    # First component of main path
    X = Conv3D(filters=F1,
               kernel_size=(1, 1, 1),
               strides=(1, 1, 1),
               padding='valid',
               name=conv_name_base + '2a',
               kernel_initializer=glorot_uniform(seed=0))(X)

    # channels axis is 4 for 5D channels-last tensors (m, d, h, w, c)
    X = BatchNormalization(axis=4, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    # Second component of main path
    X = Conv3D(filters=F2,
               kernel_size=(f, f, f),
               strides=(1, 1, 1),
               padding='same',
               name=conv_name_base + '2b',
               kernel_initializer=glorot_uniform(seed=0))(X)

    X = BatchNormalization(axis=4, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    X = Conv3D(filters=F3,
               kernel_size=(1, 1, 1),
               strides=(1, 1, 1),
               padding='same',
               name=conv_name_base + '2c',
               kernel_initializer=glorot_uniform(seed=0))(X)

    X = BatchNormalization(axis=4, name=bn_name_base + '2c')(X)

    # Final step: Add shortcut value to main path, and pass it through a RELU
    # activation
    X = Add()([X_shortcut, X])
    X = Activation('relu')(X)

    return X
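A short smoke test for the 3D identity block; the shapes below are illustrative only, and Conv3D, BatchNormalization, Activation, Add and glorot_uniform are assumed imported as in the example above:

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

# Input channels must equal F3 (here 256) so the Add() shortcut matches.
inp = Input(shape=(8, 8, 8, 256))
out = identity_block(inp, f=3, filters=[64, 64, 256], stage=1, block='a')
Model(inp, out).summary()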
Example no. 27
def conv_block(X, f, filters, activation, s=2, epsilon=0.001, axis=3):
    # epsilon and axis were free (module-level) names in the original snippet;
    # they are made explicit parameters here (defaults assumed) so the function
    # is self-contained

    X_shortcut = X

    X = Conv2D(filters=filters, kernel_size=(1, 1), strides=(s, s),
               padding='valid', kernel_initializer=glorot_uniform(seed=0))(X)
    if epsilon != 0:
        X = BatchNormalization(epsilon=epsilon, axis=axis)(X)
    X = Activation(activation)(X)

    X = Conv2D(filters=filters, kernel_size=(f, f), strides=(1, 1),
               padding='same', kernel_initializer=glorot_uniform(seed=0))(X)
    if epsilon != 0:
        X = BatchNormalization(epsilon=epsilon, axis=axis)(X)

    X_shortcut = Conv2D(filters=filters, kernel_size=(1, 1), strides=(s, s),
                        padding='valid', kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
    if epsilon != 0:
        X_shortcut = BatchNormalization(epsilon=epsilon, axis=axis)(X_shortcut)


    X = Add()([X, X_shortcut])
    X = Activation(activation)(X)

    return X
Example no. 28
def HOT_RES_BACILLUS_03(input_shape, n_class):
    n_filters = 128
    # Input layer
    x = Input(shape=input_shape)

    conv_x = Conv2D(
        filters=100,
        kernel_size=(4, 15),
        padding='same'
    )(x)
    conv_x = BatchNormalization()(conv_x)
    conv_x = Activation('relu')(conv_x)

    conv_y = Conv2D(filters=n_filters, kernel_size=(1, 5), padding='same')(conv_x)
    conv_y = BatchNormalization()(conv_y)
    conv_y = Activation('relu')(conv_y)

    conv_z = Conv2D(filters=n_filters, kernel_size=(1, 3), padding='same')(conv_y)
    conv_z = BatchNormalization()(conv_z)

    # expand channels for the sum
    shortcut_y = Conv2D(filters=n_filters, kernel_size=(4, 1), padding='same')(x)
    shortcut_y = BatchNormalization()(shortcut_y)

    output_block_1 = Add()([shortcut_y, conv_z])
    output_block_1 = Activation('relu')(output_block_1)

    # BLOCK 2

    conv_x = Conv2D(filters=n_filters * 2, kernel_size=(1, 8), padding='same')(output_block_1)
    conv_x = BatchNormalization()(conv_x)
    conv_x = Activation('relu')(conv_x)

    conv_y = Conv2D(filters=n_filters * 2, kernel_size=(1, 5), padding='same')(conv_x)
    conv_y = BatchNormalization()(conv_y)
    conv_y = Activation('relu')(conv_y)

    conv_z = Conv2D(filters=n_filters * 2, kernel_size=(1, 3), padding='same')(conv_y)
    conv_z = BatchNormalization()(conv_z)

    # expand channels for the sum
    shortcut_y = Conv2D(filters=n_filters * 2, kernel_size=(1, 1), padding='same')(output_block_1)
    shortcut_y = BatchNormalization()(shortcut_y)

    output_block_2 = Add()([shortcut_y, conv_z])
    output_block_2 = Activation('relu')(output_block_2)

    # BLOCK 3

    conv_x = Conv2D(filters=n_filters * 2, kernel_size=(1, 8), padding='same')(output_block_2)
    conv_x = BatchNormalization()(conv_x)
    conv_x = Activation('relu')(conv_x)

    conv_y = Conv2D(filters=n_filters * 2, kernel_size=(1, 5), padding='same')(conv_x)
    conv_y = BatchNormalization()(conv_y)
    conv_y = Activation('relu')(conv_y)

    conv_z = Conv2D(filters=n_filters * 2, kernel_size=(1, 3), padding='same')(conv_y)
    conv_z = BatchNormalization()(conv_z)

    # no need to expand channels because they are equal
    shortcut_y = BatchNormalization()(output_block_2)

    output_block_3 = Add()([shortcut_y, conv_z])
    output_block_3 = Activation('relu')(output_block_3)

    gap_layer = GlobalAveragePooling2D()(output_block_3)
    # X = AveragePooling2D((1, 2), name="avg_pool")(X)

    # Fully connected layers
    # X = Flatten()(X)
    X = Dropout(0.2)(gap_layer)
    outputs = Dense(n_class,
                    activation='sigmoid',
                    name='fc' + str(n_class),
                    kernel_initializer=glorot_uniform(seed=0))(X)

    # Create model object
    model = models.Model(inputs=[x], outputs=[outputs])

    return model
Example no. 29
    X = Activation('relu')(X)

    return X


input_shape = (96, 96, 1)

# Input tensor shape
X_input = Input(input_shape)

# Zero-padding
X = ZeroPadding2D((3, 3))(X_input)

# 1 - stage
X = Conv2D(64, (7, 7), strides=(2, 2), name='conv1',
           kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3, name='bn_conv1')(X)
X = Activation('relu')(X)
X = MaxPooling2D((3, 3), strides=(2, 2))(X)

# 2 - stage
X = res_block(X, filter=[64, 64, 256], stage=2)

# 3 - stage
X = res_block(X, filter=[128, 128, 512], stage=3)


# Average Pooling
X = AveragePooling2D((2, 2), name='Average_Pooling')(X)

# Final layer
Example no. 30
def create_model(input_size, hyper_param):
    """ Creates a model using the provided hyperparamaters

        Arguments:
            input_size - number of inputs provided to network
            hyper_param - instance of cHyperParameters defining hyperparamters to be used
    """

    #TODO: add parameter for kernel and activity regularizers
    #TODO: add parameter for activation function
    #TODO: Try a KL-divergence sparse autoencoder
    act_func = 'elu'

    # Input layer:
    model = Sequential()

    #ENCODER
    # First hidden layer, connected to input vector X
    model.add(Dropout(hyper_param.drop_out[0], input_shape=(input_size,)))

    model.add(Dense(hyper_param.layers[0],
                    activation=act_func,
                    kernel_initializer=initializers.glorot_uniform(seed=hyper_param.random_seed),
                    activity_regularizer=regularizers.l1(hyper_param.kernel_reg[0]),
                    #kernel_regularizer=regularizers.l2(kernel_reg[i]), 
                    ))

    model.add(Dropout(hyper_param.drop_out[1]))
    #print("Encoder: Added layer -> " + str(input_size) + ":" + str(hyper_param.layers[0]) + " k_reg: " + str(hyper_param.kernel_reg[0]) + " drop_out: " + str(hyper_param.drop_out[0]))

    for i in range(1,len(hyper_param.layers)-1):
        model.add(Dense(hyper_param.layers[i],
                        activation=act_func,
                        #kernel_regularizer=regularizers.l2(kernel_reg[i]),
                        activity_regularizer=regularizers.l1(hyper_param.kernel_reg[i]),
                        kernel_initializer=initializers.glorot_uniform(seed=hyper_param.random_seed)))
        model.add(Dropout(hyper_param.drop_out[i+1]))
        #print("Encoder: Added layer -> " + str(hyper_param.layers[i]) + " k_reg: " + str(hyper_param.kernel_reg[i]) + " i is " + str(i))


    #BOTTLENECK
    model.add(Dense(hyper_param.layers[-1],
                    activation=act_func,
                    #kernel_regularizer=regularizers.l2(kernel_reg[-1]),
                    kernel_initializer=initializers.glorot_uniform(seed=hyper_param.random_seed)))
    #print("Bottleneck: Added layer - nodes: " + str(hyper_param.layers[-1]) + " k_reg: " + str(hyper_param.kernel_reg[-1]))

    #DECODER
    for i in range(len(hyper_param.layers)-2,-1,-1):
        model.add(Dense(hyper_param.layers[i],
                        activation=act_func,
                        #kernel_regularizer=regularizers.l2(kernel_reg[i]),
                        activity_regularizer=regularizers.l1(hyper_param.kernel_reg[i]),
                        kernel_initializer=initializers.glorot_uniform(seed=hyper_param.random_seed)))
        #print("Decoder: Added layer -> " + str(hyper_param.layers[i]) + " k_reg: " + str(hyper_param.kernel_reg[i]) + " i is " + str(i))

    model.add(Dense(input_size,
                    #kernel_regularizer=regularizers.l2(0.001),
                    kernel_initializer=initializers.glorot_uniform(seed=hyper_param.random_seed)))

    #print("Decoder: Added layer -> " + str(input_size) + " k_reg: " + str(hyper_param.kernel_reg[-1]))
    
    #TODO: Check for other loss functions
    #https://medium.com/@syoya/what-happens-in-sparse-autencoder-b9a5a69da5c6
    model.compile(loss='mse', optimizer='adam')
    
    print("\nModel Summary...")
    print(model.summary())

    return model
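The cHyperParameters class is not shown in this example; as an illustration only, a minimal stand-in exposing just the fields create_model actually reads (layers, drop_out, kernel_reg, random_seed) might look like this:

from dataclasses import dataclass, field

@dataclass
class cHyperParameters:
    # Hypothetical stand-in; field names mirror the attributes used above.
    layers: list = field(default_factory=lambda: [64, 32, 8])        # widths; last entry is the bottleneck
    drop_out: list = field(default_factory=lambda: [0.1, 0.1, 0.1])  # input dropout plus one per encoder layer
    kernel_reg: list = field(default_factory=lambda: [1e-5, 1e-5, 1e-5])
    random_seed: int = 0

model = create_model(input_size=128, hyper_param=cHyperParameters())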