Example #1
def buildClassifier(input_shape=(100, 100, 3)):
    # Initialising the CNN
    classifier = Sequential()
    classifier.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape, padding='same'))
    classifier.add(MaxPooling2D(pool_size=(4, 4), padding='same'))
    classifier.add(Dropout(0.5))  # added extra Dropout layer
    classifier.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    classifier.add(Dropout(0.5))  # added extra dropout layer
    classifier.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Dropout(0.2))  # was 0.25 before
    classifier.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    classifier.add(Conv2D(1024, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    # Step 3 - Flattening (Flatten must come before the Dense layers)
    classifier.add(Flatten())
    classifier.add(Dense(units=1024, activation='relu'))  # added new dense layer
    classifier.add(Dropout(0.2))  # was 0.25 before
    classifier.add(Dense(units=1024, activation='relu'))  # added new dense layer
    classifier.add(Dense(units=256, activation='relu'))  # added new dense layer
    # Step 4 - Full connection
    classifier.add(Dropout(0.2))
    classifier.add(Dense(units=1, activation='sigmoid'))
    classifier.summary()

    # Compiling the CNN
    classifier.compile(optimizer='adam',
                       loss='binary_crossentropy',
                       metrics=['accuracy'])
    plot_model(classifier, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
    return classifier
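A quick smoke test for buildClassifier (illustrative only; it assumes NumPy plus the Keras imports used above, and note that plot_model needs pydot and graphviz installed):

import numpy as np

classifier = buildClassifier()
X = np.random.rand(8, 100, 100, 3).astype('float32')   # random stand-in images
y = np.random.randint(0, 2, size=(8, 1))               # random binary labels
classifier.fit(X, y, epochs=1, batch_size=4)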
Example #2
    def build(self):
        """
        Builds the full Keras model and stores it in self.model.
        """
        mc = self.config
        in_x = x = Input((12, 8, 8))

        # (batch, channels, height, width)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_first_filter_size,
                   padding="same",
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="input_conv-" + str(mc.cnn_first_filter_size) + "-" +
                   str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1, name="input_batchnorm")(x)
        x = Activation("relu", name="input_relu")(x)

        for i in range(mc.res_layer_num):
            x = self._build_residual_block(x, i + 1)

        res_out = x

        # for policy output
        x = Conv2D(filters=2,
                   kernel_size=1,
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="policy_conv-1-2")(res_out)
        x = BatchNormalization(axis=1, name="policy_batchnorm")(x)
        x = Activation("relu", name="policy_relu")(x)
        x = Flatten(name="policy_flatten")(x)

        policy_out = Dense(self.config.n_labels,
                           kernel_regularizer=l2(mc.l2_reg),
                           activation="softmax",
                           name="policy_out")(x)

        # for value output
        x = Conv2D(filters=4,
                   kernel_size=1,
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="value_conv-1-4")(res_out)
        x = BatchNormalization(axis=1, name="value_batchnorm")(x)
        x = Activation("relu", name="value_relu")(x)
        x = Flatten(name="value_flatten")(x)
        x = Dense(mc.value_fc_size,
                  kernel_regularizer=l2(mc.l2_reg),
                  activation="relu",
                  name="value_dense")(x)
        value_out = Dense(1,
                          kernel_regularizer=l2(mc.l2_reg),
                          activation="tanh",
                          name="value_out")(x)

        self.model = Model(in_x, [policy_out, value_out], name="chess_model")
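The helper _build_residual_block is not shown in this snippet. Below is a sketch of a typical AlphaZero-style residual block consistent with the naming and config conventions above (an assumption, not the project's actual code; mc.cnn_filter_size is a hypothetical config field, and Add comes from keras.layers):

    def _build_residual_block(self, x, index):
        mc = self.config
        in_x = x
        res_name = "res" + str(index)
        # conv -> BN -> relu -> conv -> BN -> skip connection -> relu
        x = Conv2D(filters=mc.cnn_filter_num, kernel_size=mc.cnn_filter_size,
                   padding="same", data_format="channels_first", use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg), name=res_name + "_conv1")(x)
        x = BatchNormalization(axis=1, name=res_name + "_batchnorm1")(x)
        x = Activation("relu", name=res_name + "_relu1")(x)
        x = Conv2D(filters=mc.cnn_filter_num, kernel_size=mc.cnn_filter_size,
                   padding="same", data_format="channels_first", use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg), name=res_name + "_conv2")(x)
        x = BatchNormalization(axis=1, name=res_name + "_batchnorm2")(x)
        x = Add(name=res_name + "_add")([in_x, x])
        x = Activation("relu", name=res_name + "_relu2")(x)
        return x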
Example #3
def LSTM_model(nb_channels=3, img_size=32, time_slot=10, num_color_chan=1):
    """
    Recurrent neural network for image data.
    Never tested.
    :param nb_channels: number of classes
    :param img_size: image size
    :param time_slot: number of frames/images in a video (video length)
    :param num_color_chan: number of color channels in the image/frame
    """
    # Renamed from LSTM: the original name shadowed keras.layers.LSTM, so the
    # model.add(LSTM(...)) calls below would have called this function recursively.
    # Each frame is flattened into a feature vector, since keras.layers.LSTM
    # expects a (timesteps, features) input rather than the original 3D
    # (img_size, img_size, num_color_chan) per-sample shape.
    model = Sequential()
    model.add(LSTM(time_slot,
                   input_shape=(time_slot, img_size * img_size * num_color_chan),
                   return_sequences=True, activation='sigmoid'))
    for _ in range(6):  # six more identical recurrent layers
        model.add(LSTM(time_slot, return_sequences=True, activation='sigmoid'))

    # flatten and check
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Dropout(rate=0.5))
    model.add(Dense(nb_channels, activation='softmax'))
    
    return model
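A quick shape check on random data (illustrative only, given the "Never tested" caveat in the docstring):

import numpy as np

model = LSTM_model()
X = np.random.rand(4, 10, 32 * 32 * 1).astype('float32')  # (batch, time_slot, flattened frame)
model.compile(optimizer='adam', loss='categorical_crossentropy')
print(model.predict(X).shape)  # expected: (4, 3)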
Example #4
def initialize_model():

    model = Sequential()
    model.add(
        Conv2D(40, 11, strides=1, padding='same', input_shape=(1, 1024, 4)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(Conv2D(40, 11, strides=1, padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(MaxPooling2D(pool_size=(1, 64)))

    model.add(Flatten())

    model.add(Dense(units=500))

    model.add(Dense(units=640))

    model.add(Reshape((1, 16, 40)))

    model.add(Conv2DTranspose(40, 11, strides=(1, 64), padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(Conv2DTranspose(40, 11, strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(Conv2D(4, 11, strides=1, padding='same', activation='sigmoid'))
    model.summary()
    model.compile(optimizer='adam', loss='mse')

    return model
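The (1, 1024, 4) input shape and mse loss read as an autoencoder over one-hot encoded sequence data; a hedged smoke test on random input (assumes NumPy and the Keras imports used above):

import numpy as np

model = initialize_model()
X = np.random.rand(2, 1, 1024, 4).astype('float32')
model.fit(X, X, epochs=1, batch_size=2)  # the autoencoder reconstructs its input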
Example #5
def Baseline_NN(nb_channels=3, dropoutRate=0.5, act='relu', k_regularizer=regularizers.l2(0.001), input_dimension=256):
    """
    Fully connected dense neural network.

    :param nb_channels: number of classes
    :param dropoutRate: drop-out rate of the last layer
    :param act: activation function
    :param k_regularizer: kernel regularizer
    :param input_dimension: size of the input
    """
    model = Sequential()
    # first layer
    model.add(Dense(128,   # or 100
                    input_dim=input_dimension,
                    kernel_initializer='glorot_uniform',  # or 'normal', initializers.Constant(value=0), ...
                    activation='sigmoid',                 # or 'relu', 'tanh', ...
                    kernel_regularizer=k_regularizer))    # l2(0.01) gave smooth filters, but bad accuracy
    # second layer
    model.add(Dense(64, kernel_initializer='glorot_uniform', activation=act, kernel_regularizer=k_regularizer))
    # third layer
    model.add(Dense(32, kernel_initializer='glorot_uniform', activation=act, kernel_regularizer=k_regularizer))
    # fourth layer
    model.add(Dense(16, kernel_initializer='glorot_uniform',  activation=act, kernel_regularizer=k_regularizer))
    # drop-out layer to prevent overfitting
    model.add(Dropout(rate=dropoutRate))
    
    # last layer
    model.add(Dense(nb_channels, kernel_initializer='glorot_uniform', activation='softmax'))

    
    return model
Example #6
def initialize_model():

    one_filter_keras_model = Sequential()
    one_filter_keras_model.add(
        Conv2D(filters=40,
               kernel_size=(1, 11),
               padding="same",
               input_shape=(1, 1500, 5),
               kernel_constraint=NonNeg()))
    one_filter_keras_model.add(BatchNormalization(axis=-1))
    one_filter_keras_model.add(Activation('relu'))

    one_filter_keras_model.add(MaxPooling2D(pool_size=(1, 30)))

    one_filter_keras_model.add(Flatten())
    one_filter_keras_model.add(Dense(40))
    one_filter_keras_model.add(BatchNormalization(axis=-1))
    one_filter_keras_model.add(Activation('relu'))
    one_filter_keras_model.add(Dropout(0.5))

    one_filter_keras_model.add(Dense(1))
    one_filter_keras_model.add(Activation("sigmoid"))

    one_filter_keras_model.summary()
    one_filter_keras_model.compile(optimizer='adam',
                                   loss='binary_crossentropy',
                                   metrics=[precision, recall, specificity])

    return one_filter_keras_model
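precision, recall and specificity are custom metric functions from the surrounding project and are not shown here. A plausible backend-based sketch (an assumption, not the project's code):

import keras.backend as K

def precision(y_true, y_pred):
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    pred_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_pos / (pred_pos + K.epsilon())

def recall(y_true, y_pred):
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_pos / (actual_pos + K.epsilon())

def specificity(y_true, y_pred):
    true_neg = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
    actual_neg = K.sum(K.round(K.clip(1 - y_true, 0, 1)))
    return true_neg / (actual_neg + K.epsilon())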
Example #7
    def fit(self, X, y, X_val, y_val):
        ## scaler (disabled)
        # self.scaler = StandardScaler()
        # X = self.scaler.fit_transform(X)

        #### build model
        self.model = Sequential()
        ## input layer
        self.model.add(Dropout(self.input_dropout, input_shape=(X.shape[1], )))
        ## hidden layers
        hidden_layers = self.hidden_layers
        while hidden_layers > 0:
            self.model.add(Dense(self.hidden_units))
            if self.batch_norm == "before_act":
                self.model.add(BatchNormalization())
            if self.hidden_activation == "prelu":
                self.model.add(PReLU())
            elif self.hidden_activation == "elu":
                self.model.add(ELU())
            else:
                self.model.add(Activation(self.hidden_activation))
            if self.batch_norm == "after_act":
                self.model.add(BatchNormalization())
            self.model.add(Dropout(self.hidden_dropout))
            hidden_layers -= 1

        ## output layer
        output_dim = 1
        output_act = "linear"
        self.model.add(Dense(output_dim))
        self.model.add(Activation(output_act))

        ## loss
        if self.optimizer == "sgd":
            sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
            self.model.compile(loss="mse", optimizer=sgd)
        else:
            self.model.compile(loss="mse", optimizer=self.optimizer)

        logger.info(self.model.summary())

        ## callback
        early_stopping = EarlyStopping(monitor='val_loss',
                                       min_delta=1e-2,
                                       patience=10,
                                       verbose=0,
                                       mode='auto')

        cb_my = LossHistory()

        ## fit
        self.model.fit(X,
                       y,
                       epochs=self.epochs,
                       batch_size=self.batch_size,
                       validation_data=(X_val, y_val),
                       callbacks=[early_stopping, cb_my],
                       verbose=1)
        return self
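LossHistory is a project-specific callback that is not shown; a minimal sketch of what such a callback usually looks like:

from keras.callbacks import Callback

class LossHistory(Callback):
    """Records the training loss after every batch (illustrative sketch)."""
    def on_train_begin(self, logs=None):
        self.losses = []

    def on_batch_end(self, batch, logs=None):
        self.losses.append(logs.get('loss'))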
Example #8
    def __init__(self):
        ''' The Constructor '''
        self.x_train, self.y_train, self.x_test, self.y_test = load_external_data()
        # # If you wish to use the mnist dataset, uncomment the line below
        # (self.x_train, self.y_train), (self.x_test, self.y_test) = mnist.load_data()
        self.x_train = self.x_train.reshape(60000, 784)
        self.x_test = self.x_test.reshape(10000, 784)  # the test set must be flattened too
        self.x_train = self.x_train.astype('float32')
        self.x_test = self.x_test.astype('float32')
        self.x_train /= 255
        self.x_test /= 255

        # Encoding labels. Example 4 becomes [0,0,0,0,1,0,0,0,0,0]
        n_classes = 10
        self.y_train = np_utils.to_categorical(self.y_train, n_classes)
        self.y_test = np_utils.to_categorical(self.y_test, n_classes)

        # building a linear stack of layers with the sequential model
        self.model = Sequential()
        self.model.add(Dense(512, input_shape=(784, )))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.2))

        self.model.add(Dense(512))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.2))

        self.model.add(Dense(10))
        self.model.add(Activation('softmax'))

        # compiling the sequential model
        self.model.compile(loss='categorical_crossentropy',
                           metrics=['accuracy'],
                           optimizer='adam')
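Training and evaluation would then look like this (a sketch; the class name is hypothetical since the snippet does not show it):

classifier = MnistClassifier()  # hypothetical name
classifier.model.fit(classifier.x_train, classifier.y_train,
                     batch_size=128, epochs=10,
                     validation_data=(classifier.x_test, classifier.y_test))
loss, acc = classifier.model.evaluate(classifier.x_test, classifier.y_test)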
Example #9
def discriminator_model():
    """Build discriminator architecture."""
    n_layers, use_sigmoid = 3, False
    inputs = Input(shape=input_shape_discriminator)

    x = Conv2D(filters=ndf, kernel_size=(4, 4), strides=2, padding='same')(inputs)
    x = LeakyReLU(0.2)(x)

    nf_mult, nf_mult_prev = 1, 1
    for n in range(n_layers):
        nf_mult_prev, nf_mult = nf_mult, min(2**n, 8)
        x = Conv2D(filters=ndf*nf_mult, kernel_size=(4, 4), strides=2, padding='same')(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.2)(x)

    nf_mult_prev, nf_mult = nf_mult, min(2**n_layers, 8)
    x = Conv2D(filters=ndf*nf_mult, kernel_size=(4, 4), strides=1, padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)

    x = Conv2D(filters=1, kernel_size=(4, 4), strides=1, padding='same')(x)
    if use_sigmoid:
        x = Activation('sigmoid')(x)

    x = Flatten()(x)
    x = Dense(1024, activation='tanh')(x)
    x = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=inputs, outputs=x, name='Discriminator')
    return model
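ndf and input_shape_discriminator are module-level globals that the snippet does not define; plausible values for this DCGAN-style discriminator would be (an assumption, not the project's settings):

ndf = 64                                    # number of discriminator filters
input_shape_discriminator = (256, 256, 3)   # height x width x channels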
Example #10
def buildClassifier(input_shape=(100, 100, 3)):
    """
    This creates the CNN algorithm.
    Args:
        input_shape(tuple): This is the image shape of (100,100,3)
    Returns:
        classifier(sequential): This is the sequential model.
    """
    # Initialising the CNN
    opt = Adam(lr=0.0002)  # lr = learning rate
    classifier = Sequential()
    classifier.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=input_shape,
               padding='same'))
    classifier.add(MaxPooling2D(pool_size=(3, 3), padding='same'))
    classifier.add(Dropout(0.5))  # added extra Dropout layer
    classifier.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    classifier.add(Dropout(0.5))  # added extra dropout layer
    classifier.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Dropout(0.2))  # was 0.25 before
    classifier.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    classifier.add(Conv2D(1024, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Flatten())  # Flatten is needed before the first Dense layer
    classifier.add(Dense(units=1024,
                         activation='relu'))  # added new dense layer
    classifier.add(Dropout(0.2))  # was 0.25 before
    # Step 3 - Flattening: Flatten() was moved above, before the first Dense layer
    classifier.add(Dense(units=1024,
                         activation='relu'))  # added new dense layer
    classifier.add(Dense(units=256,
                         activation='relu'))  # added new dense layer
    # Step 4 - Full connection
    classifier.add(Dropout(0.2))
    classifier.add(Dense(units=1, activation='sigmoid'))
    classifier.summary()

    # Compiling the CNN
    classifier.compile(optimizer=opt,
                       loss='binary_crossentropy',
                       metrics=['accuracy'])
    plot_model(classifier,
               to_file='model_plot.png',
               show_shapes=True,
               show_layer_names=True)
    return classifier
Example #11
    def createModel(self):
        base_model = MobileNet(input_shape=(64, 64, 3), weights=None, include_top=False)
        x = base_model.output
        x = Flatten()(x)
        x = Dense(1024, activation='relu')(x)
        x = Dense(512, activation='relu')(x)
        x = Dense(256, activation='relu')(x)
        preds = Dense(7, activation='softmax')(x)

        model = Model(inputs=base_model.input, outputs=preds)

        return model
Example #12
def CNN_Image_Multi(nb_channels=3, dropoutRate=0.5, act='relu', k_size=3, d_layer=512,
                    k_regularizer=regularizers.l2(0.001), img_size=32, num_color_chan=2):
    """
    Deep convolutional 2D neural network with softmax classifier.

    :param nb_channels: number of classes
    :param dropoutRate: drop-out rate of the last layer
    :param act: activation function
    :param k_size: convolutional kernel size
    :param k_regularizer: kernel regularizer
    :param d_layer: number of hidden units in the last layer
    :param img_size: image size
    :param num_color_chan: number of color channels in the image; no RGB values are
        used, the channels hold delta and beta-gamma power values of the electrodes

    Expects 32x32x2 image data as input.

    Conv2D<32> - Conv2D<32> - Conv2D<32> - Conv2D<32> - MaxPool2D<2,2> -
    Conv2D<64> - Conv2D<64> - MaxPool2D<2,2> -
    Conv2D<128> - MaxPool2D<2,2> -
    Dense<512> - Dense<3>
    """
    strides = None
    print('PARAMETERS OF MODELS: ', act, ' ', k_size, ' ', d_layer)
    # create model
    model = Sequential()
    # add layers

    model.add(Conv2D(32, kernel_size=k_size, input_shape=(img_size, img_size, num_color_chan),
                     kernel_initializer='glorot_uniform', activation=act, kernel_regularizer=k_regularizer))
    model.add(Conv2D(32, kernel_size=k_size, padding='same', kernel_initializer='glorot_uniform', activation=act, kernel_regularizer=k_regularizer))
    model.add(Conv2D(32, kernel_size=k_size, padding='same', kernel_initializer='glorot_uniform', activation=act, kernel_regularizer=k_regularizer))
    model.add(Conv2D(32, kernel_size=k_size, padding='same', kernel_initializer='glorot_uniform', activation=act, kernel_regularizer=k_regularizer))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=strides,  padding='valid', data_format='channels_last'))
    # after max-pooling
    model.add(Conv2D(64, kernel_size=k_size, padding='same', kernel_initializer='glorot_uniform', activation=act, kernel_regularizer=k_regularizer))
    model.add(Conv2D(64, kernel_size=k_size, padding='same', kernel_initializer='glorot_uniform', activation=act, kernel_regularizer=k_regularizer))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=strides,  padding='valid', data_format='channels_last'))
    #another max-pooling
    model.add(Conv2D(128, kernel_size=k_size, padding='same', kernel_initializer='glorot_uniform', activation=act, kernel_regularizer=k_regularizer))
    model.add(MaxPooling2D(pool_size=(2, 2),  strides=strides, padding='valid', data_format='channels_last'))
    # fully connected layer
    model.add(Flatten())
    model.add(Dense(d_layer))
    model.add(Dropout(rate=dropoutRate))
    model.add(Dense(nb_channels, activation='softmax'))
    
    return model
Example #13
def CNN_Video(nb_channels=3, dropoutRate=0.5, act='relu', k_size=3, d_layer=512,
              k_regularizer=regularizers.l1(0.001), img_size=32, time_slot=100, num_color_chan=1):
    """
    Deep convolutional 3D neural network with softmax classifier.

    :param nb_channels: number of classes
    :param dropoutRate: drop-out rate of the last layer
    :param act: activation function
    :param k_size: convolutional kernel size
    :param k_regularizer: kernel regularizer
    :param d_layer: number of hidden units in the last layer
    :param img_size: image size
    :param time_slot: number of frames/images in a video (video length)
    :param num_color_chan: number of color channels in the image/frame; no RGB values
        are used, only the real values of the electrodes

    Expects 100x32x32x1 video data as input.

    Conv3D<32> - Conv3D<32> - Conv3D<32> - Conv3D<32> - MaxPool3D<2,2,2> -
    Conv3D<64> - Conv3D<64> - MaxPool3D<2,2,2> -
    Dense<512> - Dense<3>
    """
 
    strides = None
    # In each convolutional layer, 10 consecutive images are convolved
    kernel = (10, k_size, k_size)

    print('PARAMETERS OF MODELS: ', act, ' ', k_size, ' ', d_layer)
  
    model = Sequential()
    # add layers
    model.add(Conv3D(32, kernel_size=kernel, input_shape=(time_slot,img_size,img_size,num_color_chan), activation=act))
    model.add(Conv3D(32, kernel_size=kernel, padding='same', kernel_initializer='glorot_uniform', activation=act ))
    model.add(Conv3D(32, kernel_size=kernel, padding='same', kernel_initializer='glorot_uniform', activation=act ))
    model.add(Conv3D(32, kernel_size=kernel, padding='same', kernel_initializer='glorot_uniform', activation=act ))
    model.add(MaxPooling3D(pool_size=kernel, strides=strides, data_format='channels_last'))
    # new layer
    model.add(Conv3D(64, kernel_size=kernel, padding='same', kernel_initializer='glorot_uniform', activation=act))
    model.add(Conv3D(64, kernel_size=kernel, padding='same', kernel_initializer='glorot_uniform', activation=act))
    model.add(MaxPooling3D(pool_size=(2,2,2),strides=strides, data_format='channels_last'))
    
    # flatten and check
    model.add(Flatten())
    model.add(Dense(d_layer))
    model.add(Dropout(rate=dropoutRate))
    model.add(Dense(nb_channels, activation='softmax'))
    
    return model
Example #14
def build_discriminator(data_dim, num_classes):
    model = Sequential()
    model.add(Dense(31, input_dim=data_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dropout(0.25))
    model.add(Dense(16))  # input_dim is only meaningful on the first layer
    model.add(LeakyReLU(alpha=0.2))
    
    model.summary()
    trans = Input(shape=(data_dim,))
    features = model(trans)
    valid = Dense(1, activation="sigmoid")(features)
    label = Dense(num_classes+1, activation="softmax")(features)
    return Model(trans, [valid, label])
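Because the discriminator has two heads (real/fake validity and a class label), compiling it takes one loss per output; a sketch with illustrative sizes:

discriminator = build_discriminator(data_dim=29, num_classes=2)
discriminator.compile(optimizer='adam',
                      loss=['binary_crossentropy', 'categorical_crossentropy'],
                      metrics=['accuracy'])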
Example #15
    def __init__(self,
                 state_size: int,
                 action_size: int,
                 representation_size: int,
                 max_value: int,
                 hidden_neurons: int = 64,
                 weight_decay: float = 1e-4,
                 representation_activation: str = 'tanh'):
        self.state_size = state_size
        self.action_size = action_size
        self.value_support_size = math.ceil(math.sqrt(max_value)) + 1

        regularizer = regularizers.l2(weight_decay)
        representation_network = Sequential([
            Dense(hidden_neurons,
                  activation='relu',
                  kernel_regularizer=regularizer),
            Dense(representation_size,
                  activation=representation_activation,
                  kernel_regularizer=regularizer)
        ])
        value_network = Sequential([
            Dense(hidden_neurons,
                  activation='relu',
                  kernel_regularizer=regularizer),
            Dense(self.value_support_size, kernel_regularizer=regularizer)
        ])
        policy_network = Sequential([
            Dense(hidden_neurons,
                  activation='relu',
                  kernel_regularizer=regularizer),
            Dense(action_size, kernel_regularizer=regularizer)
        ])
        dynamic_network = Sequential([
            Dense(hidden_neurons,
                  activation='relu',
                  kernel_regularizer=regularizer),
            Dense(representation_size,
                  activation=representation_activation,
                  kernel_regularizer=regularizer)
        ])
        reward_network = Sequential([
            Dense(16, activation='relu', kernel_regularizer=regularizer),
            Dense(1, kernel_regularizer=regularizer)
        ])

        super().__init__(representation_network, value_network, policy_network,
                         dynamic_network, reward_network)
Example #16
    def testKerasModel(self):
        model = Sequential(
            [Dense(10, input_shape=(100,)),
             Activation('relu', name='my_relu')])
        summary = self.keras_model(name='my_name', data=model, step=1)
        first_val = summary.value[0]
        self.assertEqual(model.to_json(), first_val.tensor.string_val[0])
Example #17
def Simple_CNN():
    """
    Simple convolutional neural network, implemented for comparison purposes.

    Expects 32x32x1 image data as input

    Conv2D<64> - Conv2D<32> - Dense<3>
    kernel_initializer = None
    activation = relu
    kernel_size = 3
    padding = valid (default)
    """

    # create model
    model = Sequential()
    # add convolutional layers
    model.add(Conv2D(64, kernel_size=3, activation='relu', input_shape=(32,32,1)))
    model.add(Conv2D(32, kernel_size=3, activation='relu'))
    # fully connected layer
    model.add(Flatten())
    model.add(Dropout(rate=0.35))
    model.add(Dense(3, activation='softmax'))

    
    return model
Example #18
File: rnn.py Project: ss-koishi/RNN
def main():

    sin = md.make_noised_sin()
    size = 60
    (x_train, y_train), (x_test, y_test) = train_test_split(sin, n_prev = size)

    model = Sequential([
        LSTM(hidden, batch_input_shape=(None, size, io), return_sequences=False),
        Dense(io),
        Activation('linear')
    ])

    model.compile(loss='mean_squared_error', optimizer='adam')
    stopping = EarlyStopping(monitor='val_loss', mode='auto', patience=10)
    model.fit(x_train, y_train, batch_size=256, epochs=1000, validation_split=0.1, callbacks=[stopping])

    result = []
    future_steps = 1000
    future_data = x_train[-1:]  # seed with the last training window, shape (1, size, 1)
    for i in range(future_steps):
      print(i)
      pred = model.predict(future_data)
      future_data = np.delete(future_data, 0)
      future_data = np.append(future_data, pred[-1:]).reshape(1, size, 1)
      result = np.append(result, pred[-1:])

    plt.figure()
    #plt.plot(y_test.flatten())
    output = y_train[-100:]
    plt.plot(range(0, len(output)), output, label='input')
    plt.plot(range(len(output), len(output) + future_steps), result, label='future')
    plt.title('Sin Curve prediction')
    plt.legend(loc='upper right')
    plt.savefig('result.png')
Example #19
def get_model():
    """
    Builds and returns a feed forward neural net
    :return: keras sequential model
    """

    model = tf.keras.Sequential()
    model.add(Dense(250, input_dim=13, activation=tf.nn.relu))
    model.add(Dropout(0.4))
    model.add(Dense(200, activation=tf.nn.relu))
    model.add(Dropout(0.4))
    model.add(Dense(100, activation=tf.nn.relu))
    model.add(Dropout(0.3))
    model.add(Dense(50, activation=tf.nn.relu))
    model.add(Dense(1, activation=tf.nn.sigmoid))
    return model
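The single sigmoid output implies binary classification, so a typical compile call would be (a sketch; the training data names are placeholders):

model = get_model()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# model.fit(X_train, y_train, epochs=50, batch_size=32, validation_split=0.2)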
Example #20
    def build(width, height, depth, classes):
        # initialize the model along with the input shape to be
        # "channels last"

        model = Sequential()
        #the image input
        inputShape = (height, width, depth)

        # if we are using "channels first", update the input shape
        if image_data_format() == "channels_first":
            inputShape = (depth, height, width)

        # Every CNN you implement will have a build method: it accepts a number
        # of parameters, constructs the network architecture, and returns it to
        # the calling function.

        # Define the first (and only) CONV => RELU layer. It has 32 filters,
        # each 3x3, with "same" padding so the output of the convolution matches
        # the input size (not strictly necessary for this example, but a good
        # habit to start forming now).
        model.add(Conv2D(32, (3, 3), padding="same", input_shape=inputShape))
        model.add(Activation("relu"))

        #softmax classifier
        model.add(Flatten())
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        #return the constructed network architechture
        return model
Example #21
    def build(width, height, depth, classes):
        # depth refers to the number of image channels (e.g., 3 for RGB)
        # initialize the model
        model = Sequential()
        inputShape = (height, width, depth)

        # if we are using "channels first", update the input shape
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)

        # first set of CONV => RELU => POOL layers
        model.add(Conv2D(20, (3, 3), padding="same", input_shape=inputShape))
        model.add(BatchNormalization())
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        model.add(Dropout(0.23))

        # second set of CONV => RELU => POOL layers
        model.add(Conv2D(50, (3, 3), padding="same"))
        model.add(BatchNormalization())
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        model.add(Dropout(0.23))
        #3
        model.add(Conv2D(80, (3, 3), padding="same"))
        model.add(BatchNormalization())
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        model.add(Dropout(0.23))
        #4
        model.add(Conv2D(128, (3, 3), padding="same"))
        model.add(BatchNormalization())
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        model.add(Dropout(0.23))

        # first (and only) set of FC => RELU layers
        model.add(Flatten())
        model.add(Dense(1024))
        model.add(Activation("relu"))

        # softmax classifier
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        # return the constructed network architecture
        return model
Example #22
    def build(height, width, depth, classes):
        # initialize the model
        model = Sequential()
        #the shape of our image inputs
        inputShape = (height, width, depth)
        #if we are using "channels first" update the input shape
        if (K.image_data_format() == "channels_first"):
            inputShape = (depth, height, width)

        # first set of 5x5 CONV => RELU layers (this architecture has no pooling)
        model.add(
            Conv2D(24, (5, 5),
                   strides=(2, 2),
                   padding="valid",
                   input_shape=inputShape))
        model.add(Activation('relu'))

        # second set of 5x5 CONV => RELU layers
        model.add(Conv2D(36, (5, 5), strides=(2, 2), padding="valid"))
        model.add(Activation('relu'))

        # third set of 5x5 CONV => RELU layers
        model.add(Conv2D(48, (5, 5), strides=(2, 2), padding="valid"))
        model.add(Activation('relu'))

        # first set of 3x3 CONV => RELU layers
        model.add(Conv2D(64, (3, 3), padding="valid"))
        model.add(Activation('relu'))

        # second set of 3x3 CONV => RELU layers
        model.add(Conv2D(64, (3, 3), padding="valid"))
        model.add(Activation('relu'))

        #set of fully connected layers
        model.add(Flatten())
        model.add(Dense(1164))
        model.add(Activation('relu'))
        model.add(Dense(100))
        model.add(Activation('relu'))
        model.add(Dense(10))
        model.add(Activation('relu'))

        #output
        model.add(Dense(classes))
        model.add(Activation('tanh'))

        return model
Example #23
	def build(width, height, depth, classes):
		# initialize the model along with the input shape to be
		# "channels last" and the channels dimension itself
		model = Sequential()
		inputShape = (height, width, depth)
		chanDim = -1

		# if we are using "channels first", update the input shape
		# and channels dimension
		if K.image_data_format() == "channels_first":
			inputShape = (depth, height, width)
			chanDim = 1

		# first CONV => RELU => CONV => RELU => POOL layer set
		model.add(Conv2D(32, (3, 3), padding="same",
			input_shape=inputShape))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(Conv2D(32, (3, 3), padding="same"))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(MaxPooling2D(pool_size=(2, 2)))
		model.add(Dropout(0.25))

		# second CONV => RELU => CONV => RELU => POOL layer set
		model.add(Conv2D(64, (3, 3), padding="same"))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(Conv2D(64, (3, 3), padding="same"))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(MaxPooling2D(pool_size=(2, 2)))
		model.add(Dropout(0.25))

		# first (and only) set of FC => RELU layers
		model.add(Flatten())
		model.add(Dense(512))
		model.add(Activation("relu"))
		model.add(BatchNormalization())
		model.add(Dropout(0.5))

		# softmax classifier
		model.add(Dense(classes))
		model.add(Activation("softmax"))

		# return the constructed network architecture
		return model
Example #24
    def fit(self, X, y):
        ## scaler
        self.scaler = StandardScaler()
        X = self.scaler.fit_transform(X)

        #### build model
        self.model = Sequential()
        ## input layer
        self.model.add(Dropout(self.input_dropout, input_shape=(X.shape[1],)))
        ## hidden layers
        hidden_layers = self.hidden_layers
        while hidden_layers > 0:
            self.model.add(Dense(self.hidden_units))
            if self.batch_norm == "before_act":
                self.model.add(BatchNormalization())
            if self.hidden_activation == "prelu":
                self.model.add(PReLU())
            elif self.hidden_activation == "elu":
                self.model.add(ELU())
            else:
                self.model.add(Activation(self.hidden_activation))
            if self.batch_norm == "after_act":
                self.model.add(BatchNormalization())
            self.model.add(Dropout(self.hidden_dropout))
            hidden_layers -= 1

        ## output layer
        output_dim = 1
        output_act = "linear"
        self.model.add(Dense(output_dim))
        self.model.add(Activation(output_act))
        
        ## loss
        if self.optimizer == "sgd":
            sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
            self.model.compile(loss="mse", optimizer=sgd)
        else:
            self.model.compile(loss="mse", optimizer=self.optimizer)

        ## fit
        self.model.fit(X, y,
                       epochs=self.nb_epoch,
                       batch_size=self.batch_size,
                       validation_split=0, verbose=1)
        return self
Example #25
def recommendByAI(data, labels):
    # 'labels' parameter added: fit() with categorical_crossentropy needs
    # one-hot encoded rating targets alongside the input data

    # create model type
    model = Sequential()

    # add main layers to neural network
    model.add(Dense(128, activation="relu"))
    model.add(Dense(128, activation="relu"))
    model.add(Dense(128, activation="relu"))

    # the final layer is Dense(5) since there are five classes (ratings 1 to 5 stars)
    model.add(Dense(5, activation="softmax"))

    # compile
    model.compile(optimizer="adam", loss="categorical_crossentropy")

    # fit model and return it
    model.fit(x=data, y=labels, batch_size=20, epochs=10)

    return model
Example #26
    def build(input_shape_width, input_shape_height, classes, 
              weight_path = '', input_shape_depth = 3):
        '''
        weight_path: a .hdf5 file. If exists, we can load model.
        '''
        
        # initialize the model
        model = Sequential()
        
        input_shape = (input_shape_height, input_shape_width, 
                       input_shape_depth)
        # if we are using "channels first", update the input shape
        if K.image_data_format() == 'channels_first':
             input_shape = (input_shape_depth, input_shape_height, 
                            input_shape_width)
        
        # first Convolution + relu + pooling layer
        model.add(Conv2D(filters = 20, kernel_size = (5, 5), 
                         padding = 'same', input_shape = input_shape))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size = (2, 2), strides=(2, 2)))
        
        # second convolutional layer
        model.add(Conv2D(filters = 50, kernel_size = (5, 5), 
                         padding = 'same'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        
        # Flattening
        model.add(Flatten())

        # Full connection
        model.add(Dense(units = 500))
        model.add(Activation('relu'))

        # output layer
        model.add(Dense(units = classes))
        model.add(Activation('softmax'))

        if weight_path:
            model.load_weights(weight_path)

        # return the constructed network architecture
        return model
Example #27
    def build(width, height, depth, classes):

        # Model initialization
        model = Sequential()
        input_shape = (height, width, depth)
        chan_dim = -1

        # Data formatting: if we are using "channels first", update the
        # input shape and the channel dimension
        if k.image_data_format() == "channels_first":
            input_shape = (depth, height, width)
            chan_dim = 1

        # First layer set
        model.add(Conv2D(16, (3, 3), padding="same", input_shape=input_shape))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chan_dim))
        model.add(Conv2D(16, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chan_dim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        # Second layer set
        model.add(Conv2D(32, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chan_dim))
        model.add(Conv2D(32, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chan_dim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        # Third layer set
        model.add(Flatten())
        model.add(Dense(64))
        model.add(Activation("relu"))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

        # Softmax classification layer set
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        return model
Example #28
def build_generator(latent_dim, data_dim):
    model = Sequential()

    model.add(Dense(16, input_dim=latent_dim))
    
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(32))  # input_dim is only meaningful on the first layer
    
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(data_dim, activation='tanh'))

    model.summary()

    noise = Input(shape=(latent_dim,))
    trans = model(noise)

    return Model(noise, trans)
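Pairing this generator with the two-headed discriminator of Example #14 gives the usual combined GAN model; a sketch assuming both builders and the same Keras imports (Input, Model) are in scope, with illustrative sizes:

latent_dim, data_dim, num_classes = 100, 29, 2
generator = build_generator(latent_dim, data_dim)
discriminator = build_discriminator(data_dim, num_classes)
discriminator.trainable = False   # freeze D while training G through the stack

noise = Input(shape=(latent_dim,))
valid, label = discriminator(generator(noise))
combined = Model(noise, [valid, label])
combined.compile(optimizer='adam',
                 loss=['binary_crossentropy', 'categorical_crossentropy'])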
Example #29
def make_model(dimData):
    model = Sequential()

    model.add(Dense(1024, input_shape=(dimData, )))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))

    model.add(Dense(1024))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))

    model.add(Dense(CLASSES))
    model.add(Activation("softmax"))

    model.compile(loss="categorical_crossentropy",
                  optimizer='adam',
                  metrics=["accuracy"])

    return model
Example #30
def squeeze_and_excitation_layer(lst_layer, r=16, activation='relu'):
    """
	Squeeze and excitation layer.
	:param lst_layer: keras layer. Layer to append the SE block to.
	:param r: int. Ratio to squeeze the number of channels as defined in the original paper (default: 16)
	:param activation: str. Name of non-linear activation to use. ReLU is the default one.
	:return: keras layer. Next layer (output)
	"""
    num_channels = int(lst_layer.get_shape()[-1])
    gap = GlobalAveragePooling2D()(lst_layer)
    reduct = Dense(num_channels // r,
                   activation=activation,
                   kernel_initializer='orthogonal')(gap)
    expand = Dense(num_channels,
                   activation='sigmoid',
                   kernel_initializer='orthogonal')(reduct)
    # Broadcasting adds dimensions at the beginning of `expand`
    return Multiply()([lst_layer, expand])
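Usage sketch: appending an SE block to an arbitrary convolutional output (the shapes and layer choices here are illustrative, and Input, Conv2D and Model are assumed to be imported):

inputs = Input(shape=(32, 32, 3))
x = Conv2D(64, (3, 3), padding='same', activation='relu')(inputs)
x = squeeze_and_excitation_layer(x, r=16)   # channel-wise recalibration
model = Model(inputs, x)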