Code example #1
File: feat7_nn.py Project: nsakki55/kaggle
    def fit(self, X, y, X_val, y_val):
        ## scaler
        #        self.scaler = StandardScaler()
        #        X = self.scaler.fit_transform(X)

        #### build model
        self.model = Sequential()
        ## input layer
        self.model.add(Dropout(self.input_dropout, input_shape=(X.shape[1], )))
        ## hidden layers
        hidden_layers = self.hidden_layers
        while hidden_layers > 0:
            self.model.add(Dense(self.hidden_units))
            if self.batch_norm == "before_act":
                self.model.add(BatchNormalization())
            if self.hidden_activation == "prelu":
                self.model.add(PReLU())
            elif self.hidden_activation == "elu":
                self.model.add(ELU())
            else:
                self.model.add(Activation(self.hidden_activation))
            if self.batch_norm == "after_act":
                self.model.add(BatchNormalization())
            self.model.add(Dropout(self.hidden_dropout))
            hidden_layers -= 1

        ## output layer
        output_dim = 1
        output_act = "linear"
        self.model.add(Dense(output_dim))
        self.model.add(Activation(output_act))

        ## loss
        if self.optimizer == "sgd":
            sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
            self.model.compile(loss="mse", optimizer=sgd)
        else:
            self.model.compile(loss="mse", optimizer=self.optimizer)

        self.model.summary(print_fn=logger.info)

        ## callback
        early_stopping = EarlyStopping(monitor='val_loss',
                                       min_delta=1e-2,
                                       patience=10,
                                       verbose=0,
                                       mode='auto')

        cb_my = LossHistory()

        ## fit
        self.model.fit(X,
                       y,
                       epochs=self.epochs,
                       batch_size=self.batch_size,
                       validation_data=(X_val, y_val),
                       callbacks=[early_stopping, cb_my],
                       verbose=1)
        return self
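cb_my refers to a LossHistory callback defined elsewhere in the file; a minimal sketch of such a callback (the attribute names here are assumptions) might look like:

from keras.callbacks import Callback

class LossHistory(Callback):
    """Records training and validation loss after every epoch."""

    def on_train_begin(self, logs=None):
        self.losses = []
        self.val_losses = []

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))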
Code example #2
    def create_posla_net(self, raw=120, column=320, channel=1):
        # model setting

        inputShape = (raw, column, channel)

        activation = 'relu'
        keep_prob_conv = 0.25
        keep_prob_dense = 0.5
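        # NOTE: Keras Dropout takes the fraction of units to *drop*, so despite
        # their names these are drop rates; keep_prob_conv is currently unused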

        # init = 'glorot_normal'
        # init = 'he_normal'
        init = 'he_uniform'
        chanDim = -1
        classes = 3

        model = Sequential()

        # CONV => RELU => POOL
        model.add(
            Conv2D(3, (3, 3),
                   padding="valid",
                   input_shape=inputShape,
                   kernel_initializer=init,
                   activation=activation))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(
            Conv2D(9, (3, 3),
                   padding="valid",
                   kernel_initializer=init,
                   activation=activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(
            Conv2D(18, (3, 3),
                   padding="valid",
                   kernel_initializer=init,
                   activation=activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(
            Conv2D(32, (3, 3),
                   padding="valid",
                   kernel_initializer=init,
                   activation=activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())

        model.add(Dense(80, kernel_initializer=init, activation=activation))
        model.add(Dropout(keep_prob_dense))

        model.add(Dense(15, kernel_initializer=init, activation=activation))
        model.add(Dropout(keep_prob_dense))

        # softmax classifier
        model.add(Dense(classes, activation='softmax'))

        self.model = model
Code example #3
def bbox_3D_net(input_shape=(224, 224, 3), vgg_weights=None, freeze_vgg=False, bin_num=6):
    vgg16_model = VGG16(include_top=False, weights=vgg_weights, input_shape=input_shape)

    if freeze_vgg:
        for layer in vgg16_model.layers:
            layer.trainable = False

    x = Flatten()(vgg16_model.output)

    dimension = Dense(512)(x)
    dimension = LeakyReLU(alpha=0.1)(dimension)
    dimension = Dropout(0.5)(dimension)
    dimension = Dense(3)(dimension)
    dimension = LeakyReLU(alpha=0.1, name='dimension')(dimension)

    orientation = Dense(256)(x)
    orientation = LeakyReLU(alpha=0.1)(orientation)
    orientation = Dropout(0.5)(orientation)
    orientation = Dense(bin_num * 2)(orientation)
    orientation = LeakyReLU(alpha=0.1)(orientation)
    orientation = Reshape((bin_num, -1))(orientation)
    orientation = Lambda(l2_normalize, name='orientation')(orientation)

    confidence = Dense(256)(x)
    confidence = LeakyReLU(alpha=0.1)(confidence)
    confidence = Dropout(0.5)(confidence)
    confidence = Dense(bin_num, activation='softmax', name='confidence')(confidence)

    model = Model(inputs=vgg16_model.input, outputs=[dimension, orientation, confidence])
    return model
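The Lambda layer wraps an l2_normalize helper that the snippet does not show. In 3D bounding-box estimation code like this it usually normalizes each bin's (cos, sin) pair to unit length; a sketch, assuming the Keras backend:

from keras import backend as K

def l2_normalize(x):
    # give each orientation bin's (cos, sin) pair unit L2 norm
    return K.l2_normalize(x, axis=2)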
Code example #4
    def __init__(self):
        ''' The Constructor '''
        self.x_train, self.y_train, self.x_test, self.y_test = load_external_data()
        # # If you wish to use the mnist dataset, uncomment the line below
        # (self.x_train, self.y_train), (self.x_test, self.y_test) = mnist.load_data()
        self.x_train = self.x_train.reshape(60000, 784)
        self.x_test = self.x_test.reshape(-1, 784)  # flatten the test images as well
        self.x_train = self.x_train.astype('float32')
        self.x_test = self.x_test.astype('float32')
        self.x_train /= 255
        self.x_test /= 255

        # Encoding labels. Example 4 becomes [0,0,0,0,1,0,0,0,0,0]
        n_classes = 10
        self.y_train = np_utils.to_categorical(self.y_train, n_classes)
        self.y_test = np_utils.to_categorical(self.y_test, n_classes)

        # building a linear stack of layers with the sequential model
        self.model = Sequential()
        self.model.add(Dense(512, input_shape=(784, )))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.2))

        self.model.add(Dense(512))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.2))

        self.model.add(Dense(10))
        self.model.add(Activation('softmax'))

        # compiling the sequential model
        self.model.compile(loss='categorical_crossentropy',
                           metrics=['accuracy'],
                           optimizer='adam')
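A minimal training call for this class could then look like the following (MnistClassifier is a stand-in name for the class, which the snippet does not show):

clf = MnistClassifier()
clf.model.fit(clf.x_train, clf.y_train,
              batch_size=128, epochs=10,
              validation_data=(clf.x_test, clf.y_test), verbose=1)
loss, acc = clf.model.evaluate(clf.x_test, clf.y_test, verbose=0)
print('test accuracy: %.4f' % acc)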
Code example #5
def buildClassifier(input_shape=(100, 100, 3)):
    # Initialising the CNN
    classifier = Sequential()
    classifier.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape, padding='same'))
    classifier.add(MaxPooling2D(pool_size=(4, 4), padding='same'))
    classifier.add(Dropout(0.5))  # added extra Dropout layer
    classifier.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    classifier.add(Dropout(0.5))  # added extra dropout layer
    classifier.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Dropout(0.2))  # previously 0.25
    classifier.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    classifier.add(Conv2D(1024, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
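    # NOTE: the next Dense is applied before Flatten, so it acts position-wise
    # on a 4D tensor; code example #6 moves a Flatten in front of it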
    classifier.add(Dense(units=1024, activation='relu'))  # added new dense layer
    classifier.add(Dropout(0.2))  # previously 0.25
    # Step 3 - Flattening
    classifier.add(Flatten())
    classifier.add(Dense(units=1024, activation='relu'))  # added new dense layer
    classifier.add(Dense(units=256, activation='relu'))  # added new dense layer
    # Step 4 - Full connection
    classifier.add(Dropout(0.2))
    classifier.add(Dense(units=1, activation='sigmoid'))
    classifier.summary()

    # Compiling the CNN
    classifier.compile(optimizer='adam',
                       loss='binary_crossentropy',
                       metrics=['accuracy'])
    plot_model(classifier, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
    return classifier
Code example #6
def buildClassifier(input_shape=(100, 100, 3)):
    """
    This creates the CNN algorithm.
    Args:
        input_shape(tuple): This is the image shape of (100,100,3)
    Returns:
        classifier(sequential): This is the sequential model.
    """
    # Initialising the CNN
    opt = Adam(lr=0.0002)  # lr = learning rate
    classifier = Sequential()
    classifier.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=input_shape,
               padding='same'))
    classifier.add(MaxPooling2D(pool_size=(3, 3), padding='same'))
    classifier.add(Dropout(0.5))  # added extra Dropout layer
    classifier.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    classifier.add(Dropout(0.5))  # added extra dropout layer
    classifier.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Dropout(0.2))  # previously 0.25
    classifier.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    classifier.add(Conv2D(1024, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Flatten())  # a Flatten is needed before the first Dense layer
    classifier.add(Dense(units=1024,
                         activation='relu'))  # added new dense layer
    classifier.add(Dropout(0.2))  # previously 0.25
    # Step 3 - Flattening
    #classifier.add(Flatten())
    classifier.add(Dense(units=1024,
                         activation='relu'))  # added new dense layer
    classifier.add(Dense(units=256,
                         activation='relu'))  # added new dense layer
    # Step 4 - Full connection
    classifier.add(Dropout(0.2))
    classifier.add(Dense(units=1, activation='sigmoid'))
    classifier.summary()

    # Compiling the CNN
    classifier.compile(optimizer=opt,
                       loss='binary_crossentropy',
                       metrics=['accuracy'])
    plot_model(classifier,
               to_file='model_plot.png',
               show_shapes=True,
               show_layer_names=True)
    return classifier
Code example #7
def performRNNlass(X_train, X_test, y_train, y_test, forcast_scaled):

    X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
    X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
    forcast_scaled = np.reshape(
        forcast_scaled, (forcast_scaled.shape[0], forcast_scaled.shape[1], 1))

    regressor = Sequential()

    dropoutunit = p.dropoutunit
    LSTM_unit_increment = p.LSTM_unit_increment

    # Adding the first LSTM layer and some Dropout regularisation
    regressor.add(
        LSTM(units=50,
             return_sequences=True,
             input_shape=(X_train.shape[1], 1)))
    regressor.add(Dropout(dropoutunit))

    LSTM_units = 50
    LSTM_units = LSTM_units + LSTM_unit_increment

    # Adding a second LSTM layer and some Dropout regularisation
    regressor.add(LSTM(units=LSTM_units, return_sequences=True))
    regressor.add(Dropout(dropoutunit))

    # Adding a third LSTM layer and some Dropout regularisation
    LSTM_units = LSTM_units + LSTM_unit_increment

    regressor.add(LSTM(units=LSTM_units, return_sequences=True))
    regressor.add(Dropout(dropoutunit))

    # Adding a fourth LSTM layer and some Dropout regularisation
    LSTM_units = LSTM_units + LSTM_unit_increment
    regressor.add(LSTM(units=LSTM_units))
    regressor.add(Dropout(dropoutunit))

    # print(X_train.shape,y_train.shape)
    # Adding the output layer
    regressor.add(Dense(units=1))

    # Compiling the RNN
    regressor.compile(optimizer='adam', loss='mean_squared_error')

    # Fitting the RNN to the Training set
    regressor.fit(X_train, y_train, epochs=p.epochs, batch_size=p.batch_size)
    print('RNN model built', X_test.shape)

    score = regressor.evaluate(X_test, y_test, batch_size=100, verbose=0)
    return regressor, score, X_train, X_test, y_train, y_test, forcast_scaled
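The function reads its hyper-parameters from a module p that is not shown; a plausible stand-in (all values are assumptions) would be:

# p.py -- hypothetical hyper-parameter module assumed by performRNNlass
dropoutunit = 0.2
LSTM_unit_increment = 10
epochs = 50
batch_size = 32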
Code example #8
def Baseline_NN(nb_channels=3, dropoutRate = 0.5, act = 'relu', k_regularizer = regularizers.l2(0.001), input_dimension = 256):
    """ 
    Fully connected dense neural network

    :param nb_channels: number of classes
    :param dropoutRate: drop-out rate of last layer
    :param act: activation function
    :param k_regularizer: kernel regularizer
    :param input_dimension: size of the input 
    """
    model = Sequential()
    # first layer
    model.add(Dense(128,   # or 100
                    input_dim=input_dimension, 
                    kernel_initializer='glorot_uniform', activation='sigmoid', kernel_regularizer=k_regularizer))
                    # 'normal', initializers.Constant(value=0), ...
                    #kernel_regularizer=regularizers.l2(0.01),  # smooth filters, but bad accuracy
                    #activation='sigmoid'))  # 'relu', 'sigmoid', 'tanh', ...
    # second layer
    model.add(Dense(64, kernel_initializer='glorot_uniform', activation=act, kernel_regularizer=k_regularizer))
    # third layer
    model.add(Dense(32, kernel_initializer='glorot_uniform', activation=act, kernel_regularizer=k_regularizer))
    # fourth layer
    model.add(Dense(16, kernel_initializer='glorot_uniform',  activation=act, kernel_regularizer=k_regularizer))
    # drop-out layer to prevent overfitting
    model.add(Dropout(rate=dropoutRate))
    
    # last layer
    model.add(Dense(nb_channels, kernel_initializer='glorot_uniform', activation='softmax'))

    
    return model
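The function returns an uncompiled model; a compile step matching the softmax output (the optimizer choice is an assumption) would be:

model = Baseline_NN(nb_channels=3, input_dimension=256)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',  # matches the softmax head
              metrics=['accuracy'])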
Code example #9
File: densenet.py Project: wangrui1996/keras_ocrs
def transition_block(input,
                     nb_filter,
                     use_pool=True,
                     dropout_rate=None,
                     pooltype=1,
                     weight_decay=1e-4):
    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(input)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (1, 1),
               kernel_initializer='he_normal',
               padding='same',
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)

    if (dropout_rate):
        x = Dropout(dropout_rate)(x)
    if use_pool:
        if (pooltype == 2):
            x = AveragePooling2D((2, 2), strides=(2, 2))(x)
        elif (pooltype == 1):
            x = ZeroPadding2D(padding=(0, 1))(x)
            x = AveragePooling2D((2, 2), strides=(2, 1))(x)
        elif (pooltype == 3):
            x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    return x, nb_filter
Code example #10
def Simple_CNN():
    """     
    Simple convolutional neural network, implemented for comparison purposes.    

   Expecting 32x32x1 image data as input
   
   Conv2D<64> - Conv2D<32> -  Dense<3>
   kernel_initializer = None
   activation = relu
   kernel_size = 3
   padding = valid(default)
   
    """
    
    # create model
    model = Sequential()
    # add convolutional layers
    model.add(Conv2D(64, kernel_size=3, activation='relu', input_shape=(32,32,1)))
    model.add(Conv2D(32, kernel_size=3, activation='relu'))
    # fully connected layer
    model.add(Flatten())
    model.add(Dropout(rate=0.35))
    model.add(Dense(3, activation='softmax'))

    
    return model
Code example #11
def initialize_model():

    one_filter_keras_model = Sequential()
    one_filter_keras_model.add(
        Conv2D(filters=40,
               kernel_size=(1, 11),
               padding="same",
               input_shape=(1, 1500, 5),
               kernel_constraint=NonNeg()))
    one_filter_keras_model.add(BatchNormalization(axis=-1))
    one_filter_keras_model.add(Activation('relu'))

    one_filter_keras_model.add(MaxPooling2D(pool_size=(1, 30)))

    one_filter_keras_model.add(Flatten())
    one_filter_keras_model.add(Dense(40))
    one_filter_keras_model.add(BatchNormalization(axis=-1))
    one_filter_keras_model.add(Activation('relu'))
    one_filter_keras_model.add(Dropout(0.5))

    one_filter_keras_model.add(Dense(1))
    one_filter_keras_model.add(Activation("sigmoid"))

    one_filter_keras_model.summary()
    one_filter_keras_model.compile(optimizer='adam',
                                   loss='binary_crossentropy',
                                   metrics=[precision, recall, specificity])

    return one_filter_keras_model
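The compile call references precision, recall and specificity, which Keras does not provide under those names, so they must be defined in the same file; a common backend-based sketch (assuming binary labels) is:

from keras import backend as K

def precision(y_true, y_pred):
    # fraction of predicted positives that are correct
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    pred_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_pos / (pred_pos + K.epsilon())

def recall(y_true, y_pred):
    # fraction of actual positives that are recovered
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_pos / (actual_pos + K.epsilon())

def specificity(y_true, y_pred):
    # fraction of actual negatives predicted as negative
    true_neg = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
    actual_neg = K.sum(K.round(K.clip(1 - y_true, 0, 1)))
    return true_neg / (actual_neg + K.epsilon())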
Code example #12
def return_dropout(dropout_type, dropout_rate, axis=-1, rank=None):
    if dropout_type is None:
        return None
    elif dropout_type == 'plain':
        return Dropout(rate=dropout_rate)
    elif dropout_type == 'add':
        return InstanceGaussianNoise(axis=axis, alpha=dropout_rate)
    elif dropout_type == 'mul':
        return GaussianDropout(rate=dropout_rate)
    elif dropout_type == 'alpha':
        return AlphaDropout(rate=dropout_rate)
    elif dropout_type == 'spatial':
        if axis == 1:
            dformat = 'channels_first'
        else:
            dformat = 'channels_last'
        if rank == 1:
            return SpatialDropout1D(rate=dropout_rate)
        elif rank == 2:
            return SpatialDropout2D(rate=dropout_rate, data_format=dformat)
        elif rank == 3:
            return SpatialDropout3D(rate=dropout_rate, data_format=dformat)
        else:
            return None
    else:
        return None
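Usage sketch:

# 2D spatial dropout for channels-last feature maps
drop = return_dropout('spatial', dropout_rate=0.3, axis=-1, rank=2)
# plain element-wise dropout
plain = return_dropout('plain', dropout_rate=0.5)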
Code example #13
def create_dummy_classifier(window_size: int,
                            num_rows_df: int,
                            num_output_fields: int,
                            neurons_rnn: int = 10,
                            dropout: float = 0.0,
                            learning_rate: float = 0.01,
                            bidirection: bool = True,
                            return_sequences: bool = False):
                            
    lr_schedule = keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=learning_rate,
        decay_steps=10000,
        decay_rate=0.9)

    model = keras.Sequential(name='dummy_classifier')

    model.add(Input(shape=(window_size, num_rows_df), name='input'))

    if bidirection:
        model.add(Bidirectional(
            LSTM(neurons_rnn, return_sequences=return_sequences),
            name='bidirection'))
    else:
        model.add(LSTM(neurons_rnn, name="rnn",
                       return_sequences=return_sequences))
    if return_sequences:
        model.add(Flatten())
    model.add(Dropout(dropout, name='dropout'))
    model.add(Dense(num_output_fields, activation='sigmoid', name='dense_output'))

    model.summary()

    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(learning_rate=lr_schedule), metrics=['accuracy', 'binary_accuracy'])
    return model
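A usage sketch (the shapes are illustrative, not from the original):

# 30 time steps of 8 features, one sigmoid output field
model = create_dummy_classifier(window_size=30, num_rows_df=8,
                                num_output_fields=1, neurons_rnn=16,
                                dropout=0.2, bidirection=True)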
Code example #14
def LSTM_Image(nb_channels=3, img_size=32, time_slot=10, num_color_chan=1):
    """
    Recurrent neural network for image data.
    Never tested.
    :param nb_channels: number of classes
    :param img_size: image size
    :param time_slot: number of frames/images in a video, length of the video
    :param num_color_chan: number of color channels in the image/frame
    """
    # The function is named LSTM_Image so it does not shadow keras.layers.LSTM,
    # which would make the calls below recurse into this function.
    # An LSTM expects (timesteps, features) input, so each image row is treated
    # as one timestep with the channels folded into the feature axis.
    model = Sequential()
    model.add(LSTM(time_slot, input_shape=(img_size, img_size * num_color_chan),
                   return_sequences=True, activation='sigmoid'))
    for _ in range(6):  # input_shape is only needed on the first layer
        model.add(LSTM(time_slot, return_sequences=True, activation='sigmoid'))

    # flatten and check
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Dropout(rate=0.5))
    model.add(Dense(nb_channels, activation='softmax'))
    
    return model
Code example #15
    def get_model(self, embedding_matrix, vocab_size, question_len=15, img_feat=2048, embed_dim=300):
        number_of_hidden_units_LSTM = 512
        number_of_dense_layers      = 3
        number_of_hidden_units      = 1024
        activation_function         = 'tanh'
        dropout_pct                 = 0.5

        # Image model - loading image features and reshaping
        model_image = Sequential()
        model_image.add(Reshape((img_feat,), input_shape=(img_feat,)))

        # Language Model - 3 LSTMs
        model_language = Sequential()
        # model_language.add(Embedding(vocab_size, embedding_matrix.shape[1], input_length=question_len,
                                        # weights=[embedding_matrix], trainable=False))
        model_language.add(LSTM(number_of_hidden_units_LSTM, return_sequences=True, input_shape=(question_len, embed_dim)))
        model_language.add(LSTM(number_of_hidden_units_LSTM, return_sequences=True))
        model_language.add(LSTM(number_of_hidden_units_LSTM, return_sequences=False))

        # combined model: Sequential models cannot be concatenated directly,
        # so join the two branch outputs with the functional API
        merged = concatenate([model_language.output, model_image.output])

        x = merged
        for _ in range(number_of_dense_layers):
            x = Dense(number_of_hidden_units, kernel_initializer='uniform')(x)
            x = Activation(activation_function)(x)
            x = Dropout(dropout_pct)(x)

        x = Dense(1000)(x)
        x = Activation('softmax')(x)

        model = Model(inputs=[model_language.input, model_image.input], outputs=x)
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        
        return model
Code example #16
def get_model():
    """
    Builds and returns a feed forward neural net
    :return: keras sequential model
    """

    model = tf.keras.Sequential()
    model.add(Dense(250, input_dim=13, activation=tf.nn.relu))
    model.add(Dropout(0.4))
    model.add(Dense(200, activation=tf.nn.relu))
    model.add(Dropout(0.4))
    model.add(Dense(100, activation=tf.nn.relu))
    model.add(Dropout(0.3))
    model.add(Dense(50, activation=tf.nn.relu))
    model.add(Dense(1, activation=tf.nn.sigmoid))
    return model
Code example #17
	def build(width, height, depth, classes):
		# initialize the model along with the input shape to be
		# "channels last" and the channels dimension itself
		model = Sequential()
		inputShape = (height, width, depth)
		chanDim = -1

		# if we are using "channels first", update the input shape
		# and channels dimension
		if K.image_data_format() == "channels_first":
			inputShape = (depth, height, width)
			chanDim = 1

		# first CONV => RELU => CONV => RELU => POOL layer set
		model.add(Conv2D(32, (3, 3), padding="same",
			input_shape=inputShape))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(Conv2D(32, (3, 3), padding="same"))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(MaxPooling2D(pool_size=(2, 2)))
		model.add(Dropout(0.25))

		# second CONV => RELU => CONV => RELU => POOL layer set
		model.add(Conv2D(64, (3, 3), padding="same"))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(Conv2D(64, (3, 3), padding="same"))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(MaxPooling2D(pool_size=(2, 2)))
		model.add(Dropout(0.25))

		# first (and only) set of FC => RELU layers
		model.add(Flatten())
		model.add(Dense(512))
		model.add(Activation("relu"))
		model.add(BatchNormalization())
		model.add(Dropout(0.5))

		# softmax classifier
		model.add(Dense(classes))
		model.add(Activation("softmax"))

		# return the constructed network architecture
		return model
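This is a MiniVGGNet-style builder; assuming it is a static method of a class (here called MiniVGGNet, a name the snippet does not show), usage would be:

model = MiniVGGNet.build(width=32, height=32, depth=3, classes=10)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])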
Code example #18
def build_model():
    inp = Input(shape=(FRAME_H, FRAME_W, 3))
    x = Conv2D(filters=8, kernel_size=(5, 5), activation='relu')(inp)
    x = MaxPooling2D((2, 2))(x)

    x = Conv2D(filters=16, kernel_size=(5, 5), activation='relu')(x)
    x = MaxPooling2D((2, 2))(x)

    x = Conv2D(filters=32, kernel_size=(5, 5), activation='relu')(x)
    x = MaxPooling2D((2, 2))(x)

    x = Flatten()(x)
    x = Dropout(0.5)(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(1, activation='tanh')(x)
    return Model(inputs=[inp], outputs=[x])
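The tanh output suggests a regression target scaled to [-1, 1] (e.g. a steering angle); a matching compile step (an assumption, since the snippet stops at model construction) would be:

model = build_model()   # requires FRAME_H and FRAME_W to be defined
model.compile(optimizer='adam', loss='mse')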
Code example #19
File: feat6_nn.py Project: nsakki55/kaggle
    def fit(self, X, y):
        ## scaler
        self.scaler = StandardScaler()
        X = self.scaler.fit_transform(X)

        #### build model
        self.model = Sequential()
        ## input layer
        self.model.add(Dropout(self.input_dropout, input_shape=(X.shape[1],)))
        ## hidden layers
        hidden_layers = self.hidden_layers
        while hidden_layers > 0:
            self.model.add(Dense(self.hidden_units))
            if self.batch_norm == "before_act":
                self.model.add(BatchNormalization())
            if self.hidden_activation == "prelu":
                self.model.add(PReLU())
            elif self.hidden_activation == "elu":
                self.model.add(ELU())
            else:
                self.model.add(Activation(self.hidden_activation))
            if self.batch_norm == "after_act":
                self.model.add(BatchNormalization())
            self.model.add(Dropout(self.hidden_dropout))
            hidden_layers -= 1

        ## output layer
        output_dim = 1
        output_act = "linear"
        self.model.add(Dense(output_dim))
        self.model.add(Activation(output_act))
        
        ## loss
        if self.optimizer == "sgd":
            sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
            self.model.compile(loss="mse", optimizer=sgd)
        else:
            self.model.compile(loss="mse", optimizer=self.optimizer)

        ## fit
        self.model.fit(X, y,
                       epochs=self.nb_epoch,
                       batch_size=self.batch_size,
                       validation_split=0, verbose=1)
        return self
Code example #20
def Multi_Step_LSTM_model():
    # use a Keras Sequential model; history_input and x_train are assumed to be
    # defined at module level
    model = Sequential()
    # first LSTM layer with Dropout regularisation; return_sequences=True feeds
    # the full sequence to the next layer
    model.add(
        LSTM(units=100,
             activation='relu',
             input_shape=(history_input, x_train.shape[1]),
             return_sequences=True))
    model.add(Dropout(0.2))
    # second LSTM layer with Dropout regularisation
    model.add(LSTM(units=50, activation='relu', return_sequences=True))
    model.add(Dropout(0.2))
    model.add(Flatten())
    # output layer with linear activation to predict the Open stock price
    model.add(Dense(units=1, activation="linear"))
    return model
Code example #21
def conv_block(input, growth_rate, dropout_rate=None, weight_decay=1e-4):
    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(input)
    x = Activation('relu')(x)
    x = Conv2D(growth_rate, (3, 3),
               kernel_initializer='he_normal',
               padding='same')(x)
    if (dropout_rate):
        x = Dropout(dropout_rate)(x)
    return x
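In DenseNet-style code such as this, conv_block is chained with channel-wise concatenation to form a dense block; a minimal sketch of that pattern (the dense_block helper is an assumption, not code from this project):

def dense_block(x, nb_layers, growth_rate, dropout_rate=None):
    # each new layer sees the concatenation of all previous feature maps
    for _ in range(nb_layers):
        new_features = conv_block(x, growth_rate, dropout_rate)
        x = Concatenate(axis=-1)([x, new_features])
    return x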
Code example #22
File: main.py Project: Ksenox/ANN-2021
def generateModel1():
    model = Sequential()
    model.add(Embedding(top_words, embedding_vector_length, input_length=max_review_length))
    model.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Dropout(0.3))
    model.add(LSTM(64, dropout=0.25))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
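top_words, embedding_vector_length and max_review_length are module-level constants the snippet does not show; typical IMDB-style values (assumptions) would be:

top_words = 5000              # vocabulary size
embedding_vector_length = 32  # embedding dimension
max_review_length = 500       # pad/truncate reviews to this length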
Code example #23
def create_model():
    model = tf.keras.models.Sequential([
        RandomCrop(32, 32, input_shape=(32, 32, 3)),
        RandomFlip(mode="horizontal"),
        RandomRotation(0.1),
        RandomZoom(0.1),
        tf.keras.layers.Conv2D(32, (5, 5),
                               padding="same",
                               input_shape=(32, 32, 3)),
        BatchNormalization(),
        ReLU(),
        tf.keras.layers.Conv2D(64, (3, 3), padding="same"),
        BatchNormalization(),
        ReLU(),
        tf.keras.layers.MaxPooling2D((3, 2), strides=1),
        Dropout(0.2),
        tf.keras.layers.Conv2D(128, (5, 5), padding="same"),
        BatchNormalization(),
        ReLU(),
        tf.keras.layers.Conv2D(128, (3, 3), padding="same"),
        BatchNormalization(),
        ReLU(),
        tf.keras.layers.MaxPooling2D((3, 2)),
        Dropout(0.4),
        tf.keras.layers.Conv2D(256, (3, 3), padding="same"),
        BatchNormalization(),
        ReLU(),
        tf.keras.layers.Conv2D(256, (3, 3), padding="same"),
        BatchNormalization(),
        ReLU(),
        tf.keras.layers.MaxPooling2D((3, 2)),
        Dropout(0.5),
        # tf.keras.layers.AveragePooling2D((1, 1), strides=1),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(256, activation="relu"),
        tf.keras.layers.Dense(10, activation="softmax"),
    ])
    model.compile(
        optimizer=tf.keras.optimizers.SGD(learning_rate=0.006, momentum=0.9),
        loss=tf.keras.losses.CategoricalCrossentropy(),
        metrics=["accuracy"],
    )
    return model
Code example #24
    def build(width, height, depth, classes):

        # Model initialization
        model = Sequential()
        input_shape = (height, width, depth)
        chan_dim = -1

        # Data formatting
        if k.image_data_format() == "channels_first":
            chan_dim = 1

        # First layer set
        model.add(Conv2D(16, (3, 3), padding="same", input_shape=input_shape))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chan_dim))
        model.add(Conv2D(16, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chan_dim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        # Second layer set
        model.add(Conv2D(32, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chan_dim))
        model.add(Conv2D(32, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chan_dim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        # Third layer set
        model.add(Flatten())
        model.add(Dense(64))
        model.add(Activation("relu"))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

        # Softmax classification layer set
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        return model
Code example #25
    def network(self, weights=None):

        num_inp = Input(shape=[self.state_length])
        num_feats = Dense(70, activation='relu')(num_inp)
        num_feats = Dense(40, activation='relu')(num_feats)

        board_inp = Input(shape=[10, 10, 10])

        board_feats = Dropout(rate=0.05)(
            BatchNormalization()(Conv2D(30,
                                        kernel_size=(3, 3),
                                        strides=(1, 1),
                                        activation='relu')(board_inp)))

        board_feats = Dropout(rate=0.05)(
            BatchNormalization()(Conv2D(30,
                                        kernel_size=(3, 3),
                                        strides=(1, 1),
                                        activation='relu')(board_feats)))

        board_feats = (Conv2D(30,
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              activation='relu')(board_feats))

        board_feats = Flatten()(board_feats)
        board_feats = Dropout(rate=0.05)(Dense(150,
                                               activation='relu')(board_feats))
        #board_feats = Dense(50, activation='relu')(board_feats)
        feats = Dropout(rate=0.05)(Concatenate()([num_feats, board_feats]))
        feats = Dropout(rate=0.02)(Dense(150, activation='relu')(feats))
        feats = Dense(60, activation='relu')(feats)
        output = Dense(4)(feats)

        model = Model([num_inp, board_inp], output)
        model.summary()
        opt = Adam(lr=self.learning_rate)
        model.compile(loss='mse', optimizer=opt)

        if weights:
            model.load_weights(weights)
        return model
Code example #26
    def create_nvidia_net(self, raw=120, column=320, channel=1):
        print('create nvidia model!!')

        input_shape = (raw, column, channel)

        activation = 'relu'
        keep_prob = 0.5
        keep_prob_dense = 0.5
        classes = 3

        model = Sequential()

        model.add(
            Conv2D(24, (5, 5),
                   input_shape=input_shape,
                   padding="valid",
                   strides=(2, 2)))
        model.add(Activation(activation))
        model.add(Dropout(keep_prob))

        model.add(Conv2D(36, (5, 5), padding="valid", strides=(2, 2)))
        model.add(Activation(activation))
        model.add(Dropout(keep_prob))

        model.add(Conv2D(48, (5, 5), padding="valid", strides=(2, 2)))
        model.add(Activation(activation))
        model.add(Dropout(keep_prob))

        model.add(Conv2D(64, (3, 3)))
        model.add(Activation(activation))
        model.add(Dropout(keep_prob))

        model.add(Conv2D(64, (3, 3)))
        model.add(Activation(activation))
        model.add(Dropout(keep_prob))

        # FC
        model.add(Flatten())

        model.add(Dense(100))
        model.add(Dropout(keep_prob_dense))

        model.add(Dense(50))
        model.add(Dropout(keep_prob_dense))

        model.add(Dense(10))
        model.add(Dropout(keep_prob_dense))

        model.add(Dense(classes))
        model.add(Activation('softmax'))

        self.model = model
Code example #27
def createModel(train_data):
    classes = [
        'battery', 'disc', 'glass', 'metals', 'paper', 'plastic_jug_bottle',
        'plastic_packaging', 'styrofoam'
    ]

    model = Sequential()
    # Add layers
    model.add(
        Conv2D(32, (3, 3),
               padding='same',
               input_shape=train_data.shape[1:],
               activation='relu',
               name='conv_1'))
    model.add(Conv2D(32, (3, 3), activation='relu', name='conv_2'))
    model.add(MaxPooling2D(pool_size=(2, 2), name='maxpool_1'))
    model.add(Dropout(0.25))

    model.add(
        Conv2D(64, (3, 3), padding='same', activation='relu', name='conv_3'))
    model.add(Conv2D(64, (3, 3), activation='relu', name='conv_4'))
    model.add(MaxPooling2D(pool_size=(2, 2), name='maxpool_2'))
    model.add(Dropout(0.25))

    model.add(
        Conv2D(128, (3, 3), padding='same', activation='relu', name='conv_5'))
    model.add(Conv2D(128, (3, 3), activation='relu', name='conv_6'))
    model.add(MaxPooling2D(pool_size=(2, 2), name='maxpool_3'))

    model.add(Flatten())
    model.add(Dense(512, activation='relu', name='dense_1'))
    model.add(Dropout(0.5))
    model.add(Dense(128, activation='relu', name='dense_2'))
    model.add(Dense(len(classes), name='output'))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['acc'])  # optimizer=RMSprop(lr=0.001)

    return model
Code example #28
    def __init__(self):
        self.D = Sequential()
        self.D.add(
            Dense(1024,
                  input_dim=784,
                  kernel_initializer=initializers.RandomNormal(stddev=0.02)))
        self.D.add(LeakyReLU(0.2))
        self.D.add(Dropout(0.3))

        self.D.add(Dense(512))
        self.D.add(LeakyReLU(0.2))
        self.D.add(Dropout(0.3))

        self.D.add(Dense(256))
        self.D.add(LeakyReLU(0.2))
        self.D.add(Dropout(0.3))

        self.D.add(Dense(1, activation='sigmoid'))
        self.D.compile(loss='binary_crossentropy',
                       optimizer=Adam(lr=0.0002, beta_1=0.5))

        self.D.trainable = False
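Setting self.D.trainable = False after compiling is the standard setup for the stacked (adversarial) model of a GAN: the discriminator trains through its own compiled model, but stays frozen when the generator is trained through the stack. A sketch of that stacked model, continuing inside the same __init__ (the generator self.G and the attribute self.AM are assumptions):

        # hypothetical: stack an assumed generator self.G with the frozen discriminator
        self.AM = Sequential()
        self.AM.add(self.G)
        self.AM.add(self.D)
        self.AM.compile(loss='binary_crossentropy',
                        optimizer=Adam(lr=0.0002, beta_1=0.5))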
Code example #29
def CNN_Image_Multi(nb_channels=3, dropoutRate=0.5, act='relu', k_size=3, d_layer=512,
                    k_regularizer=regularizers.l2(0.001), img_size=32, num_color_chan=2):
    """
    Deep convolutional 2D neural network with softmax classifier

    :param nb_channels: number of class
    :param dropoutRate: drop-out rate of last layer
    :param act: activation function
    :param k_size: convolutional kernel size
    :param k_regularizer: kernel regularizer
    :param d_layer: number of hidden unit in the last layer
    :param img_size: image size
    :param num_color_chan: number of color channels in the image; not RGB values but delta and
        beta-gamma power values of the electrodes are used
    :param input_dimension: size of the input 
 
   Expecting 32x32x2 image data as input
   
   Conv2D<32> - Conv2D<32> - Conv2D<32> - Conv2D<32> - MaxPool2D<2,2> - 
   Conv2D<64> - Conv2D<64> - MaxPool2D<2,2> - 
   Conv2D<128> -  MaxPool2D<2,2> -
   Dense<512> - Dense<3>
   
    """
    strides = None
    print('PARAMETERS OF MODELS: ', act, ' ', k_size, ' ', d_layer)
    # create model
    model = Sequential()
    # add layers

    model.add(Conv2D(32, kernel_size=k_size, input_shape=(img_size, img_size, num_color_chan),
                     kernel_initializer='glorot_uniform', activation=act, kernel_regularizer=k_regularizer))
    model.add(Conv2D(32, kernel_size=k_size, padding='same', kernel_initializer='glorot_uniform', activation=act, kernel_regularizer=k_regularizer) )
    model.add(Conv2D(32, kernel_size=k_size, padding='same', kernel_initializer='glorot_uniform', activation=act, kernel_regularizer=k_regularizer) )
    model.add(Conv2D(32, kernel_size=k_size, padding='same', kernel_initializer='glorot_uniform', activation=act, kernel_regularizer=k_regularizer) )
    model.add(MaxPooling2D(pool_size=(2, 2), strides=strides,  padding='valid', data_format='channels_last'))
    # after max-pooling
    model.add(Conv2D(64, kernel_size=k_size, padding='same', kernel_initializer='glorot_uniform', activation=act, kernel_regularizer=k_regularizer))
    model.add(Conv2D(64, kernel_size=k_size, padding='same', kernel_initializer='glorot_uniform', activation=act, kernel_regularizer=k_regularizer))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=strides,  padding='valid', data_format='channels_last'))
    #another max-pooling
    model.add(Conv2D(128, kernel_size=k_size, padding='same', kernel_initializer='glorot_uniform', activation=act, kernel_regularizer=k_regularizer))
    model.add(MaxPooling2D(pool_size=(2, 2),  strides=strides, padding='valid', data_format='channels_last'))
    # fully connected layer
    model.add(Flatten())
    model.add(Dense(d_layer))
    model.add(Dropout(rate=dropoutRate))
    model.add(Dense(nb_channels, activation='softmax'))
    
    return model
Code example #30
def CNN_Video(nb_channels=3, dropoutRate=0.5, act='relu', k_size=3, d_layer=512,
              k_regularizer=regularizers.l1(0.001), img_size=32, time_slot=100, num_color_chan=1):

    """ 
    Deep convolutional 3D neural network with softmax classifier

    :param nb_channels: number of class
    :param dropoutRate: drop-out rate of last layer
    :param act: activation function
    :param k_size: convolutional kernel size
    :param k_regularizer: kernel regularizer
    :param d_layer: number of hidden unit in the last layer
    :param img_size: image size
    :param time_slot: number of frames/images in a video, length of the video
    :param num_color_chan: number of color channels in the image/frame; not RGB values but the raw
        electrode values are used
    :param input_dimension: size of the input 
 
   Expecting 100x32x32x1 video data as input
   
   Conv3D<32> - Conv3D<32> - Conv3D<32> - Conv3D<32> - MaxPool3D<2,2,2> - 
   Conv3D<64> - Conv3D<64> - MaxPool3D<2,2,2> - 
   Dense<512> - Dense<3>
   
    """
 
    strides = None
    # In each convolutional layer, 10 consecutive images are convolved
    kernel = (10, k_size, k_size)

    print('PARAMETERS OF MODELS: ', act, ' ', k_size, ' ', d_layer)
  
    model = Sequential()
    # add layers
    model.add(Conv3D(32, kernel_size=kernel, input_shape=(time_slot,img_size,img_size,num_color_chan), activation=act))
    model.add(Conv3D(32, kernel_size=kernel, padding='same', kernel_initializer='glorot_uniform', activation=act ))
    model.add(Conv3D(32, kernel_size=kernel, padding='same', kernel_initializer='glorot_uniform', activation=act ))
    model.add(Conv3D(32, kernel_size=kernel, padding='same', kernel_initializer='glorot_uniform', activation=act ))
    model.add(MaxPooling3D(pool_size=kernel, strides=strides, data_format='channels_last'))
    # new layer
    model.add(Conv3D(64, kernel_size=kernel, padding='same', kernel_initializer='glorot_uniform', activation=act))
    model.add(Conv3D(64, kernel_size=kernel, padding='same', kernel_initializer='glorot_uniform', activation=act))
    model.add(MaxPooling3D(pool_size=(2,2,2),strides=strides, data_format='channels_last'))
    
    # flatten and check
    model.add(Flatten())
    model.add(Dense(d_layer))
    model.add(Dropout(rate=dropoutRate))
    model.add(Dense(nb_channels, activation='softmax'))
    
    return model