def __init__(self, inputs):
    """Fully-connected regression net: 128 -> 64 -> 16 ReLU hidden layers
    and a single linear output, trained with MSE + SGD.

    Args:
        inputs: number of input features (input_dim of the first layer).
    """
    Sequential.__init__(self)
    # First layer pins the input dimension; the rest of the ReLU stack
    # tapers toward the scalar output.
    self.add(Dense(units=128, activation='relu', input_dim=inputs))
    for width in (64, 16):
        self.add(Dense(units=width, activation='relu'))
    # Linear output unit for regression.
    self.add(Dense(units=1, activation='linear'))
    self.compile(loss='mean_squared_error', optimizer='sgd', metrics=[])
def __init__(self, input_shape):
    """Two-branch classifier (Keras 1 API): a dense branch over a flat
    192-feature vector and a convolutional branch over an image, merged by
    concatenation into a 99-way softmax head.

    Args:
        input_shape: shape of the image fed to the convolutional branch.
    """
    Sequential.__init__(self)
    # Dense branch over the 192-dimensional flat feature vector.
    dense_branch = Sequential()
    dense_branch.add(Dense(100, input_dim = 192))
    dense_branch.add(Activation('relu'))
    # Convolutional branch over the raw image.
    conv_branch = Sequential()
    conv_branch.add(Convolution2D(10, 5, 5, input_shape=input_shape, border_mode='same'))
    conv_branch.add(Activation('relu'))
    conv_branch.add(MaxPooling2D(pool_size=(2, 2)))
    conv_branch.add(Convolution2D(15, 5, 5))
    conv_branch.add(Activation('relu'))
    conv_branch.add(MaxPooling2D(pool_size=(2, 2)))
    conv_branch.add(Dropout(0.5))
    conv_branch.add(Flatten())
    # Concatenate both branches, then classify over the 99 classes.
    self.add(Merge([dense_branch, conv_branch], mode = 'concat'))
    self.add(Dense(100))
    self.add(Activation('relu'))
    self.add(Dense(99))
    self.add(Activation('softmax'))
    # NOTE(review): 'precision'/'recall'/'fmeasure' are Keras 1 metric names
    # (removed in Keras 2) -- kept as-is to preserve behavior.
    self.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                 metrics=['categorical_accuracy', 'precision', 'recall', 'fmeasure'])
def __init__(self, input_dimension, quantiles, arch, layers=None):
    """
    Create a fully-connected neural network.

    Args:
        input_dimension(:code:`int`): Number of input features
        quantiles(:code:`array`): The quantiles to predict given
            as fractions within [0, 1].
        arch(tuple): Tuple :code:`(d, w, a)` containing :code:`d`, the
            number of hidden layers in the network, :code:`w`, the width
            of the network and :code:`a`, the type of activation functions
            to be used as string.
        layers(list): Optional pre-built list of Keras layers; when given,
            :code:`arch` is ignored.
    """
    quantiles = np.array(quantiles)
    # One output per requested quantile.
    output_dimension = quantiles.size

    if layers is None:
        if len(arch) == 0:
            # No hidden layers: a single linear map from input to outputs.
            # BUG FIX: input_shape must be a 1-tuple -- '(input_dimension)'
            # is just a parenthesized int, which Keras rejects; the other
            # branch below already uses the correct '(input_dimension,)'.
            layers = [Dense(output_dimension, input_shape=(input_dimension,))]
        else:
            d, w, a = arch
            layers = [Dense(w, input_shape=(input_dimension,))]
            for _ in range(d - 1):
                layers.append(Dense(w, input_shape=(w,)))
                if a is not None:
                    layers.append(Activation(a))
            layers.append(Dense(output_dimension, input_shape=(w,)))

    KerasModel.__init__(self, input_dimension, quantiles)
    Sequential.__init__(self, layers)
def __init__(self, num_classes):
    """VGG-style CNN for 32x32x3 inputs: two conv blocks (32 then 64
    filters), a 512-unit dense head, and a softmax over ``num_classes``.

    Args:
        num_classes: number of output classes.
    """
    Sequential.__init__(self)
    # Each block: same-padded conv, valid conv, 2x2 max-pool, dropout,
    # with BatchNorm before every ReLU.
    is_first = True
    for filters in (32, 64):
        if is_first:
            # Only the first layer declares the fixed input shape.
            self.add(Conv2D(filters, (3, 3), padding='same', input_shape=(32, 32, 3)))
            is_first = False
        else:
            self.add(Conv2D(filters, (3, 3), padding='same'))
        self.add(BatchNormalization())
        self.add(Activation('relu'))
        self.add(Conv2D(filters, (3, 3)))
        self.add(BatchNormalization())
        self.add(Activation('relu'))
        self.add(MaxPooling2D(pool_size=(2, 2)))
        self.add(Dropout(0.25))
    # Dense classification head.
    self.add(Flatten())
    self.add(Dense(512))
    self.add(BatchNormalization())
    self.add(Activation('relu'))
    self.add(Dropout(0.5))
    self.add(Dense(num_classes))
    self.add(Activation('softmax'))
    self.compile(loss='categorical_crossentropy', optimizer='adam',
                 metrics=['accuracy'])
def __init__(self, dims=93, nb_classes=9, nb_epoch=50, learning_rate=0.004,
             validation_split=0.0, batch_size=128, verbose=1):
    """Deep MLP classifier using the old Keras 0.x ``Dense(in, out)`` API:
    a 512 -> 256 -> 256*... -> 128 funnel with PReLU/BatchNorm and tapering
    dropout, ending in a softmax over ``nb_classes``.
    """
    Sequential.__init__(self)
    # Keep training hyper-parameters on the instance for fit-time use.
    self.dims = dims
    self.nb_classes = nb_classes
    self.nb_epoch = nb_epoch
    self.learning_rate = learning_rate
    self.validation_split = validation_split
    self.batch_size = batch_size
    self.verbose = verbose
    print('Initializing Keras Deep Net with %d features and %d classes'
          % (self.dims, self.nb_classes))
    # Input dropout, then the tanh entry layer with BatchNorm.
    self.add(Dropout(0.15))
    self.add(Dense(dims, 512, activation='tanh'))
    self.add(BatchNormalization((512,)))
    self.add(Dropout(0.5))
    # Middle blocks: Dense -> PReLU -> BatchNorm -> Dropout, shrinking width
    # and dropout rate together.
    for in_width, out_width, drop_rate in ((512, 256, 0.3), (256, 128, 0.1)):
        self.add(Dense(in_width, out_width))
        self.add(PReLU((out_width,)))
        self.add(BatchNormalization((out_width,)))
        self.add(Dropout(drop_rate))
    # Softmax classification head.
    self.add(Dense(128, nb_classes))
    self.add(Activation('softmax'))
    optimizer = SGD(lr=self.learning_rate, decay=1e-7, momentum=0.99, nesterov=True)
    self.compile(loss='categorical_crossentropy', optimizer=optimizer)
def __init__(self):
    """Set checkpoint/dataset locations, seed NumPy for reproducibility,
    then delegate network construction to :meth:`create_model`."""
    Sequential.__init__(self)
    # File-system locations for checkpoints and the training data.
    self.dataset_path = "./esd/"
    self.weights_path = "./weights.hdf5"
    # Fixed seed so runs are reproducible.
    np.random.seed(123)
    self.create_model()
def __init__(self, units, optim='sgd'):
    """Build an MLP regressor from a list of layer widths.

    ``units[0]`` is the input dimension, ``units[1:-1]`` are ReLU hidden
    widths, and ``units[-1]`` is the linear output width.

    Args:
        units: sequence of layer widths (input first, output last).
        optim: Keras optimizer name or instance (default 'sgd').
    """
    Sequential.__init__(self)
    # First hidden layer fixes the input dimension.
    self.add(Dense(units[1], input_shape=(units[0], ), activation='relu'))
    # Remaining hidden layers.
    for width in units[2:-1]:
        self.add(Dense(width, activation='relu'))
    # Linear output for regression, trained with mean squared error.
    self.add(Dense(units[-1]))
    self.compile(loss='mse', optimizer=optim)
def __init__(self, neurons=3, dropout=0.05, dense=1, train_percent=0.75):
    """Store hyper-parameters and data placeholders; the actual layers are
    built elsewhere.

    Args:
        neurons: hidden unit count used later when building the net.
        dropout: dropout rate used later when building the net.
        dense: dense layer count/width used later when building the net.
        train_percent: fraction of the data reserved for training.
    """
    Sequential.__init__(self)
    # Architecture / split hyper-parameters.
    self.neurons = neurons
    self.dropout = dropout
    self.dense = dense
    self.train_percent = train_percent
    # Features are scaled into [-1, 1] before fitting.
    self.scaler = MinMaxScaler(feature_range=(-1, 1))
    # Populated later: raw frame, feature matrix, targets, predictions.
    self.df = None
    self.X = None
    self.y = None
    self.predictions = None
def __init__(self, units, optim='sgd'):
    """Build an MLP classifier from a list of layer widths.

    ``units[0]`` is the input dimension and ``units[-1]`` the number of
    outputs: a single output gets a sigmoid head with binary cross-entropy,
    more than one gets a softmax head with categorical cross-entropy.

    Args:
        units: sequence of layer widths (input first, output last).
        optim: Keras optimizer name or instance (default 'sgd').
    """
    Sequential.__init__(self)
    # First hidden layer fixes the input dimension.
    self.add(Dense(units[1], input_shape=(units[0], ), activation='relu'))
    for width in units[2:-1]:
        self.add(Dense(width, activation='relu'))
    out_width = units[-1]
    if out_width == 1:
        # Binary head: one sigmoid unit.
        self.add(Dense(out_width, activation='sigmoid'))
        loss_fn = binary_crossentropy
    else:
        # Multi-class head: softmax over the outputs.
        self.add(Dense(out_width, activation='softmax'))
        loss_fn = categorical_crossentropy
    self.compile(loss=loss_fn, optimizer=optim, metrics=['accuracy'])
def __init__(self, input_shape):
    """Small ConvNet classifier over 99 classes (Keras 1 API).

    Args:
        input_shape: shape of the input image.
    """
    Sequential.__init__(self)
    # Conv block 1: ten 5x5 filters, valid padding, 2x2 max-pool.
    self.add(Convolution2D(10, 5, 5, input_shape=input_shape, border_mode='valid'))
    self.add(Activation('relu'))
    self.add(MaxPooling2D(pool_size=(2, 2)))
    # Conv block 2: fifteen 5x5 filters, pooled then regularised with dropout.
    self.add(Convolution2D(15, 5, 5))
    self.add(Activation('relu'))
    self.add(MaxPooling2D(pool_size=(2, 2)))
    self.add(Dropout(0.5))
    # Softmax head over the 99 classes.
    self.add(Flatten())
    self.add(Dense(99))
    self.add(Activation('softmax'))
    # NOTE(review): the original (French) comment claimed the loss was
    # sparse_categorical_crossentropy, but the compiled loss is the one-hot
    # categorical_crossentropy -- confirm which targets callers supply.
    self.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                 metrics=['categorical_accuracy', 'precision', 'recall', 'fmeasure'])
def __init__(self):
    """Single-hidden-layer MLP: 192 inputs -> 100 ReLU units -> 99-way
    softmax (Keras 1 metric names)."""
    Sequential.__init__(self)
    self.add(Dense(100, input_dim = 192))
    self.add(Activation('relu'))
    self.add(Dense(99))
    self.add(Activation('softmax'))
    # Per the original experiments: categorical_crossentropy + rmsprop worked
    # best; mse/sgd combinations and extra hidden layers performed worse.
    self.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                 metrics=['categorical_accuracy', 'precision', 'recall', 'fmeasure'])
def __init__(self, params):
    """Build a convolutional network from a parameter dictionary.

    ``params`` is merged into ``self.__dict__``; keys used here:
    input_shape, cropping, nFilters, kernel, maxPoolStride, convDropout,
    FC, fcDropout, outDim.
    """
    Sequential.__init__(self)
    self.__dict__.update(params)
    # Normalise pixel values into [-0.5, 0.5], then crop the frame.
    self.add(Lambda(lambda x: (x / 255.0) - 0.5,
                    input_shape=params['input_shape']))
    self.add(Cropping2D(cropping=params['cropping'],
                        input_shape=self.input_shape))
    # Convolutional stages: conv -> BatchNorm -> max-pool -> dropout.
    for idx, filters in enumerate(self.nFilters):
        size = self.kernel[idx]
        self.add(Conv2D(filters, (size, size), padding='same',
                        activation='relu'))
        self.add(BatchNormalization())
        stride = self.maxPoolStride[idx]
        self.add(MaxPooling2D((stride, stride)))
        self.add(Dropout(self.convDropout))
    self.add(Flatten())
    # Fully-connected head, one Dense/BN/Dropout triple per width.
    for width in self.FC:
        self.add(Dense(width, activation='relu'))
        self.add(BatchNormalization())
        self.add(Dropout(self.fcDropout))
    self.add(Dense(self.outDim, activation='sigmoid'))
    self.compile(loss='mse', optimizer='adam')
    self.summary()
def __init__(self, is_dropout=True, is_bn=False, seed=22, initializer="glorot_uniform", is_init_fixed=True):
    """All-convolutional classifier for 32x32x3 inputs with a 10-way
    softmax head (1x1 convs + global average pooling, no dense layers).

    Args:
        is_dropout: when True, insert Dropout(0.5) after each strided conv.
        is_bn: when True, insert BatchNormalization before each ReLU.
        seed: stored on the instance and used to seed NumPy immediately.
        initializer: Keras weight initializer name; the literal "LSUV"
            selects the branch built with default initializers (presumably
            so LSUV initialisation is applied afterwards -- TODO confirm).
        is_init_fixed: NOTE(review): unused in this constructor -- confirm
            whether anything else reads it.

    NOTE(review): the layer calls mix Keras 1 argument names
    (border_mode, subsample) with the Keras 2 kernel_initializer --
    confirm which Keras version this targets.
    """
    Sequential.__init__(self)
    self.seed = seed
    # Seed NumPy so weight initialisation is reproducible.
    np.random.seed(seed)
    # build the network architecture
    if initializer != "LSUV":
        # 96-filter stage; the third conv downsamples via subsample=(2, 2).
        self.add(
            Convolution2D(96, 3, 3, border_mode='same',
                          input_shape=(32, 32, 3),
                          kernel_initializer=initializer))
        if is_bn:
            self.add(BatchNormalization())
        self.add(Activation('relu'))
        self.add(
            Convolution2D(96, 3, 3, border_mode='same',
                          kernel_initializer=initializer))
        if is_bn:
            self.add(BatchNormalization())
        self.add(Activation('relu'))
        self.add(
            Convolution2D(96, 3, 3, border_mode='same', subsample=(2, 2),
                          kernel_initializer=initializer))
        if is_dropout:
            self.add(Dropout(0.5))
        # 192-filter stage; again the last 3x3 conv downsamples.
        self.add(
            Convolution2D(192, 3, 3, border_mode='same',
                          kernel_initializer=initializer))
        if is_bn:
            self.add(BatchNormalization())
        self.add(Activation('relu'))
        self.add(
            Convolution2D(192, 3, 3, border_mode='same',
                          kernel_initializer=initializer))
        if is_bn:
            self.add(BatchNormalization())
        self.add(Activation('relu'))
        self.add(
            Convolution2D(192, 3, 3, border_mode='same', subsample=(2, 2),
                          kernel_initializer=initializer))
        if is_dropout:
            self.add(Dropout(0.5))
        # Head: 3x3 conv, 1x1 conv, then a 10-filter 1x1 conv followed by
        # global average pooling and softmax.
        self.add(
            Convolution2D(192, 3, 3, border_mode='same',
                          kernel_initializer=initializer))
        if is_bn:
            self.add(BatchNormalization())
        self.add(Activation('relu'))
        self.add(
            Convolution2D(192, 1, 1, border_mode='valid',
                          kernel_initializer=initializer))
        if is_bn:
            self.add(BatchNormalization())
        self.add(Activation('relu'))
        self.add(
            Convolution2D(10, 1, 1, border_mode='valid',
                          kernel_initializer=initializer))
        self.add(GlobalAveragePooling2D())
        self.add(Activation('softmax'))
    else:
        # Same topology, but without explicit kernel_initializer arguments
        # (Keras defaults; LSUV presumably applied externally -- TODO confirm).
        self.add(
            Convolution2D(96, 3, 3, border_mode='same',
                          input_shape=(32, 32, 3)))
        if is_bn:
            self.add(BatchNormalization())
        self.add(Activation('relu'))
        self.add(Convolution2D(96, 3, 3, border_mode='same'))
        if is_bn:
            self.add(BatchNormalization())
        self.add(Activation('relu'))
        self.add(
            Convolution2D(96, 3, 3, border_mode='same', subsample=(2, 2)))
        if is_dropout:
            self.add(Dropout(0.5))
        self.add(Convolution2D(192, 3, 3, border_mode='same'))
        if is_bn:
            self.add(BatchNormalization())
        self.add(Activation('relu'))
        self.add(Convolution2D(192, 3, 3, border_mode='same'))
        if is_bn:
            self.add(BatchNormalization())
        self.add(Activation('relu'))
        self.add(
            Convolution2D(192, 3, 3, border_mode='same', subsample=(2, 2)))
        if is_dropout:
            self.add(Dropout(0.5))
        self.add(Convolution2D(192, 3, 3, border_mode='same'))
        if is_bn:
            self.add(BatchNormalization())
        self.add(Activation('relu'))
        self.add(Convolution2D(192, 1, 1, border_mode='valid'))
        if is_bn:
            self.add(BatchNormalization())
        self.add(Activation('relu'))
        self.add(Convolution2D(10, 1, 1, border_mode='valid'))
        self.add(GlobalAveragePooling2D())
        self.add(Activation('softmax'))
def __init__(self, name):
    """Initialise both bases: the Keras Sequential container and the
    Wrapper base, which receives ``name`` (semantics defined elsewhere)."""
    Sequential.__init__(self)
    Wrapper.__init__(self, name)
def __init__(self, config):
    """Initialise both bases: AbstractModel with the given ``config``
    (semantics defined elsewhere) and the Keras Sequential container."""
    AbstractModel.__init__(self, config)
    Sequential.__init__(self)
def __init__(self):
    """Deep MLP classifier over 7*24 = 168 input features: widening
    Dense/BatchNorm/LeakyReLU stacks (128 -> 256 -> 512 -> 1024 -> 256)
    with increasing dropout, ending in a 9-way softmax.

    Compiled with Adam + categorical cross-entropy; prints a summary.
    """
    # 7 days x 24 hours worth of features -- presumably hourly data over a
    # week; TODO confirm against the data pipeline.
    features = 7 * 24
    # Commented-out layers below are kept as a record of configurations
    # that were tried and abandoned.
    layers = [
        Dense(128, input_dim=features),
        LeakyReLU(0.05),
        Dropout(0.2),
        # Dense(128), BatchNormalization(), LeakyReLU(0.05),
        # Dense(128), BatchNormalization(), LeakyReLU(0.05), Dropout(0.2),
        Dense(128), BatchNormalization(), LeakyReLU(0.05),
        Dense(128), BatchNormalization(), LeakyReLU(0.05), Dropout(0.2),
        Dense(128), BatchNormalization(), LeakyReLU(0.05),
        Dense(256), BatchNormalization(), LeakyReLU(0.05), Dropout(0.3),
        Dense(256), BatchNormalization(), LeakyReLU(0.05),
        Dense(256), BatchNormalization(), LeakyReLU(0.05), Dropout(0.3),
        Dense(256), BatchNormalization(), LeakyReLU(0.05),
        Dense(256), BatchNormalization(), LeakyReLU(0.05), Dropout(0.3),
        Dense(256), BatchNormalization(), LeakyReLU(0.05),
        # Dense(256), LeakyReLU(0.05), Dropout(0.3),
        # Dense(256), LeakyReLU(0.05), Dropout(0.3),
        Dense(512), BatchNormalization(), LeakyReLU(0.05), Dropout(0.4),
        Dense(512), BatchNormalization(), LeakyReLU(0.05),
        Dense(512), BatchNormalization(), LeakyReLU(0.05), Dropout(0.4),
        Dense(512), BatchNormalization(), LeakyReLU(0.05),
        # Dense(512), LeakyReLU(0.05), Dropout(0.4),
        # Dense(1024), LeakyReLU(0.05), Dropout(0.5),
        Dense(1024), BatchNormalization(), LeakyReLU(0.05), Dropout(0.5),
        Dense(256), BatchNormalization(), LeakyReLU(0.05),
        Dense(9, activation='softmax')
    ]
    Sequential.__init__(self, layers=layers)
    self.compile(optimizer='adam',
                 loss='categorical_crossentropy',
                 metrics=['accuracy'])
    self.summary()
def __init__(self):
    """Initialise the Keras Sequential base with no layers; layers are
    presumably added elsewhere -- TODO confirm against the class's methods."""
    Sequential.__init__(self)