def build(size, loadweights, lopath):
    model = Sequential()
    model.add(LSTM(200, return_sequences=True, input_shape=(size, 8),
                   activation='relu'))
    model.add(LSTM(300, return_sequences=True, activation='relu'))
    model.add(LSTM(450, return_sequences=True, activation='relu'))
    model.add(LSTM(500, return_sequences=True, activation='relu'))
    model.add(Dropout(.4))
    model.add(Dense(250, activation='relu'))
    model.add(Dense(100, activation='relu'))
    model.add(Dense(75, activation='relu'))
    model.add(Dropout(.1))
    model.add(Dense(1))
    model.compile(loss='mse', optimizer='Nadam')
    model.build()
    if loadweights:
        model.load_weights(lopath)
    print("model has been built")
    return model

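# Usage sketch for build() above, with hypothetical arguments: 'size' is the
# window length of the 8-feature input sequences and 'weights.h5' stands in
# for a real checkpoint path; neither value comes from the original code.
# model = build(size=50, loadweights=False, lopath='weights.h5')
# model.summary()
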
def build_bilstm_crf_model(NUM_CLASS):
    """Bidirectional LSTM with embedding + CRF."""
    model = Sequential()
    # model.add(Embedding(VOCAB_SIZE, output_dim=EMBEDDING_OUT_DIM, mask_zero=True))
    # model.add(Dropout(DROPOUT_RATE))
    model.add(LSTM(HIDDEN_UNITS, return_sequences=True))
    # model.add(Bidirectional(LSTM(HIDDEN_UNITS, return_sequences=True)))
    # model.add(Dropout(DROPOUT_RATE))
    model.add(TimeDistributed(Dense(NUM_CLASS)))
    model.add(Dropout(0.5))
    crf_layer = CRF(NUM_CLASS, sparse_target=True)
    model.add(crf_layer)
    model.build((None, 238, 768))
    # model.summary()
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None,
                decay=0.0, amsgrad=False)
    # use the Adam optimizer configured above
    model.compile(loss=losses.crf_loss, optimizer=adam,
                  metrics=[metrics.crf_accuracy])
    return model

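# Usage sketch for build_bilstm_crf_model() above (assumed setup, since
# HIDDEN_UNITS, CRF, losses, and metrics come from keras-contrib-style
# imports not shown here). Inputs are 768-dimensional embeddings of
# length-238 sequences, matching the model.build() shape.
# bilstm_crf = build_bilstm_crf_model(NUM_CLASS=9)
# bilstm_crf.fit(x, y, batch_size=32, epochs=5)  # x: (samples, 238, 768)
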
def get_small_model():
    use_dropout = True
    num_classes = 10
    small_model = Sequential()
    # input_shape belongs on the TimeDistributed wrapper, not the inner Conv2D
    small_model.add(TimeDistributed(
        Conv2D(filters=32, kernel_size=(5, 5), activation='relu'),
        input_shape=(10, 212, 380, 1)))
    small_model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
    if use_dropout:
        small_model.add(TimeDistributed(Dropout(0.25)))
    small_model.add(TimeDistributed(
        Conv2D(filters=32, kernel_size=(3, 3), activation='relu')))
    small_model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
    if use_dropout:
        small_model.add(TimeDistributed(Dropout(0.25)))
    small_model.add(TimeDistributed(Flatten()))
    small_model.add(LSTM(64, dropout=0.25))
    if use_dropout:
        small_model.add(Dropout(0.25))
    small_model.add(Dense(256, activation='relu'))
    small_model.add(Dense(num_classes, activation='softmax'))
    small_model.build(input_shape=(None, 10, 212, 380, 1))
    # load saved weights into the architecture built above
    small_model.load_weights("small_model_weights.h5")
    return small_model

def test_batchnorm_layer_reload(self):
    # Save a tf backend keras h5 model
    tf_model = KTF.tf.keras.models.Sequential([
        KTF.tf.keras.layers.Dense(10, kernel_initializer="zeros"),
        KTF.tf.keras.layers.BatchNormalization(),
    ])
    tf_model.build(input_shape=(1, 10))
    _, fname = tempfile.mkstemp(".h5")
    tf_model.save(fname)

    # Load from MXNet backend keras
    try:
        mx_model = load_model(fname, compile=False)
    except TypeError:
        warnings.warn("Could not reload from tensorflow backend saved model.")
        assert False

    # Retest with mxnet backend keras save + load
    mx_model_2 = Sequential([
        Dense(10, kernel_initializer="zeros"),
        BatchNormalization(),
    ])
    mx_model_2.build(input_shape=(1, 10))
    _, fname = tempfile.mkstemp(".h5")
    mx_model_2.save(fname)
    try:
        mx_model_3 = load_model(fname, compile=False)
    except TypeError:
        warnings.warn("Could not reload from MXNet backend saved model.")
        assert False

def __init__(self, input_dim, n_hidden_units, n_hidden_layers,
             nonlinearity='tanh', bias_sigma=0.0, weight_sigma=1.25,
             input_layer=None, flip=False, output_dim=None):
    # if input_layer is not None:
    #     assert input_layer.output_shape[1] == input_dim
    self.input_dim = input_dim
    self.n_hidden_units = n_hidden_units
    self.n_hidden_layers = n_hidden_layers
    self.nonlinearity = nonlinearity
    self.bias_sigma = bias_sigma
    self.weight_sigma = weight_sigma
    self.input_layer = input_layer
    if output_dim is None:
        output_dim = n_hidden_units
    self.output_dim = output_dim

    model = Sequential()
    if input_layer is not None:
        model.add(input_layer)
    for i in xrange(n_hidden_layers):
        nunits = n_hidden_units if i < n_hidden_layers - 1 else output_dim
        if flip:
            model.add(Activation(nonlinearity, input_shape=(input_dim,),
                                 name='_a%d' % i))
            model.add(Dense(nunits, name='_d%d' % i))
        else:
            model.add(Dense(nunits, input_shape=(input_dim,),
                            name='_d%d' % i))
            if i < n_hidden_layers - 1 or self.output_dim == self.n_hidden_units:
                model.add(Activation(nonlinearity, name='_a%d' % i))
            else:
                # Theano optimizes out the nonlinearity when it can, which
                # breaks the Rop computation below. Give it something it
                # won't optimize out.
                model.add(Activation(lambda x: T.minimum(x, 999999.999),
                                     name='_a%d' % i))
    model.build()

    self.model = model
    self.weights = model.get_weights()
    self.dense_layers = filter(lambda x: x.name.startswith('_d'), model.layers)
    self.hs = [h.output for h in self.dense_layers]
    self.act_layers = filter(lambda x: x.name.startswith('_a'), model.layers)
    self.f_acts = self.f_jac = self.f_jac_hess = self.f_act = None
    vec = K.ones_like(self.model.input)
    self.Js = [T.Rop(h, self.model.input, vec) for h in self.hs]
    self.Hs = [T.Rop(J, self.model.input, vec) for J in self.Js]

def density_to_max_count(self, pool=(7, 7), strides=(1, 1)):
    """Add layers to predict the maximum local count from the density map.

    The pool is the size of the regions to consider. The sliding window
    horizontal and vertical strides are given by 'strides'.

    Parameters
    ----------
    pool : tuple
        A 2-tuple of integers defining the size of the region over which
        the maximum local count will be computed.
    strides : tuple
        A 2-tuple of integers defining the strides of the sliding window
        in the horizontal and vertical directions.
    """
    if not self._max_count_layers:
        new_model = Sequential()
        new_model.add(InputLayer(input_shape=(None, None, 1), name="input"))
        for layer in self.model.layers[1:]:
            new_model.add(layer)
        new_model.add(AveragePooling2D(pool_size=pool, strides=strides,
                                       padding='valid', name='av_pool'))
        # AveragePooling2D yields the mean density per window; multiplying
        # by the window area converts it back to a count
        new_model.add(Lambda(lambda x: x * pool[0] * pool[1], name='mult'))
        new_model.add(GlobalMaxPooling2D(name='max_count_output'))
        new_model.build()
        self.model = new_model
        self._max_count_layers = True
    else:
        warnings.warn('Model is already in "max_count" configuration')

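# Usage sketch for density_to_max_count() above (hypothetical object):
# 'counter' is assumed to be an instance of the surrounding class with a
# trained density-map model attached. After the call, counter.model maps a
# grayscale image to its maximum local count over 7x7 windows.
# counter.density_to_max_count(pool=(7, 7), strides=(1, 1))
# max_count = counter.model.predict(image[np.newaxis, ..., np.newaxis])
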
def define_chosen(oldmodel, index, conv_indexes, trainable_indexes):
    newmodel = clone_model(oldmodel)
    newmodel.set_weights(oldmodel.get_weights())
    indexed_layer = conv_indexes[index - 1]
    print(indexed_layer)
    layers = newmodel.layers[indexed_layer:]
    print(layers)
    model = Sequential(layers)
    if indexed_layer == -len(oldmodel.layers):
        model = oldmodel
    else:
        model.build(newmodel.layers[indexed_layer - 1].output_shape)
    # model.summary()
    last_non_trainable = conv_indexes[index - 1]
    for i in trainable_indexes[:trainable_indexes.index(last_non_trainable)]:
        model._layers[i].trainable = False
    model.compile(loss='categorical_crossentropy', optimizer="adam",
                  metrics=['accuracy'])
    print("Setting weights")
    for i in trainable_indexes[:trainable_indexes.index(indexed_layer) + 1]:
        model._layers[i].set_weights(oldmodel.layers[i].get_weights())
    return model

def main():
    path = "./../../DataSets/"
    X = np.array(prepareData(path))
    Y = prepareValues()
    (trainX, testX, trainY, testY) = train_test_split(X, Y, test_size=0.3,
                                                      random_state=1349)
    model = Sequential()
    # Y.shape[2] is already an int; wrapping it in len() raises a TypeError
    model.add(Conv1D(filters=200, kernel_size=3, activation='softmax',
                     input_shape=(200, Y.shape[2])))
    model.add(Dense(100, activation='relu'))
    model.add(Conv1D(100, (3,), activation='softmax'))
    model.add(Dense(20, activation='relu'))
    model.add(Conv1D(20, (3,), activation='softmax'))
    model.add(Conv1D(10, (3,), activation='relu'))
    model.add(Conv1D(20, (3,), activation='softmax'))
    # softmax output pairs with the categorical cross-entropy loss below
    model.add(Dense(2, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                  metrics=['accuracy'])
    model.build()
    # fit_generator expects a generator; plain arrays go through fit()
    output = model.fit(trainX, trainY,
                       validation_data=(testX, testY),
                       epochs=18000,
                       verbose=1)
    plt.style.use("ggplot")
    plt.figure()
    plt.plot(np.arange(0, 18000), output.history["loss"], label="train_loss")
    plt.plot(np.arange(0, 18000), output.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, 18000), output.history["acc"], label="train_acc")
    plt.plot(np.arange(0, 18000), output.history["val_acc"], label="val_acc")
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="upper left")
    plt.savefig("plot")

class rnn_text_classification(object):
    def __init__(self, input_dim, classes):
        self.model = Sequential()
        self.model.add(LSTM(128, input_shape=(None, input_dim)))
        self.model.add(Dense(classes, activation='softmax'))
        self.model.build()
        self.model.compile(loss='categorical_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
        self.model.summary()

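# Usage sketch for rnn_text_classification above (hypothetical shapes):
# classify 300-dimensional sequence embeddings into 5 classes, with x_train
# of shape (samples, timesteps, 300) and one-hot y_train of shape
# (samples, 5); neither array is defined in the original code.
# clf = rnn_text_classification(input_dim=300, classes=5)
# clf.model.fit(x_train, y_train, epochs=3, batch_size=32)
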
def build_model(label):
    model = Sequential()
    # define CNN model
    model.add(InputLayer(input_shape=input_shape))
    model.add(Conv1D(filters=32, kernel_size=2, activation='relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())
    model.add(Dense(label, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    model.build()
    return model

def build_discriminator():
    model = Sequential()
    model.add(Dense(units=512, input_dim=20))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(units=256))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(units=128))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(units=1, activation='sigmoid'))
    d_adam = Adam(0.0005, 0.5)
    model.compile(loss='binary_crossentropy', metrics=['accuracy'],
                  optimizer=d_adam)
    model.build((None, 20))
    return model

def train_NN(train_x: pd.DataFrame, train_y: pd.DataFrame) -> Sequential:
    """Neural network model: a convolutional neural network that needs to
    be tuned to give better results. We tried several architectures and
    layer types, but we never came close to the xgboost results with this
    model."""
    # Validation set creation
    train_x, val_x, train_y, val_y = train_test_split(train_x, train_y,
                                                      test_size=0.2,
                                                      random_state=0)
    train_x = train_x.values.reshape((train_x.shape[0], train_x.shape[1], 1))
    val_x = val_x.values.reshape((val_x.shape[0], val_x.shape[1], 1))

    # class weight for imbalanced dataset
    class_weight = compute_class_weight(class_weight='balanced',
                                        classes=np.array([0, 1]), y=train_y)
    class_weight = {0: class_weight[0], 1: class_weight[1]}

    model = Sequential()
    model.add(Conv1D(filters=25, kernel_size=2, activation='relu',
                     input_shape=(train_x.shape[1], train_x.shape[2])))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))
    model.add(MaxPooling1D(pool_size=2))
    model.add(BatchNormalization())
    model.add(Conv1D(filters=10, kernel_size=2, activation='relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())
    model.add(Dense(100, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(50, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.build()
    model.compile(loss='binary_crossentropy', optimizer='adamax',
                  metrics=[AUC()])
    model.fit(train_x, train_y,
              validation_data=(val_x, val_y),
              epochs=10,
              batch_size=100,
              verbose=1,
              class_weight=class_weight)
    # save model
    # filepath = "/data/06_models/NN_model"
    # model.save(filepath)
    return model  # filepath

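# Usage sketch for train_NN() above (hypothetical data): 'features_df' is an
# assumed feature DataFrame and 'labels' a binary label Series, neither of
# which appears in the original code.
# nn_model = train_NN(features_df, labels)
# probs = nn_model.predict(features_df.values.reshape(
#     (features_df.shape[0], features_df.shape[1], 1)))
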
def color_model(n_components, n_categories):
    A = n_categories
    model = Sequential([
        Dense(A),
        BatchNormalization(),
        Activation('relu'),
        Dropout(0.1),
        Dense(A),
        BatchNormalization(),
        Dense(n_categories, activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    model.build(input_shape=(None, n_components,))
    return model

def construct_network(cut_model, filter_list, input_shape, add_pooling=False,
                      last_pool=False):
    """Returns new network with selected conv kernels"""
    # set data type for network if selected
    if recast_data:
        tf.keras.backend.set_floatx(DATA_TYPE)
    # get conv filters weights, bias and shape
    last_layer = cut_model.layers[-1]
    init_weights = last_layer.get_weights()[0]
    init_bias = last_layer.get_weights()[1]
    shape = list(init_weights.shape[0:-1])
    shape.append(len(filter_list))
    # set build_shape
    build_shape = (1, input_shape[0], input_shape[1], input_shape[2])
    # arrays for filters and bias
    filters = np.empty(shape, dtype='float32')
    bias = np.empty((len(filter_list)), dtype='float32')
    # add weights to new layer
    for i in range(0, len(filter_list)):
        filters[:, :, :, i] = init_weights[:, :, :, filter_list[i]]
        bias[i] = init_bias[filter_list[i]]
    out_layer = [filters, bias]
    # create new model
    out_model = Sequential()
    # add pooling if needed
    if add_pooling:
        out_model.add(MaxPooling2D(pool_size=(2, 2)))
        build_shape = (1, build_shape[1] * 2, build_shape[2] * 2,
                       build_shape[3])
    out_model.add(Conv2D(len(filter_list), (shape[0], shape[1]),
                         padding=last_layer.padding,
                         input_shape=input_shape,
                         name=str(len(init_bias)),
                         activation='relu'))
    if last_pool:
        out_model.add(MaxPooling2D(pool_size=(2, 2)))
    out_model.build(input_shape=build_shape)
    if last_pool:
        out_model.layers[-2].set_weights(out_layer)
    else:
        out_model.layers[-1].set_weights(out_layer)
    out_model.name = lts(filter_list)
    out_model.compile('SGD', loss='mean_squared_error')
    return out_model

def all_cnn_c(inputShape):
    model = Sequential()
    model.add(Conv2D(96, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(Conv2D(96, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(Conv2D(96, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(Conv2D(192, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(192, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(192, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(192, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(192, (1, 1), activation='relu'))
    model.add(GlobalAveragePooling2D())
    model.add(Dense(5, activation='softmax'))
    model.build(inputShape)
    model.compile(loss=categorical_crossentropy, optimizer=Adam(0.001),
                  metrics=['accuracy'])
    return model

def palette_model(n_categories):
    A = 2 * n_categories
    model = Sequential([
        Dense(A, activation='relu'),
        Dropout(0.1),
        Dense(A, activation='relu'),
        Dropout(0.1),
        Dense(A, activation='relu'),
        Dropout(0.1),
        Dense(n_categories, activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    model.build(input_shape=(None, n_categories,))
    return model

def build_model(tag: str) -> Model:
    """Build a model for prediction.

    Args:
        tag: Either 'dense' or 'convolutional'.
    """
    if tag == "dense":
        model = Sequential([
            Flatten(input_shape=(32, 32, 3)),
            Dense(200, activation='relu'),
            Dense(150, activation='relu'),
            Dense(NUM_CLASSES, activation='softmax')
        ])
        # The same model built using the Functional API.
        # input_layer = Input(shape=(32, 32, 3))
        # x = Flatten()(input_layer)
        # x = Dense(units=200, activation='relu')(x)
        # x = Dense(units=150, activation='relu')(x)
        # output_layer = Dense(units=10, activation='softmax')(x)
        # model = Model(input_layer, output_layer)
    elif tag == "convolutional":
        model = Sequential([
            Input(shape=(32, 32, 3)),
            Conv2D(filters=32, kernel_size=3, strides=1, padding='same'),
            BatchNormalization(),
            LeakyReLU(),
            Conv2D(filters=32, kernel_size=3, strides=2, padding='same'),
            BatchNormalization(),
            LeakyReLU(),
            Conv2D(filters=64, kernel_size=3, strides=1, padding='same'),
            BatchNormalization(),
            LeakyReLU(),
            Conv2D(filters=64, kernel_size=3, strides=2, padding='same'),
            BatchNormalization(),
            LeakyReLU(),
            Flatten(),
            Dense(128),
            BatchNormalization(),
            LeakyReLU(),
            Dropout(rate=0.5),
            Dense(NUM_CLASSES, activation='softmax')
        ])
    else:
        raise ValueError("tag must be 'dense' or 'convolutional'")
    model.build()
    return model

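# Usage sketch for build_model() above (assumed data): CIFAR-10-shaped
# inputs, with NUM_CLASSES taken to be 10 as in the commented-out Functional
# API variant. x_train/y_train are not defined in the original code.
# model = build_model("convolutional")
# model.compile(optimizer='adam', loss='categorical_crossentropy',
#               metrics=['accuracy'])
# model.fit(x_train, y_train, epochs=10, batch_size=64)
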
def createMachineLearnModel(x, yy):
    num_labels = yy.shape[1]
    model = Sequential()
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_labels))
    model.add(Activation('softmax'))
    # Compile the model
    model.compile(loss='categorical_crossentropy', metrics=['accuracy'],
                  optimizer='adam')
    # build with an explicit batch dimension; (None, x.shape[1]) also works
    model.build(input_shape=[x.shape[0], x.shape[1]])
    model.summary()
    return model

def build_model():
    model = Sequential()
    model.add(Dense(units=10, input_dim=2, activation='relu'))
    model.add(Dense(units=10, activation='relu'))
    # model.add(Dense(units=10, activation='relu'))
    # model.add(Dense(units=10, activation='relu'))
    model.add(Dense(units=1, activation='linear'))
    adam = Adam()  # lr=.01, decay=1e-6
    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='mse', optimizer=adam)

    # model = Sequential()
    # model.add(Dense(40, input_dim=2, activation='relu'))
    # model.add(Dense(30, activation='relu'))
    # model.add(Dense(1, activation='sigmoid'))
    # model.compile(loss='binary_crossentropy', optimizer=adam)

    model.build()
    print(model.summary())
    return model

def _unet_to_lstm(model):
    # Keras 2 uses the 'inputs'/'outputs' keyword names
    model_without_softmax = Model(inputs=model.input,
                                  outputs=model.layers[-2].output)
    model_without_softmax.build((IMG_HEIGHT, IMG_WIDTH, 3))
    unet_lstm = Sequential()
    unet_lstm.add(TimeDistributed(
        model_without_softmax,
        input_shape=(FRAMES_PER_SAMPLE, IMG_HEIGHT, IMG_WIDTH, 3)))
    shape = (FRAMES_PER_SAMPLE, IMG_HEIGHT, IMG_WIDTH, CLASS_NUM)
    conv_lstm = ConvLSTM2D(filters=CLASS_NUM, kernel_size=(3, 3),
                           activation='tanh', input_shape=shape,
                           padding='same',
                           kernel_initializer=Zeros(),
                           recurrent_initializer=Zeros(),
                           bias_initializer=Zeros())
    unet_lstm.add(conv_lstm)
    unet_lstm.add(Softmax())
    unet_lstm.build()
    return unet_lstm

def build_model(self):
    model = Sequential()
    model.add(Conv2D(128, kernel_size=(1, 3), strides=1, activation="relu",
                     input_shape=self.state_size))
    model.add(MaxPool2D(pool_size=(1, 2)))
    model.add(Conv2D(64, kernel_size=(1, 4), strides=1, activation="relu"))
    model.add(Conv2D(1, kernel_size=(1, 1), activation="linear"))
    model.add(Flatten())
    model.add(Dense(self.action_size, activation="softmax"))
    model.build()
    model.summary()
    return model

def testDNN(self):
    # 'self' is required here: the body calls self.saveGraphTrain below
    n_outputs = 1
    model = Sequential()
    for i in range(int(hp["NOlayers"])):
        model.add(Dense(hp["NOUnits" + str(i)],
                        activation=hp["activation" + str(i)]))
        model.add(Dropout(hp["rateDropout" + str(i)]))
    model.add(Dense(n_outputs, activation='sigmoid'))
    resultsValid = {"loss": [], "mae": [], "acc": [], "detail": [[]]}
    verbose, epochs, sizeBatch = 1, hp["epochs"], hp["sizeBatch"]
    n_features, n_outputs = xTrain.shape[1], 1
    model.build((None, n_features))
    if hp["optimizer"] == "sgd":
        opt = keras.optimizers.SGD(lr=hp["lrSgd"], momentum=hp["momentumSgd"])
    elif hp["optimizer"] == "adagrad":
        opt = keras.optimizers.Adagrad()
    elif hp["optimizer"] == "adadelta":
        opt = keras.optimizers.Adadelta()
    elif hp["optimizer"] == 'adam':
        opt = keras.optimizers.Adam(lr=hp["lrAdam"],
                                    beta_1=hp["beta1Adam"],
                                    beta_2=hp["beta2Adam"],
                                    epsilon=hp["epsilonAdam"])
    elif hp["optimizer"] == "adamax":
        opt = keras.optimizers.Adamax(lr=hp["lrAdamax"],
                                      beta_1=hp["beta1Adamax"],
                                      beta_2=hp["beta2Adamax"],
                                      epsilon=hp["epsilonAdamax"])
    elif hp["optimizer"] == "nadam":
        opt = keras.optimizers.Nadam(lr=hp["lrNadam"],
                                     beta_1=hp["beta1Nadam"],
                                     beta_2=hp["beta2Nadam"],
                                     epsilon=hp["epsilonNadam"])
    model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['acc'])
    history = model.fit(xTrain, yTrain,
                        epochs=epochs,
                        batch_size=sizeBatch,
                        verbose=verbose,
                        validation_data=(xTest, yTest),
                        callbacks=[TestCallback(xTest, yTest, sizeBatch,
                                                resultsValid, 0)])
    self.saveGraphTrain(history, 0)

def ConvPool_CNN_C(inputShape):
    model = Sequential()
    model.add(Conv2D(96, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(Conv2D(96, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(Conv2D(96, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=2))
    model.add(Conv2D(192, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(192, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(192, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=2))
    model.add(Conv2D(192, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(192, (1, 1), activation='relu'))
    model.add(Conv2D(5, (1, 1)))
    model.add(GlobalAveragePooling2D())
    model.add(Flatten())
    model.add(Dense(5, activation='softmax'))
    model.build(inputShape)
    model.compile(loss=categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(0.001),
                  metrics=['accuracy'])
    return model

def get_lstm_model():
    model = Sequential()
    # model.add(Embedding(input_dim=EMBEDDING_length + 1,
    #                     output_dim=EMBEDDING_DIM,
    #                     weights=[embedding_matrix],
    #                     # input_length=700,
    #                     mask_zero=True,
    #                     trainable=False))
    model.add(LSTM(200, dropout=0.5, recurrent_dropout=0.5))
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(len(class_index), activation='softmax'))
    model.build((None, model_max_len, EMBEDDING_DIM))
    model.summary()
    # tf.config.experimental_run_functions_eagerly(True)
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['acc'])
    return model

def build_discriminator():
    model = Sequential()
    model.add(Conv2D(64, 2, strides=1, padding='same',
                     input_shape=(20, soildnumber, 1), activation='relu'))
    model.add(MaxPooling2D(2, 1, padding='same'))
    model.add(Conv2D(64, 2, strides=1, padding='same', activation='relu'))
    model.add(MaxPooling2D(2, 1, padding='same'))
    model.add(Flatten())
    model.add(Dense(units=1, activation='sigmoid'))
    d_adam = Adam(0.0005, 0.5)
    model.compile(loss='binary_crossentropy', optimizer=d_adam)
    # the build shape must match the Conv2D input_shape above
    model.build((None, 20, soildnumber, 1))
    return model

def test_keras_import(self):
    model = Sequential()
    model.add(Embedding(100, output_dim=256))
    model.add(LSTM(32, return_sequences=True))
    model.add(SimpleRNN(64))
    model.build()
    json_string = model.to_json()
    with open(os.path.join(settings.BASE_DIR, 'media', 'test.json'), 'w') as out:
        json.dump(json.loads(json_string), out, indent=4)
    sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.json'), 'r')
    response = self.client.post(reverse('keras-import'), {'file': sample_file})
    response = json.loads(response.content)
    layerId = sorted(response['net'].keys())
    self.assertEqual(response['result'], 'success')
    self.assertGreaterEqual(len(response['net'][layerId[2]]['params']), 3)
    self.assertGreaterEqual(len(response['net'][layerId[3]]['params']), 3)

def test_sequential_count_params():
    input_dim = 20
    num_units = 10
    num_classes = 2

    # expected parameter count: weights + biases for each Dense layer
    n = input_dim * num_units + num_units
    n += num_units * num_units + num_units
    n += num_units * num_classes + num_classes

    model = Sequential()
    model.add(Dense(num_units, input_shape=(input_dim,)))
    model.add(Dense(num_units))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.build()
    assert n == model.count_params()

    model.compile('sgd', 'binary_crossentropy')
    assert n == model.count_params()

def test_sequential_count_params():
    input_dim = 20
    nb_units = 10
    nb_classes = 2

    n = input_dim * nb_units + nb_units
    n += nb_units * nb_units + nb_units
    n += nb_units * nb_classes + nb_classes

    model = Sequential()
    model.add(Dense(nb_units, input_shape=(input_dim,)))
    model.add(Dense(nb_units))
    model.add(Dense(nb_classes))
    model.add(Activation("softmax"))
    model.build()
    assert n == model.count_params()

    model.compile("sgd", "binary_crossentropy")
    assert n == model.count_params()

def model():
    model = Sequential()
    model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='same',
                     activation='relu', input_shape=(224, 224, 3)))
    model.add(MaxPool2D(strides=2))
    model.add(Conv2D(filters=48, kernel_size=(5, 5), padding='same',
                     activation='relu'))
    model.add(MaxPool2D(strides=2))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dense(72, activation='relu'))
    model.add(Dense(3, activation='softmax'))
    model.build()
    return model

def __get_embeddigs(self):
    """
    Creates a list of embedding layers for the neural networks. An embedding
    layer corresponds to one word feature, built separately for every word
    in a sentence window.
    :return: list of single-layer embedding models
    """
    embedding_models = []
    max_feature_values = self.data_handler.get_tag_vocabulary_sizes()
    print(max_feature_values)
    # add an embedding layer for each feature of each word in a window
    for i in range(Config.window):
        for feature_value in max_feature_values:
            feature_model = Sequential()
            feature_model.add(Embedding(input_dim=feature_value,
                                        output_dim=Config.embedding_output_dim,
                                        input_length=1,
                                        trainable=True))
            feature_model.build()
            embedding_models.append(feature_model)
    return embedding_models

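# A minimal sketch of how the per-feature embedding models above might be
# consumed (assumed usage, not shown in the original): with a window of 3
# and two tag vocabularies, the method returns 3 * 2 = 6 models; each one
# embeds a single integer feature id, and the outputs would typically be
# concatenated into one window representation downstream.
# import numpy as np
# feature_id = np.array([[3]])                   # one feature of one word
# vec = embedding_models[0].predict(feature_id)  # shape (1, 1, output_dim)
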
def nin_cnn_c(inputShape):
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(5, 5), activation='relu', padding='valid'))
    model.add(Conv2D(32, kernel_size=(5, 5), activation='relu'))
    model.add(Conv2D(32, kernel_size=(5, 5), activation='relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=2))
    model.add(Dropout(0.5))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(64, (1, 1), activation='relu', padding='same'))
    model.add(Conv2D(64, (1, 1), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=2))
    model.add(Dropout(0.5))
    model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(32, (1, 1), activation='relu'))
    model.add(Conv2D(5, (1, 1)))
    model.add(GlobalAveragePooling2D())
    model.add(Flatten())
    model.add(Dense(5, activation='softmax'))
    model.build(inputShape)
    model.compile(loss=categorical_crossentropy, optimizer=Adam(0.001),
                  metrics=['accuracy'])
    return model