import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation


class Rede_neural(object):
    """Small 16-input / 4-output feed-forward network whose flat weight list
    can be read (getWid) and replaced (setWid) from outside."""

    def __init__(self, wids=None):
        # "m" is not a valid Keras initializer name; 'glorot_uniform' (the Dense
        # default) is used here when no weights are supplied. With external
        # weights the layers start at zero and are overwritten by setWid().
        types = 'glorot_uniform' if wids is None else 'zeros'
        self.model = Sequential()
        self.model.add(Dense(64, input_dim=16,
                             kernel_initializer=types, bias_initializer=types))
        self.model.add(Activation('hard_sigmoid'))
        self.model.add(Dense(4, kernel_initializer=types, bias_initializer=types))
        self.model.add(Activation('softmax'))
        if wids is None:
            self.wids = self.model.get_weights()
        else:
            self.setWid(wids)

    def getWid(self):
        return self.wids

    def setWid(self, wid):
        self.wids = wid
        self.model.set_weights(wid)

    def predict(self, X):
        # flatten to a single 16-feature row (np.matrix is deprecated)
        X = X.flatten().reshape(1, -1)
        Y = self.model.predict(X)
        return Y.argmax()
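# Minimal usage sketch (assumption: the class is used to swap weights between
# individuals, e.g. in a neuroevolution setup; the 4x4 `board` input is hypothetical).
parent = Rede_neural()                         # random weights
child = Rede_neural(wids=parent.getWid())      # starts from the parent's weights

board = np.zeros((4, 4))
best_move = parent.predict(board)              # index (0-3) of the highest-scoring output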
def make_model(meta):
    'create model based on meta definition'
    # width and height are assumed to be defined at module level.
    model = Sequential()
    model.add(InputLayer(input_shape=(width, height, 1)))
    # Assumption: Conv2D needs a kernel_size (a 1x1 kernel keeps the spatial
    # dimensions), and a Reshape to (width, height) drops the channel axis so
    # the LSTM stack receives the 3D input it expects.
    model.add(Conv2D(1, kernel_size=1))
    model.add(Reshape((width, height)))
    for l in range(meta[0]):
        print("LSTM({})".format(meta[1 + l * 2]))
        # every LSTM except the last must return sequences to feed the next one
        model.add(LSTM(meta[1 + l * 2], return_sequences=(l < meta[0] - 1)))
        if meta[2 + l * 2] > 0:
            print("DROPOUT(0.75)")
            model.add(Dropout(0.75))
    print("Dense({})".format(meta[-1]))
    model.add(Dense(meta[-1], activation='relu'))
    model.add(Dropout(0.75))
    model.add(Dense(2, activation='softmax'))
    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=tf.keras.optimizers.Adam(),
                  metrics=['accuracy'])

    # randomize weights
    weights = model.get_weights()
    weights = [np.random.normal(size=w.shape) for w in weights]
    model.set_weights(weights)
    return model
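# Usage sketch with hypothetical values: meta[0] is the number of LSTM layers,
# followed by one (units, dropout-flag) pair per layer, and meta[-1] is the size
# of the penultimate Dense layer. width/height must be set before calling.
width, height = 32, 32
meta = [2, 64, 1, 32, 0, 16]   # two LSTM layers (64 with dropout, 32 without), Dense(16)
model = make_model(meta)
model.summary()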
def test_gradient_tape_doesnt_crash_when_model_has_non_trainable_variables(self):
    # Given
    initial_model = Sequential([
        tf.keras.layers.Input((1,)),
        Dense(3),
        BatchNormalization(),
        Dense(7)
    ])
    initial_weights = initial_model.get_weights()
    x = np.array([[1]])

    # When
    updated_model = clone_model(initial_model)
    take_n_gradient_step(initial_model, updated_model, n_step=1, alpha=1.0,
                         loss=(lambda y, p: p), data_x=x, data_y=x)

    # Then
    np.testing.assert_equal(initial_weights[4], updated_model.get_weights()[4])  # moving mean
    np.testing.assert_equal(initial_weights[5], updated_model.get_weights()[5])  # moving variance
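# take_n_gradient_step is defined elsewhere in the original code base. A minimal
# sketch of what such a helper could look like, assuming it copies the source
# weights into updated_model and applies plain gradient descent with step size
# `alpha` to the trainable variables only (which is what the moving mean/variance
# assertions above rely on):
def take_n_gradient_step(model, updated_model, n_step, alpha, loss, data_x, data_y):
    updated_model.set_weights(model.get_weights())
    for _ in range(n_step):
        with tf.GradientTape() as tape:
            predictions = updated_model(data_x, training=False)
            loss_value = loss(data_y, predictions)
        grads = tape.gradient(loss_value, updated_model.trainable_variables)
        for var, grad in zip(updated_model.trainable_variables, grads):
            var.assign_sub(alpha * grad)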
def build_model(self, X):
    # assumes that data axis order is same as the backend
    input_shape = X.shape[1:]
    np.random.seed(self.random_state)
    tf.set_random_seed(self.random_state)
    model = Sequential()
    model.add(Conv2D(96, (3, 3), padding='same', input_shape=input_shape, name='conv1'))
    model.add(Activation('relu'))
    model.add(Conv2D(96, (3, 3), name='conv2', padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(96, (3, 3), strides=(2, 2), padding='same', name='conv3'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Conv2D(192, (3, 3), name='conv4', padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(192, (3, 3), name='conv5', padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(192, (3, 3), strides=(2, 2), name='conv6', padding='same'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Conv2D(192, (3, 3), name='conv7', padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(192, (1, 1), name='conv8', padding='valid'))
    model.add(Activation('relu'))
    model.add(Conv2D(10, (1, 1), name='conv9', padding='valid'))
    model.add(GlobalAveragePooling2D())
    model.add(Activation('softmax', name='activation_top'))
    model.summary()

    try:
        optimizer = getattr(keras.optimizers, self.solver)
    except AttributeError:
        raise NotImplementedError('optimizer not implemented in keras')

    # All optimizers with the exception of nadam take decay as named arg
    try:
        opt = optimizer(lr=self.learning_rate, decay=self.lr_decay)
    except TypeError:
        opt = optimizer(lr=self.learning_rate, schedule_decay=self.lr_decay)

    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])

    # Save initial weights so that model can be retrained with same initialization
    self.initial_weights = copy.deepcopy(model.get_weights())
    self.model = model
def build_model(self, X):
    # assumes that data axis order is same as the backend
    input_shape = X.shape[1:]
    np.random.seed(self.random_state)
    tf.set_random_seed(self.random_state)
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape, name='conv1'))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3), name='conv2'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, (3, 3), padding='same', name='conv3'))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3), name='conv4'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(512, name='dense1'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(self.n_classes, name='dense2'))
    model.add(Activation('softmax'))

    try:
        optimizer = getattr(keras.optimizers, self.solver)
    except AttributeError:
        raise NotImplementedError('optimizer not implemented in keras')

    # All optimizers with the exception of nadam take decay as named arg
    try:
        opt = optimizer(lr=self.learning_rate, decay=self.lr_decay)
    except TypeError:
        opt = optimizer(lr=self.learning_rate, schedule_decay=self.lr_decay)

    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])

    # Save initial weights so that model can be retrained with same initialization
    self.initial_weights = copy.deepcopy(model.get_weights())
    self.model = model
def make_model(meta):
    'create model based on meta definition'
    model = Sequential()
    model.add(InputLayer(input_shape=(width, height, 1)))
    for l in range(meta[0]):
        print("Conv2D({},{},{})".format(meta[1 + l * 5], meta[2 + l * 5], meta[3 + l * 5]))
        model.add(Conv2D(meta[1 + l * 5], kernel_size=meta[2 + l * 5],
                         strides=meta[3 + l * 5], activation='relu'))
        if meta[4 + l * 5] > 0:
            print("MaxPooling2D({},{})".format(meta[4 + l * 5], meta[5 + l * 5]))
            model.add(MaxPooling2D(pool_size=meta[4 + l * 5], strides=meta[5 + l * 5]))
        model.add(Dropout(0.1))
    model.add(Flatten())
    if meta[-1] > 0:
        print("Dense({})".format(meta[-1]))
        model.add(Dense(meta[-1], activation='relu'))
        model.add(Dropout(0.3))
    model.add(Dense(2, activation='softmax'))
    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=tf.keras.optimizers.Adam(),
                  metrics=['accuracy'])

    # randomize weights
    weights = model.get_weights()
    weights = [np.random.normal(size=w.shape) for w in weights]
    model.set_weights(weights)
    return model
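# Usage sketch with hypothetical values: meta[0] is the number of conv blocks,
# each block described by 5 entries (filters, kernel_size, strides, pool_size,
# pool_strides; pool_size 0 disables pooling), and meta[-1] is the size of the
# optional Dense layer (0 disables it).
width, height = 64, 64
meta = [2, 16, 3, 1, 2, 2, 32, 3, 1, 0, 0, 128]  # two conv blocks, then Dense(128)
model = make_model(meta)
model.summary()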
q_net.compile(optimizer=adam(lr=stepsize), loss=mse, metrics=['accuracy'])

# Initialize the weights of the target network with θ⋆ = θ
target_net = Sequential([
    Dense(10, input_shape=(None, 4,)),
    Activation('relu'),
    Dense(10),
    Activation('relu'),
    Dense(2),
    Activation('linear')
])
target_net.compile(optimizer=adam(lr=stepsize), loss=mse, metrics=['accuracy'])
target_net.set_weights(q_net.get_weights())

# Initialize the initial state S0
cp_env = gym.make('CartPole-v1')
curr_state = cp_env.reset()

# for t = 0, 1, ..., T do
for t in range(num_iterations):
    # With probability epsilon, choose At uniformly at random from A
    if random.uniform(0, 1) < exploration_prob:
        action = cp_env.action_space.sample()
    # and with probability 1 − epsilon, choose At such that Qθ(St, At) = max_{a ∈ A} Qθ(St, a)
    else:
        action = np.argmax(np.squeeze(q_net.predict(curr_state.reshape(1,
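# Sketch (not part of the excerpt above): DQN implementations typically re-sync
# the target network with the online network every fixed number of steps, e.g.
# inside the training loop:
#
#     if t % target_sync_period == 0:          # target_sync_period is hypothetical
#         target_net.set_weights(q_net.get_weights())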
model.add(Conv2D(filters=256, kernel_size=2, activation='elu'))
model.add(MaxPool2D(2))
model.add(Conv2D(filters=512, kernel_size=2, activation='elu'))
model.add(MaxPool2D(2))
model.add(Flatten())
model.add(Dense(1024, activation='elu'))
# model.add(Dropout(rate=0.5))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer=rmsprop(lr=1e-5),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
print(model.get_weights())

spectrogramsAvalaible = sC.getAvalaibleSpectrograms()
dataSetTrain = createTrainingSet(spectrogramsAvalaible, 700)
dataSetVal = createTrainingSet(spectrogramsAvalaible, 200)
dataSetTrainX, dataSetTrainY = zip(*dataSetTrain)
dataSetValX, dataSetValY = zip(*dataSetVal)

trainX = numpy.asarray(dataSetTrainX)
trainX = trainX.reshape([-1, 128, 128, 1])
trainY = fit_trasform(dataSetTrainY, getAllGenres())
validX = numpy.asarray(dataSetValX)
validX = validX.reshape([-1, 128, 128, 1])
validY = fit_trasform(dataSetValY, getAllGenres())

model.fit(trainX,
                padding='same', input_shape=(28, 28, 1)))
autoencoder.add(MaxPooling2D((2, 2), padding='same'))
autoencoder.add(Conv2D(8, (3, 3), 1, activation='relu', padding='same'))
autoencoder.add(MaxPooling2D((2, 2), padding='same'))

# Decoder
autoencoder.add(Conv2D(8, (3, 3), 1, activation='relu', padding='same'))
autoencoder.add(UpSampling2D((2, 2)))
autoencoder.add(Conv2D(16, (3, 3), 1, activation='relu', padding='same'))
autoencoder.add(UpSampling2D((2, 2)))
# Convolution to reduce the output to a single channel
autoencoder.add(Conv2D(1, (3, 3), 1, activation='sigmoid', padding='same'))

autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
initial_weights = autoencoder.get_weights()
autoencoder.summary()

# Train on Gaussian-noise data and predict
autoencoder.fit(
    x_train_gauss,  # input
    x_train,        # ground truth
    epochs=10,
    batch_size=20,
    shuffle=True)
gauss_preds = autoencoder.predict(x_test_gauss)

for i in range(10):
    array_to_img(x_test[i]).save('x_test_%d.png' % i)
    array_to_img(x_test_gauss[i]).save('x_test_gauss_%d.png' % i)
    array_to_img(gauss_preds[i]).save('x_test_gauss_pred_%d.png' % i)
def example_1():
    simutation_parameters = {
        "PWM_file": "/home/qan/Desktop/DeepEpitif/DeepMetif/JASPAR2018_CORE_vertebrates_non-redundant_pfms_jaspar/MA0835.1.jaspar",
        "seq_length": 100,
        "center_pos": 20,
        "motif_width": 14,
        "metif_level": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    }
    [train_X, train_Y, valid_X, valid_Y, test_X, test_Y] = get_simulated_dataset(
        parameters=simutation_parameters, train_size=16000, valid_size=2000, test_size=20)

    # print(train_X.dtype)
    # print(train_Y.dtype)
    # print(train_X[2,:,:,:])
    # print(train_Y)
    # print(train_X.shape[1::])
    # exit()

    one_filter_keras_model = Sequential()
    one_filter_keras_model.add(Conv2D(filters=5, kernel_size=(1, 15), padding="same",
                                      input_shape=train_X.shape[1::]))
    one_filter_keras_model.add(BatchNormalization(axis=-1))
    one_filter_keras_model.add(Activation('relu'))
    one_filter_keras_model.add(MaxPooling2D(pool_size=(1, 35)))
    one_filter_keras_model.add(Flatten())
    one_filter_keras_model.add(Dense(1))
    one_filter_keras_model.add(Activation("sigmoid"))
    one_filter_keras_model.summary()
    one_filter_keras_model.compile(optimizer='adam', loss='binary_crossentropy')

    metrics_callback = MetricsCallback(train_data=(train_X, train_Y),
                                       validation_data=(valid_X, valid_Y))
    print(one_filter_keras_model.get_weights())

    history_one_filter = one_filter_keras_model.fit(
        x=train_X,
        y=train_Y,
        batch_size=10,
        epochs=50,
        verbose=1,
        callbacks=[History(), metrics_callback],
        validation_data=(valid_X, valid_Y))
    # print(one_filter_keras_model.get_weights())

    one_filter_keras_model_json = one_filter_keras_model.to_json()
    with open("one_filter_keras_model.json", "w") as json_file:
        json_file.write(one_filter_keras_model_json)
    one_filter_keras_model.save_weights("one_filter_keras_model.h5")
    print("Saved model to disk")
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.optimizers import Adam


def _build_model(hidden_layers, activation='relu', l_rate=0.01):
    model = Sequential()
    for i, nodes in enumerate(hidden_layers):
        if i == 0:
            model.add(Dense(nodes, input_dim=1, activation='linear'))
        else:
            model.add(Dense(nodes))
        model.add(Activation(activation))
    model.add(Dense(1))
    model.compile(optimizer=Adam(learning_rate=l_rate), loss='mse')
    return model


l_rate = 0.001
x = np.array([[0.1 * i] for i in range(10)])
y = np.array([el ** 2 - 2 * el + 5 for el in x])
epochs = 100

tb_log = tf.keras.callbacks.TensorBoard(log_dir='graph', histogram_freq=0,
                                        write_graph=True, write_images=True)

model = Sequential([
    Dense(4, activation='relu', name='hidden_layer1'),
    Dense(7, activation='relu', name='hidden_layer2'),
])
model.compile(optimizer=Adam(), loss='mse')
res = model.fit(x, y, epochs=epochs, verbose=1, callbacks=[tb_log])
weights = model.get_weights()
a = 2