def test_nested_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
              verbose=1, validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
              verbose=2, validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
              verbose=1, shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert loss == nloss

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)

def test_merge_sum():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train], y_train, batch_size=batch_size,
              nb_epoch=nb_epoch, verbose=0,
              validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size,
              nb_epoch=nb_epoch, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size,
              nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size,
              nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)

    # test weight saving
    fname = 'test_merge_sum_temp.h5'
    model.save_weights(fname, overwrite=True)

    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.load_weights(fname)
    os.remove(fname)

    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert loss == nloss

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)

def test_temporal_classification():
    '''Classify temporal sequences of length 3 into 2 classes using a
    single layer of GRU units, with softmax applied to the last
    activations of the units.
    '''
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=200,
                                                         num_test=20,
                                                         input_shape=(3, 4),
                                                         classification=True,
                                                         num_classes=2)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential()
    model.add(layers.GRU(8, input_shape=(x_train.shape[1], x_train.shape[2])))
    model.add(layers.Dense(y_train.shape[-1], activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()
    history = model.fit(x_train, y_train, epochs=4, batch_size=10,
                        validation_data=(x_test, y_test),
                        verbose=0)
    assert history.history['acc'][-1] >= 0.8
    config = model.get_config()
    model = Sequential.from_config(config)

def test_image_classification():
    np.random.seed(1337)
    input_shape = (16, 16, 3)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=input_shape,
                                                         classification=True,
                                                         num_classes=4)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential([
        layers.Conv2D(filters=8, kernel_size=3,
                      activation='relu',
                      input_shape=input_shape),
        layers.MaxPooling2D(pool_size=2),
        layers.Conv2D(filters=4, kernel_size=(3, 3),
                      activation='relu', padding='same'),
        layers.GlobalAveragePooling2D(),
        layers.Dense(y_test.shape[-1], activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()
    history = model.fit(x_train, y_train, epochs=10, batch_size=16,
                        validation_data=(x_test, y_test),
                        verbose=0)
    assert history.history['val_acc'][-1] > 0.75
    config = model.get_config()
    model = Sequential.from_config(config)

def test_recursive():
    # test layer-like API
    graph = Graph()
    graph.add_input(name='input1', input_shape=(32,))
    graph.add_node(Dense(16), name='dense1', input='input1')
    graph.add_node(Dense(4), name='dense2', input='input1')
    graph.add_node(Dense(4), name='dense3', input='dense1')
    graph.add_output(name='output1', inputs=['dense2', 'dense3'],
                     merge_mode='sum')

    seq = Sequential()
    seq.add(Dense(32, input_shape=(32,)))
    seq.add(graph)
    seq.add(Dense(4))

    seq.compile('rmsprop', 'mse')

    seq.fit(X_train_graph, y_train_graph, batch_size=10, nb_epoch=10)
    loss = seq.evaluate(X_test_graph, y_test_graph)

    # test serialization
    config = seq.get_config()
    new_graph = Sequential.from_config(config)

    seq.summary()
    json_str = seq.to_json()
    new_graph = model_from_json(json_str)

    yaml_str = seq.to_yaml()
    new_graph = model_from_yaml(yaml_str)

def test_vector_classification():
    '''Classify random float vectors into 2 classes using a 2-layer
    neural network with ReLU hidden units.
    '''
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=(20,),
                                                         classification=True,
                                                         num_classes=2)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    # Test with Sequential API
    model = Sequential([
        layers.Dense(16, input_shape=(x_train.shape[-1],), activation='relu'),
        layers.Dense(8),
        layers.Activation('relu'),
        layers.Dense(y_train.shape[-1], activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()
    history = model.fit(x_train, y_train, epochs=15, batch_size=16,
                        validation_data=(x_test, y_test),
                        verbose=0)
    assert history.history['val_acc'][-1] > 0.8
    config = model.get_config()
    model = Sequential.from_config(config)

def test_nested_sequential():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)

    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    fname = "test_nested_sequential_temp.h5"
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert loss == nloss

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)

    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)

def clone_model(model, custom_objects=None):
    # Avoid a mutable default argument.
    custom_objects = custom_objects or {}
    config = model.get_config()
    try:
        clone = Sequential.from_config(config, custom_objects)
    except Exception:
        # Not a Sequential config; fall back to the functional API.
        clone = Model.from_config(config, custom_objects)
    clone.set_weights(model.get_weights())
    return clone

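# A minimal usage sketch for clone_model above (the demo helper and its data
# are illustrative, not from the original source): the clone reproduces the
# architecture and weights, but compilation state is not copied, so it must
# be compiled again before training or evaluation.
def _clone_model_demo():
    import numpy as np
    from keras.layers import Dense

    original = Sequential([Dense(4, input_shape=(8,), activation='relu'),
                           Dense(2, activation='softmax')])
    original.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    snapshot = clone_model(original)
    snapshot.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    x = np.random.random((5, 8))
    # Identical weights => identical predictions right after cloning.
    assert np.allclose(original.predict(x), snapshot.predict(x))
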
def create_duplicate_model(model):
    """Create a duplicate keras model."""
    new_model = Sequential.from_config(model.get_config())
    new_model.set_weights(copy.deepcopy(model.get_weights()))
    new_model.compile(loss=model.loss, optimizer=model.optimizer)
    return new_model

def __init__(self, model, max_memory=2000, discount=0.7, unfreeze_count=5):
    self.max_memory = max_memory
    self.discount = discount
    self.unfreeze_count = unfreeze_count
    self.memory = []
    self.buffer = []
    self.frozen_model = Sequential.from_config(model.get_config())
    self.frozen_model.compile(sgd(lr=.01), "mse")

def __init__(self, model, max_memory, discount, unfreeze_count, num_actions):
    self.max_memory = max_memory
    self.discount = discount
    self.unfreeze_count = unfreeze_count
    self.num_actions = num_actions
    self.memory = list()
    # TODO: don't assume a Sequential model.
    # Note: the training configuration has no effect here, because the
    # frozen model is never trained; it is only used for inference.
    self.frozen_model = Sequential.from_config(model.get_config())
    self.frozen_model.compile(sgd(lr=.01), "mse")

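# A hedged sketch (the method name and step counter are hypothetical, not
# from the original class) of how such a frozen target network is typically
# refreshed: every `unfreeze_count` training steps, the live model's weights
# are copied wholesale into the frozen copy.
def maybe_unfreeze(self, model, step):
    # Hard update: overwrite the frozen weights with the live ones.
    if step % self.unfreeze_count == 0:
        self.frozen_model.set_weights(model.get_weights())
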
def clone_model(model, custom_objects=None):
    # Avoid a mutable default argument.
    custom_objects = custom_objects or {}
    if len(custom_objects) > 0:
        warnings.warn('Due to an API problem with Keras, custom_objects is '
                      'currently ignored. Sorry about that.')
    config = model.get_config()
    try:
        # TODO: re-enable custom_objects
        clone = Sequential.from_config(config)
    except Exception:
        # TODO: re-enable custom_objects
        clone = Model.from_config(config)
    clone.set_weights(model.get_weights())
    return clone

def test_merge_overlap():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, left], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)

    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    fname = 'test_merge_overlap_temp.h5'
    print(model.layers)
    model.save_weights(fname, overwrite=True)
    print(model.trainable_weights)

    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert loss == nloss

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)

    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)

def __init__(self, env, nn=None, **config):
    """
    Based on: https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf

    Parameters
    """
    # if not isinstance(env.action_space, discrete.Discrete):
    #     raise UnsupportedSpace('Action space {} incompatible with {}. '
    #                            '(Only supports Discrete action spaces.)'
    #                            .format(action_space, self))
    self.env = env
    self.config = {
        'eps': 0.05,
        'gamma': 0.95,
        'store_every': 5,
        'train_every': 5,
        'minibatch_size': 1,
        'max_experience': 5000,
        'target_nn_update_rate': 0.01,
        'maxepoch': 100,
        'maxstep': 100,
        'outdir': '/tmp/brainresults',
        'plot': True,
        'render': True,
    }
    self.config.update(config)
    self.plotter = LivePlot(self.config['outdir'])

    # Deep Q Agent state
    self._action_ctr = 0  # actions executed so far
    self._iter_ctr = 0
    self._store_ctr = 0
    self._train_ctr = 0
    self._experience = deque()

    self.nn = nn  # the Keras network should be compiled outside
    self.tnn = Sequential.from_config(nn.get_config())  # init target NN
    self.tnn.set_weights(self.nn.get_weights())

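# A hedged sketch (a hypothetical helper, not in the original class) of the
# soft target-network update that `target_nn_update_rate` suggests: the
# target network self.tnn tracks the online network self.nn by Polyak
# averaging, moving a small fraction of the way toward the live weights.
def _update_target_nn(self):
    rate = self.config['target_nn_update_rate']
    new_weights = [rate * w + (1.0 - rate) * tw
                   for w, tw in zip(self.nn.get_weights(),
                                    self.tnn.get_weights())]
    self.tnn.set_weights(new_weights)
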
def load_keras_model_from_disk(
        model_json_path,
        weights_hdf_path,
        name=None):
    """
    Loads a model from two files on disk: a JSON configuration and
    HDF5 weights.

    Parameters
    ----------
    model_json_path : str

    weights_hdf_path : str

    name : str, optional

    Returns a Keras model.
    """
    if not exists(model_json_path):
        raise ValueError("Model file %s (name = %s) not found" % (
            model_json_path, name,))

    with open(model_json_path, "r") as f:
        config_dict = json.load(f)

    if isinstance(config_dict, list):
        # Depending on the model, Keras returns either a list or a dict
        # here; the list form is only usable with a Sequential model.
        model = Sequential.from_config(config_dict)
    else:
        model = model_from_config(config_dict)

    if weights_hdf_path is not None:
        if not exists(weights_hdf_path):
            raise ValueError(
                "Missing model weights file %s (name = %s)" % (
                    weights_hdf_path, name))
        model.load_weights(weights_hdf_path)
    return model

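# A hedged counterpart sketch (not part of the original source; the function
# name and paths are illustrative) showing one way to produce the two files
# the loader expects. In the older Keras versions this code targets,
# get_config() on a Sequential model returns a list of layer configs, which
# matches the isinstance(config_dict, list) branch above.
def save_keras_model_to_disk(model, model_json_path, weights_hdf_path):
    with open(model_json_path, "w") as f:
        json.dump(model.get_config(), f)
    model.save_weights(weights_hdf_path, overwrite=True)
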
def test_sequential_deferred_build():
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(3))
    model.add(keras.layers.Dense(3))
    model.compile('sgd', 'mse')

    assert model.built is False
    assert len(model.layers) == 2
    assert len(model.weights) == 0

    model.train_on_batch(
        np.random.random((2, 4)), np.random.random((2, 3)))

    assert model.built is True
    assert len(model.layers) == 2
    assert len(model.weights) == 4

    # Test serialization
    config = model.get_config()
    assert 'name' in config
    new_model = Sequential.from_config(config)
    assert new_model.built is True
    assert len(new_model.layers) == 2
    assert len(new_model.weights) == 4

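# For contrast with the deferred-build behaviour tested above, a minimal
# sketch (assuming the same Keras version; not part of the original tests):
# giving the first layer an input_shape lets the model build, and create its
# weights, immediately, before any data is seen.
def test_sequential_eager_build_sketch():
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(3, input_shape=(4,)))
    model.add(keras.layers.Dense(3))
    assert model.built is True
    assert len(model.weights) == 4  # kernel + bias for each Dense layer
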
import json
from keras.models import model_from_json
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential

(X_train, y_train), (X_test, y_test) = mnist.load_data()

X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)
X_test = X_test.astype('float32')
X_test /= 255

nb_classes = 10
Y_test = np_utils.to_categorical(y_test, nb_classes)

# Load trained model
json_string = open('model.json').read()
model_config = model_from_json(json_string).get_config()
model = Sequential.from_config(model_config)
model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])
model.load_weights('modelweights.hdf5')

# Evaluate with previously trained model
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])

def test_merge_recursivity():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation("relu"))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation("relu"))

    righter = Sequential()
    righter.add(Dense(nb_hidden, input_shape=(input_dim,)))
    righter.add(Activation("relu"))

    intermediate = Sequential()
    intermediate.add(Merge([left, right], mode="sum"))
    intermediate.add(Dense(nb_hidden))
    intermediate.add(Activation("relu"))

    model = Sequential()
    model.add(Merge([intermediate, righter], mode="sum"))
    model.add(Dense(nb_class))
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

    model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size,
              nb_epoch=nb_epoch, verbose=0,
              validation_data=([X_test, X_test, X_test], y_test))
    model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size,
              nb_epoch=nb_epoch, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size,
              nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size,
              nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test, X_test], y_test, verbose=0)

    model.predict([X_test, X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test, X_test], verbose=0)

    fname = "test_merge_recursivity_temp.h5"
    model.save_weights(fname, overwrite=True)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate([X_test, X_test, X_test], y_test, verbose=0)
    assert loss == nloss

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)

    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)

def test_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    # TODO: factor out
    def data_generator(x, y, batch_size=50):
        index_array = np.arange(len(x))
        while 1:
            batches = make_batches(len(x_test), batch_size)
            for batch_index, (batch_start, batch_end) in enumerate(batches):
                batch_ids = index_array[batch_start:batch_end]
                x_batch = x[batch_ids]
                y_batch = y[batch_ids]
                yield (x_batch, y_batch)

    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
              verbose=1, validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
              verbose=2, validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
              verbose=1, shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test)

    prediction = model.predict_generator(data_generator(x_test, y_test), 1,
                                         max_queue_size=2, verbose=1)
    gen_loss = model.evaluate_generator(data_generator(x_test, y_test, 50), 1,
                                        max_queue_size=2)
    pred_loss = K.eval(K.mean(losses.get(model.loss)(K.variable(y_test),
                                                     K.variable(prediction))))

    assert np.isclose(pred_loss, loss)
    assert np.isclose(gen_loss, loss)

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)

    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert loss == nloss

    # Test serialization
    config = model.get_config()
    assert 'name' in config
    new_model = Sequential.from_config(config)
    assert new_model.weights  # Model should be built.

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)

def test_sequential():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    # TODO: factor out
    def data_generator(x, y, batch_size=50):
        index_array = np.arange(len(x))
        while 1:
            batches = make_batches(len(X_test), batch_size)
            for batch_index, (batch_start, batch_end) in enumerate(batches):
                batch_ids = index_array[batch_start:batch_end]
                x_batch = x[batch_ids]
                y_batch = y[batch_ids]
                yield (x_batch, y_batch)

    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation("relu"))
    model.add(Dense(nb_class))
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test)

    prediction = model.predict_generator(data_generator(X_test, y_test),
                                         X_test.shape[0], max_q_size=2)
    gen_loss = model.evaluate_generator(data_generator(X_test, y_test, 50),
                                        X_test.shape[0], max_q_size=2)
    pred_loss = K.eval(K.mean(objectives.get(model.loss)(K.variable(y_test),
                                                         K.variable(prediction))))

    assert np.isclose(pred_loss, loss)
    assert np.isclose(gen_loss, loss)

    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    fname = "test_sequential_temp.h5"
    model.save_weights(fname, overwrite=True)

    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation("relu"))
    model.add(Dense(nb_class))
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert loss == nloss

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)

    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)

def printPrediction(model, smilesData):
    # FIXME hardcoded
    smilesDf = pd.DataFrame(smilesData,
                            columns=[cc.exp['params']['data']['smiles']])
    input = data.formatSequentialInput(smilesDf)
    output = model.predict(input)

    for i, smiles in enumerate(smilesData):
        print('Prediction for {}'.format(smiles))
        print(output[i])

    distanceMatrixCosine = pairwise_distances(output, metric='cosine')
    distanceMatrixCorrel = pairwise_distances(output, metric='correlation')
    distanceMatrixEuclid = pairwise_distances(output, metric='euclidean')

    print('Distance matrix cosine')
    print(distanceMatrixCosine)
    print('Distance matrix correlation')
    print(distanceMatrixCorrel)
    print('Distance matrix euclid')
    print(distanceMatrixEuclid)

    '''
    layerIdx = 1
    cfg = model.get_config()[:layerIdx+1]
    cfg[0]['config']['dropout_U'] = 0
    cfg[0]['config']['dropout_W'] = 0
    print(cfg[0])
    print(cfg[1])
    # del cfg[1]
    # layerIdx -= 1
    # print(cfg)
    cfg[layerIdx]['config']['return_sequences'] = True
    '''

    # Rebuild a truncated copy of the model that returns full sequences.
    layerIdx = 2
    cfg = model.get_config()[:layerIdx + 1]
    del cfg[1]
    layerIdx -= 1
    cfg[layerIdx]['config']['return_sequences'] = True

    seqModel = Sequential.from_config(cfg)
    seqModel.set_weights(model.get_weights())
    seqModel.layers[layerIdx].return_sequences = True

    outputFunction = K.function([seqModel.layers[0].input],
                                [seqModel.layers[layerIdx].output])
    outputSymbols = outputFunction([input])[0]
    outputLastSymbol = outputSymbols[:, outputSymbols.shape[1] - 1, :]

    distanceMatrixLastSymbolCorrel = np.corrcoef(outputLastSymbol)

    print('Distance matrix last symbol correlation')
    print(distanceMatrixLastSymbolCorrel)

def fam(train_i, train_o, test_i, test_o):
    sess = tf.Session()
    K.set_session(sess)
    K.set_learning_phase(1)

    batch_size = 60
    nb_classes = len(MOD)
    nb_epoch = 20
    img_rows, img_cols = 2 * P * L, 2 * Np
    nb_filters = 96
    nb_pool = 2

    X_train, Y_train = shuffle_in_unison_inplace(np.array(train_i),
                                                 np.array(train_o))

    model = Sequential()
    model.add(Convolution2D(64, 11, 11, subsample=(2, 2),
                            input_shape=(1, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(128, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, init='normal'))
    model.add(Activation('softmax', name="out"))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    """
    datagen = ImageDataGenerator(
        # featurewise_center=True,
        # featurewise_std_normalization=True,
        rotation_range=20,
        # width_shift_range=0.3,
        # height_shift_range=0.3,
        # zoom_range=[0, 1.3],
        horizontal_flip=True,
        vertical_flip=True)
    datagen.fit(X_train)
    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size, shuffle=True),
                        samples_per_epoch=len(X_train), nb_epoch=5, verbose=1,
                        validation_data=(test_i[0], test_o[0]))
    """

    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, shuffle=True, validation_data=(test_i[0], test_o[0]))

    for s in range(len(test_i)):
        if len(test_i[s]) == 0:
            continue
        X_test = test_i[s]
        Y_test = test_o[s]
        score = model.evaluate(X_test, Y_test, verbose=0)
        print("SNR", SNR[s], "Test accuracy:", score[1])

    K.set_learning_phase(0)
    config = model.get_config()
    weights = model.get_weights()
    new_model = Sequential.from_config(config)
    new_model.set_weights(weights)

    export_path = "/tmp/fam"
    export_version = 1

    labels_tensor = tf.constant(MOD)
    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(input_tensor=new_model.input,
                                                  classes_tensor=labels_tensor,
                                                  scores_tensor=new_model.output)
    model_exporter.init(sess.graph.as_graph_def(),
                        default_graph_signature=signature)
    model_exporter.export(export_path, tf.constant(export_version), sess)

def visualizeSequentialOutput(model, layerIdx, df):
    if not os.path.exists(cc.cfg['plots']['seq_output_dir']):
        os.makedirs(cc.cfg['plots']['seq_output_dir'])

    if cc.cfg['plots']['seq_output_seq_input_name'] == 'smiles':
        input = data.formatSequentialInput(df)
    elif cc.cfg['plots']['seq_output_seq_input_name'] == 'fasta':
        input = data.formatFastaInput(df)
    else:
        raise ValueError('Unknown seq_output_seq_input_name')

    # model.layers[layerIdx].return_sequences = True
    # model.compile(loss="mean_squared_error", optimizer="rmsprop")

    # Rebuild a truncated copy of the model that returns full sequences.
    cfg = model.get_config()[:layerIdx + 1]
    del cfg[2]
    layerIdx -= 1
    cfg[layerIdx]['config']['return_sequences'] = True

    seqModel = Sequential.from_config(cfg)
    seqModel.set_weights(model.get_weights())
    seqModel.layers[layerIdx].return_sequences = True

    outputFunction = K.function([seqModel.layers[0].input],
                                [seqModel.layers[layerIdx].output])
    output = outputFunction([input])[0]

    '''
    sns.set()
    for i, smilesOutput in enumerate(output):
        g = sns.clustermap(smilesOutput.T, col_cluster=False,
                           method='single', metric='cosine')
        g.savefig('{}/seq_output.png'.format(cc.cfg['plots']['seq_output_dir']))
    '''

    dropSet = set(cc.cfg['plots']['seq_output_ignore_neurons'])
    if cc.cfg['plots']['seq_output_select_neurons']:
        arrMask = cc.cfg['plots']['seq_output_select_neurons']
    else:
        arrMask = list(range(output.shape[2]))
    arrMask = np.array([x for x in arrMask if x not in dropSet])

    fig = plt.figure(figsize=(input.shape[1] * 0.3,
                              len(arrMask) * len(df) * 1.5))

    for i, seqOutput in enumerate(output):
        selected = seqOutput.T[arrMask]
        Z = sch.linkage(selected, method='single', metric='cosine')
        leaves = sch.leaves_list(Z)
        # leaves = range(len(selected))
        reordered = selected[leaves]

        ax = fig.add_subplot(len(df), 1, i + 1)
        ppl.pcolormesh(fig, ax, reordered,
                       xticklabels=list(df.values[i][0]),
                       yticklabels=arrMask[leaves],
                       vmin=-1, vmax=1)

    fig.savefig('{}/{}'.format(cc.cfg['plots']['seq_output_dir'],
                               cc.cfg['plots']['seq_output_name']))

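# A minimal, hedged sketch of the config-slicing technique used in the two
# functions above (layer sizes, input shape, and data here are illustrative
# only, and this assumes a Keras version where Sequential.get_config()
# returns a list of layer configs): slice the config down to the RNN layer,
# flip return_sequences on, rebuild, and reuse the original weights to read
# per-timestep activations.
import numpy as np
from keras import backend as K
from keras.models import Sequential
from keras.layers import LSTM, Dense

model = Sequential()
model.add(LSTM(16, input_shape=(10, 4)))  # returns only the last timestep
model.add(Dense(2))
model.compile(loss='mse', optimizer='rmsprop')

cfg = model.get_config()[:1]              # keep just the LSTM layer config
cfg[0]['config']['return_sequences'] = True
probe = Sequential.from_config(cfg)
# The LSTM weights come first in get_weights(), so slicing them off is safe.
probe.set_weights(model.get_weights()[:len(probe.get_weights())])

step_fn = K.function([probe.layers[0].input], [probe.layers[0].output])
activations = step_fn([np.random.random((3, 10, 4))])[0]
print(activations.shape)                  # (3, 10, 16): one vector per step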