Example 1
def run_tf_example3(verbose):
    if (verbose):
        print("\n***RUNNING TENSORFLOW***")
    #print needed values.
    np.set_printoptions(precision=5)

    #Create a feed forward network
    model = Sequential()
    #model.add(layers.Conv2D(2,3,input_shape=(7,7,1),activation='sigmoid'))
    model.add(layers.Conv2D(2, 3, input_shape=(8, 8, 1), activation='sigmoid'))
    model.add(
        layers.MaxPool2D(pool_size=(2, 2),
                         strides=(2, 2),
                         padding='valid',
                         data_format=None))
    model.add(layers.Flatten())
    model.add(layers.Dense(1, activation='sigmoid'))

    l1k1, l1k2, l1b1, l1b2, l2, input, output = generateExample3()

    #setting weights and bias of first layer.
    l1k1 = l1k1.reshape(3, 3, 1, 1)
    l1k2 = l1k2.reshape(3, 3, 1, 1)

    w1 = np.concatenate((l1k1, l1k2), axis=3)
    model.layers[0].set_weights([w1, np.array(
        [l1b1[0],
         l1b2[0]])])  #Shape of weight matrix is (w,h,input_channels,kernels)

    #setting weights and bias of fully connected layer.
    model.layers[3].set_weights(l2)

    #Setting input. TensorFlow expects a 4D array: the first dimension is the batch size (set to one here) and the last dimension is the channels.
    img = np.expand_dims(input, axis=(0, 3))

    if (verbose):
        print_model_info(model, img)
        print('\ntraining...')

    sgd = optimizers.SGD(lr=100)
    model.compile(loss='MSE', optimizer=sgd, metrics=['accuracy'])
    history = model.fit(img, output, batch_size=1, epochs=1, verbose=verbose)

    if (verbose):
        print_model_info(model, img)
        print('loss: ', history.history['loss'])

    l1k = np.squeeze([
        model.get_weights()[0][:, :, 0, 0],
        model.get_weights()[0][:, :, 0, 1]
    ]).reshape(2, 3, 3)
    l1b = np.squeeze(model.get_weights()[1][:])

    l4 = np.squeeze(model.get_weights()[2])
    l4b = np.squeeze(model.get_weights()[3])

    return l1k, l1b, l4, l4b
    def test_simple_save_load(self):
        f = NamedTemporaryFile()
        m = Sequential([Dense(10, input_dim=2), Dense(2)])
        m.save(f.name)

        w = m.get_weights()
        updates = keras_utils.load_weights_by_name(f.name, m)

        self.assertEqual(4, len(updates))
        for w1, w2 in zip(w, m.get_weights()):
            self.assertTrue(np.allclose(w1, w2))
    def test_nested_save_load(self):
        f = NamedTemporaryFile()
        m = Sequential([Dense(10, input_dim=2), Dense(2)])
        x1, x2 = Input(shape=(2, )), Input(shape=(2, ))
        y1, y2 = m(x1), m(x2)
        model = Model([x1, x2], [y1, y2])
        model.save(f.name)

        w = m.get_weights()
        updates = keras_utils.load_weights_by_name(f.name, m)

        self.assertEqual(4, len(updates))
        for w1, w2 in zip(w, m.get_weights()):
            self.assertTrue(np.allclose(w1, w2))
Example 4
def model(x_train, y_train, x_test, y_test):
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, Dropout, Activation
    from tensorflow.keras.optimizers import RMSprop

    keras_model = Sequential()
    keras_model.add(Dense(512, input_shape=(784,)))
    keras_model.add(Activation('relu'))
    keras_model.add(Dropout({{uniform(0, 1)}}))
    keras_model.add(Dense({{choice([256, 512, 1024])}}))
    keras_model.add(Activation('relu'))
    keras_model.add(Dropout({{uniform(0, 1)}}))
    keras_model.add(Dense(10))
    keras_model.add(Activation('softmax'))

    rms = RMSprop()
    keras_model.compile(loss='categorical_crossentropy',
                        optimizer=rms, metrics=['acc'])

    keras_model.fit(x_train, y_train,
                    batch_size={{choice([64, 128])}},
                    epochs=1,
                    verbose=2,
                    validation_data=(x_test, y_test))
    score, acc = keras_model.evaluate(x_test, y_test, verbose=0)
    return {'loss': -acc, 'status': STATUS_OK, 'model': keras_model.to_yaml(),
            'weights': pickle.dumps(keras_model.get_weights())}
Example 5
def save_model_json(model: Sequential, filename: str) -> None:
    """
    Saves a model's architecture and weights as a JSON file. The output JSON will 
    contain a "model" field with "specs" and "weights" fields inside it. The 
    "specs" field contains the model's architecture and the "weights" field
    contains the weights.

    Args:
      model (keras.models.Sequential):
        The model to save.
      filename (str):
        The name of the file to save the model in. This should have a '.json'
        extension.
    """

    model_dict = {"model": {}}

    model_json = model.to_json()
    model_dict["model"]["specs"] = json.loads(model_json)

    weights = model.get_weights()
    # Convert weight arrays to lists because those are JSON compatible
    weights = nested_arrays_to_lists(weights)
    model_dict["model"]["weights"] = weights

    model_dict["schema"] = "orquestra-v1-model"

    try:
        with open(filename, "w") as f:
            f.write(json.dumps(model_dict, indent=2))
    except IOError:
        print(f'Error: Could not write {filename}')
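
A hedged usage sketch for save_model_json; the model and filename below are illustrative, and nested_arrays_to_lists is assumed to be importable from the same module:

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

demo_model = Sequential([Dense(4, input_shape=(2,)), Dense(1)])
save_model_json(demo_model, "demo_model.json")  # writes the "specs"/"weights" JSON described in the docstring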
def get_skipgram_sentence_embedding_matrix(text, dim=200, batch_size=256, window_size=5, epochs = 1):
    if os.path.isfile("data/sentqs_skipgram_sentence_embedding.npz"):
        loaded_embedding = np.load("data/sentqs_skipgram_sentence_embedding.npz")
        loaded_embedding = loaded_embedding["embedding"]
        print('Loaded Skipgram embedding.')
        return loaded_embedding
    else:
        text = [''.join(x) for x in text]
        t = Tokenizer()
        t.fit_on_texts(text)
        corpus = t.texts_to_sequences(text)
        #print(corpus)
        V = len(t.word_index)
        step_size = len(corpus) // batch_size
        model = Sequential()
        model.add(Dense(units=dim, input_dim=V, activation="softmax"))
        model.add(Dense(units=V, input_dim=dim, activation='softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.summary()

        model.fit(generate_data(corpus, window_size, V), epochs=epochs, steps_per_epoch=step_size)
        # model.save("data/sentqs_full_skigram_arc.h5")
        mlb = MultiLabelBinarizer()
        enc = mlb.fit_transform(corpus)
        emb = enc @ model.get_weights()[0]
        np.savez_compressed("data/sentqs_skipgram_sentence_embedding", embedding=emb)
        return emb
class Net(object):
    """docstring for Net"""
    def __init__(self, input_size, output_size, hidden_size):
        self.model = Sequential()
        self.model.add(keras.Input(shape=input_size))

        for i in range(hidden_size):
            self.model.add(Dense(hidden_size, activation='relu'))

        self.model.add(Dense(output_size, activation='relu'))
        self.model.compile(loss="mse",
                           optimizer=Adam(lr=0.1),
                           metrics=['accuracy'])

    def forward(self, state):
        return self.model.predict(tf.convert_to_tensor([state]))[0]

    def train(self, X, y):
        self.model.fit(tf.convert_to_tensor(X), tf.convert_to_tensor(y))

    def get_params(self):
        return self.model.get_weights()

    def set_params(self, params):
        self.model.set_weights(params)
Example 8
    def test_dense_softmax(self):
        np.random.seed(1987)
        # Define a model
        model = Sequential()
        model.add(Dense(32, input_shape=(32,), activation='softmax'))
        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
        # Test it
        self._test_keras_model(model)
Example 9
class SnakeBrain():
    def __init__(self):
        '''
            This class creates the model of the snake brain, represented by a
            neural network.

            The first layer contains 26 neurons with information about the
            situation on the map:

            direction of the food: 8 neurons (snake_collect_food function)
            direction and angle between head and food: 2 neurons (snake_food_sense)
            distance to wall: 4 neurons (snake_collect_wall)
            direction of the snake movement: 4 neurons (snake_collect_direction)
            direction of the snake body: 8 neurons (snake_collect_body)

            The second layer was chosen as a hidden layer sufficient for the
            network architecture.

            The third layer is the output layer with the decision direction:
                0 - left
                1 - forward
                2 - right
        '''
        self.brain = Sequential()
        # self.brain.add(Dense(16, input_dim=26, activation='sigmoid'))
        # self.brain.add(Dense(16, activation='sigmoid'))
        self.brain.add(Dense(26, input_dim=26, activation='sigmoid'))
        self.brain.add(Dense(26, activation='sigmoid'))
        self.brain.add(Dense(3, activation='sigmoid'))
        self.brain.add(Activation("softmax"))
        self.brain.compile(optimizer="adam",
                           loss='mean_squared_error',
                           metrics=['accuracy'])

    def get_genotype(self):
        '''
            This function returns the weights of the neural network.
        '''
        return self.brain.get_weights()

    def make_decision(self, input_data):
        '''
            Runs the model's predict function on the input data
            passed as argument.
        '''
        return self.brain.predict(input_data)

    def set_genotype(self, genotype):
        '''
            Setter function wrapping TensorFlow's built-in set_weights.
        '''
        self.brain.set_weights(genotype)

    def save_genotype(self, file):
        self.brain.save_weights(file)

    def load_genotype(self, file):
        self.brain.load_weights(file)
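
A hedged usage sketch of the class above; the 26-element observation is a placeholder for the features that the snake_collect_* and snake_food_sense functions would provide:

import numpy as np

snake = SnakeBrain()
observation = np.random.rand(1, 26)                       # placeholder observation, shape (1, 26)
move = int(np.argmax(snake.make_decision(observation)))   # 0 = left, 1 = forward, 2 = right
genotype = snake.get_genotype()                           # list of weight arrays the GA can mutate
snake.set_genotype(genotype)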
Example 10
def test_cosinedense_correctness():
    X = np.random.randn(1, 20)
    model = Sequential()
    model.add(core.CosineDense(1, use_bias=True, input_shape=(20, )))
    model.compile(loss='mse', optimizer='rmsprop')
    W = model.get_weights()
    W[0] = X.T
    W[1] = np.asarray([1.])
    model.set_weights(W)
    out = model.predict(X)
    assert_allclose(out, np.ones((1, 1), dtype=K.floatx()), atol=1e-5)

    X = np.random.randn(1, 20)
    model = Sequential()
    model.add(core.CosineDense(1, use_bias=False, input_shape=(20, )))
    model.compile(loss='mse', optimizer='rmsprop')
    W = model.get_weights()
    W[0] = -2 * X.T
    model.set_weights(W)
    out = model.predict(X)
    assert_allclose(out, -np.ones((1, 1), dtype=K.floatx()), atol=1e-5)
Example 11
    def create_nn(self):
        '''Create individual of population'''

        model = Sequential()
        model.add(Dense(self.num_features, input_shape=(self.num_features, )))

        for layer in range(self.num_layers):

            try:
                nodes = self.nodes_per_layer[layer]
            except IndexError:
                nodes = None

            if nodes is None:
                nodes = self.default_nodes

            model.add(Dense(units=nodes, activation='relu'))

        #output layer
        model.add(Dense(units=self.num_outputs, activation=self.activation))
        model.compile(optimizer=Adam(lr=0.001),
                      loss='mse',
                      metrics=['accuracy'])

        #create deserialize dependencies
        if self.weight_shapes is None:
            model.summary()
            self.weight_shapes = []
            self.weights_lengths = []

            weights = model.get_weights()
            for x in weights:
                self.weight_shapes.append(x.shape)

                #generate indices of weights to recreate the weight structure from the gene string
                length = len(x.reshape(1, -1)[0].tolist())
                if not self.weights_lengths:
                    self.weights_lengths.append(length)
                else:
                    self.weights_lengths.append(
                        self.weights_lengths[len(self.weights_lengths) - 1] +
                        length)
            if self.mxrt == 'default':
                self.mxrt = math.log(self.weights_lengths[-1],
                                     2) / (self.weights_lengths[-1])
            print('Weight Shapes:', self.weight_shapes)
            print('Weight Lengths:', self.weights_lengths)
            print('Mutation Rate:', self.mxrt)
            print('Crossover Type:', self.cxtype)
            print('Selection Type:', self.selection_type)
            print('Sharpness:', self.sharpness)
        return model
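
The weight_shapes / weights_lengths bookkeeping above exists so that a flat gene string can be rebuilt into Keras weight arrays. A minimal sketch of that reconstruction, assuming gene is a flat sequence whose length equals weights_lengths[-1] (the helper name is hypothetical):

import numpy as np

def genes_to_weights(gene, weight_shapes, weights_lengths):
    # weights_lengths stores cumulative end indices, so slice between
    # consecutive boundaries and reshape each slice to its recorded shape
    weights, start = [], 0
    for shape, end in zip(weight_shapes, weights_lengths):
        weights.append(np.asarray(gene[start:end]).reshape(shape))
        start = end
    return weights

# model.set_weights(genes_to_weights(gene, self.weight_shapes, self.weights_lengths))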
Example 12
class Brain:
    def __init__(self):
        # a Keras.Sequential model is needed for our DNN
        self.model = Sequential()
        # this is the hidden layer with four neurons.
        # we have these three inputs:
        #   1) bird's y position
        #   2) bird's vertical distance to the top pipe
        #   3) bird's vertical distance to the bottom pipe
        # so the input_shape for the hidden layer is a tensor with three elements
        self.model.add(Dense(units=4, input_shape=(3, ), activation='sigmoid'))
        # the output layer has two neurons:
        #   1) jump
        #   2) no jump
        self.model.add(Dense(units=2, activation='softmax'))

    def loadFromFile(self, fn):
        data = np.load(fn, allow_pickle=True)
        self.setData(data)

    def saveToFile(self, fn):
        np.save(fn, self.model.get_weights())

    # set the weights and biases of neurons
    def setData(self, brainData):
        brain = np.array(brainData)
        # mutation happens here
        for index, data in enumerate(brain):
            if random.uniform(0, 1) <= MUTATION_RATE:
                brain[index] = data + random.uniform(-1, 1)
        # set mutated weights
        self.model.set_weights(brain)

    def decide(self, data):
        return self.model.predict(data)

    def copy(self):
        return self.model.get_weights()
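
A hedged usage sketch of the Brain class; the three input values are placeholders for the bird's y position and its vertical distances to the top and bottom pipes:

import numpy as np

bird_brain = Brain()
obs = np.array([[0.4, 0.1, 0.3]])                  # placeholder inputs, shape (1, 3)
jump = np.argmax(bird_brain.decide(obs)[0]) == 0   # output index 0 = jump, 1 = no jump
parent_weights = bird_brain.copy()                 # weight list a GA can mutate via setData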
Example 13
def run_tf_example4(verbose):
    if (verbose):
        print("\n***RUNNING TENSORFLOW***")
    #print needed values.
    np.set_printoptions(precision=5)

    #Create a feed forward network
    model = Sequential()
    model.add(layers.Conv2D(1, 2, input_shape=(3, 3, 1), activation='sigmoid'))
    model.add(layers.Flatten())
    model.add(layers.Dense(2, activation='sigmoid'))
    model.add(layers.Dense(2, activation='sigmoid'))

    l1, l3, l4, input, output = generateExample4()

    #TF Conv2D kernel dimensions are (kernel_h, kernel_w, in_channels, out_channels)
    l1[0] = l1[0].reshape(2, 2, 1, 1)

    model.layers[0].set_weights(
        l1)  #Shape of weight matrix is (w,h,input_channels,kernels)

    #setting weights and bias of fully connected layers.
    model.layers[2].set_weights(l3)
    model.layers[3].set_weights(l4)

    #Setting input. TensorFlow expects a 4D array: the first dimension is the batch size (set to one here) and the last dimension is the channels.
    input = input.reshape(1, 3, 3, 1)

    sgd = optimizers.SGD(lr=0.5)
    # loss = tf.keras.losses.CategoricalCrossentropy()
    loss = tf.keras.losses.MeanSquaredError()

    if (verbose):
        print_model_info(model, input, output, loss)
        print('\ntraining...')

    model.compile(loss=loss, optimizer=sgd, metrics=['accuracy'])
    history = model.fit(input, output, batch_size=1, epochs=1, verbose=verbose)

    if (verbose):
        print_model_info(model, input, output, loss)
        print('loss: ', history.history['loss'])  #why do the losses not match?

    l1k = np.squeeze([
        model.get_weights()[0][:, :, 0, 0],
        model.get_weights()[0][:, :, 0, 0]
    ])
    l1b = np.squeeze(model.get_weights()[1][:])

    l3 = np.squeeze(model.get_weights()[2])
    l3b = np.squeeze(model.get_weights()[3])

    l4 = np.squeeze(model.get_weights()[4])
    l4b = np.squeeze(model.get_weights()[5])

    return l1k, l1b, l3, l3b, l4, l4b
Example 14
def test_cosineconvolution_2d_correctness():
    if data_format == 'channels_first':
        X = np.random.randn(1, 3, 5, 5)
        input_dim = (3, 5, 5)
        W0 = X[:, :, ::-1, ::-1]
    elif data_format == 'channels_last':
        X = np.random.randn(1, 5, 5, 3)
        input_dim = (5, 5, 3)
        W0 = X[0, :, :, :, None]

    model = Sequential()
    model.add(
        CosineConvolution2D(1, (5, 5),
                            use_bias=True,
                            input_shape=input_dim,
                            data_format=data_format))
    model.compile(loss='mse', optimizer='rmsprop')
    W = model.get_weights()
    W[0] = W0
    W[1] = np.asarray([1.])
    model.set_weights(W)
    out = model.predict(X)
    assert_allclose(out, np.ones((1, 1, 1, 1), dtype=K.floatx()), atol=1e-5)

    model = Sequential()
    model.add(
        CosineConvolution2D(1, (5, 5),
                            use_bias=False,
                            input_shape=input_dim,
                            data_format=data_format))
    model.compile(loss='mse', optimizer='rmsprop')
    W = model.get_weights()
    W[0] = -2 * W0
    model.set_weights(W)
    out = model.predict(X)
    assert_allclose(out, -np.ones((1, 1, 1, 1), dtype=K.floatx()), atol=1e-5)
Example 15
def def_NN_default_design(X_train_,
                          y_train_,
                          X_test_,
                          df_weight_,
                          units_,
                          epochs_,
                          batch_size_,
                          pathway_layer_=False,
                          second_layer_=False):

    K.clear_session()
    model_default = Sequential()
    model_default.add(
        Dense(units=units_,
              input_dim=len(X_train_[0]),
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              activation='tanh',
              name='layer1'))
    if (pathway_layer_ == True):
        #print('set_weight applied!!')
        model_default.set_weights([
            model_default.get_weights()[0] * np.array(df_weight_),
            np.zeros((units_, ))
        ])

    if (second_layer_ == True):
        #print('second layer applied!!')
        model_default.add(Dense(100, activation='tanh', name='layer2'))

    model_default.add(Dense(16, activation='softmax', name='layer3'))

    sgd = optimizers.SGD(lr=0.1, decay=1e-6, momentum=0.9,
                         nesterov=True)  # the parameter from paper
    model_default.compile(optimizer=sgd,
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
    model_default.fit(X_train_,
                      y_train_,
                      epochs=epochs_,
                      batch_size=batch_size_,
                      verbose=0)
    y_pred_ = model_default.predict(X_test_)
    K.clear_session()

    return (model_default, y_pred_)
def model(x_train, y_train, x_test, y_test):
    """Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    """
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, Dropout, Activation
    from tensorflow.keras.optimizers import RMSprop

    keras_model = Sequential()
    keras_model.add(Dense(512, input_shape=(784, )))
    keras_model.add(Activation('relu'))
    keras_model.add(Dropout({{uniform(0, 1)}}))
    keras_model.add(Dense({{choice([256, 512, 1024])}}))
    keras_model.add(Activation('relu'))
    keras_model.add(Dropout({{uniform(0, 1)}}))
    keras_model.add(Dense(10))
    keras_model.add(Activation('softmax'))

    rms = RMSprop()
    keras_model.compile(loss='categorical_crossentropy',
                        optimizer=rms,
                        metrics=['acc'])

    keras_model.fit(x_train,
                    y_train,
                    batch_size={{choice([64, 128])}},
                    epochs=1,
                    verbose=2,
                    validation_data=(x_test, y_test))
    score, acc = keras_model.evaluate(x_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    return {
        'loss': -acc,
        'status': STATUS_OK,
        'model': keras_model.to_yaml(),
        'weights': pickle.dumps(keras_model.get_weights())
    }
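
The double-curly-brace placeholders above are hyperas search-space templates, so the function is presumably consumed by hyperas/hyperopt. A hedged sketch of that driver code (the data provider and evaluation budget are illustrative):

from hyperopt import Trials, tpe
from hyperas import optim

def data():
    # illustrative data provider; hyperas passes its return values to model(...)
    from tensorflow.keras.datasets import mnist
    from tensorflow.keras.utils import to_categorical
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(-1, 784).astype('float32') / 255
    x_test = x_test.reshape(-1, 784).astype('float32') / 255
    return x_train, to_categorical(y_train, 10), x_test, to_categorical(y_test, 10)

best_run, best_model = optim.minimize(model=model, data=data,
                                      algo=tpe.suggest, max_evals=5,
                                      trials=Trials())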
Example 17
    def build(self, hp):
        model = Sequential()

        model.add(
            Dense(units=self.units_,
                  input_dim=self.input_dim_,
                  kernel_initializer='glorot_uniform',
                  bias_initializer='zeros',
                  activation='tanh',
                  name='layer1'))
        if (len(self.df_weight_) > 0):
            #print('set_weight applied!!')
            model.set_weights([
                model.get_weights()[0] * np.array(self.df_weight_),
                np.zeros((self.units_, ))
            ])
        if (self.second_layer_ == True):
            #print('second layer applied!!')
            for i in range(hp.Int('n_layers', 1,
                                  1)):  # adding variation of layers.
                model.add(
                    Dense(
                        hp.Int(f'layer_{i}_units',
                               min_value=0,
                               max_value=200,
                               step=50)))
                model.add(Activation('tanh'))

        model.add(
            Dense(self.output_classes_, activation='softmax', name='layer3'))

        hp_learning_rate = hp.Choice('learning_rate',
                                     values=[0.0001, 0.001, 0.01, 0.1, 0.2])
        hp_momentum = hp.Choice('momentum',
                                values=[0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
        hp_decay = hp.Choice('decay', values=[1e-4, 1e-5, 1e-6, 1e-7, 1e-8])

        model.compile(optimizer=optimizers.SGD(learning_rate=hp_learning_rate,
                                               momentum=hp_momentum,
                                               decay=hp_decay),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        return model
Example 18
def test_model(epochs, batch_size, title, lst_layers, x_train_t, x_val_t,
               y_train_t, y_val_t):

    model_t = Sequential(lst_layers)
    model_t.compile(optimizer='adam',
                    loss="categorical_crossentropy",
                    metrics=['categorical_accuracy'])

    model_t.summary()
    w_save = model_t.get_weights()
    history = model_t.fit(x_train_t,
                          y_train_t,
                          batch_size=batch_size,
                          epochs=epochs,
                          validation_data=(x_val_t, y_val_t))

    acc = history.history['categorical_accuracy']
    val_acc = history.history['val_categorical_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']

    epochs = range(1, len(acc) + 1)

    # Train and validation accuracy
    plt.plot(epochs, acc, "b", label='Training accuracy')
    plt.plot(epochs, val_acc, "r", label='Validation accuracy')
    plt.title(f'Training and validation accuracy {title}{batch_size}')
    plt.legend()
    plt.figure()
    plt.show()

    # Train and validation loss
    plt.plot(epochs, loss, "b", label='Training loss')
    plt.plot(epochs, val_loss, "r", label='Validation loss')
    plt.title(f'Training and validation loss {title}{batch_size}')
    plt.legend()
    plt.figure()

    plt.show()
    model_t.set_weights(w_save)
    return acc, val_acc, loss, val_loss
Example 19
def test_triplet_network():

    X = np.zeros(shape=(10, 5))
    embedding_dims = 3

    base_model = Sequential()
    base_model.add(Dense(8, input_shape=(X.shape[-1], )))

    model, _, _, _ = triplet_network(base_model,
                                     embedding_dims=embedding_dims,
                                     embedding_l2=0.1)
    encoder = model.layers[3]

    assert model.layers[3].output_shape == (None, 3)
    assert np.all(base_model.get_weights()[0] == encoder.get_weights()[0])
    assert np.all([
        isinstance(layer, keras.layers.InputLayer)
        for layer in model.layers[:3]
    ])

    assert encoder.output_shape == (None, embedding_dims)
    def train_model(self):
        model = Sequential([
            Conv2D(filters=32,
                   kernel_size=(3, 3),
                   activation='relu',
                   padding='same',
                   input_shape=(224, 224, 3)),
            MaxPool2D(pool_size=(2, 2), strides=2),
            Conv2D(filters=64,
                   kernel_size=(3, 3),
                   activation='relu',
                   padding='same'),
            MaxPool2D(pool_size=(2, 2), strides=2),
            Flatten(),
            Dense(units=2, activation='softmax')
        ])
        model.summary()

        model.compile(
            optimizer=Adam(learning_rate=self.client_config.learning_rate),
            loss='categorical_crossentropy',
            metrics=['accuracy'])
        if self.model_params is not None:
            print('Using model weights from central node')
            model.set_weights(self.model_params)
        else:
            print('Using default model weights')

        self.__create_temp_dataset_folder()
        train_batches, valid_batches = self.__load_datasets()

        model.fit(x=train_batches,
                  steps_per_epoch=10,
                  epochs=self.client_config.epochs,
                  validation_data=valid_batches,
                  validation_steps=5,
                  verbose=2)

        self.__clean_temp_dataset_folder()
        return model.get_weights()
Example 21
          activation='relu',
          kernel_initializer=initializer,
          input_shape=(num_of_neurons, )),
    Dense(num_of_neurons, activation='relu', kernel_initializer=initializer),
    Dense(num_of_neurons, activation='relu', kernel_initializer=initializer),
    Dense(num_of_neurons, activation='relu', kernel_initializer=initializer),
    Dense(10, activation='softmax', kernel_initializer=initializer)
])
net2.compile(Adam(lr=rho / tau),
             loss='categorical_crossentropy',
             metrics=['accuracy'])
net1_last = tf.keras.models.clone_model(net1)
net2_last = tf.keras.models.clone_model(net2)
for i in range(ITER):
    print("i=", i)
    net1_last.set_weights(net1.get_weights())
    net1.fit(x_train, q1, steps_per_epoch=500, epochs=1, verbose=0)
    net2_last.set_weights(net2.get_weights())
    net2.fit(p2, y_train, steps_per_epoch=500, epochs=1, verbose=0)
    train_acc[i], train_cost[i] = test_accuracy(net1, net2, x_train, y_train)
    if i > 0 and train_cost[i] - train_cost[i - 1] > 0.1:
        net1.set_weights(net1_last.get_weights())
        net2.set_weights(net2_last.get_weights())
        train_acc[i], train_cost[i] = test_accuracy(net1, net2, x_train,
                                                    y_train)
    with tf.GradientTape() as tape:
        tape.watch(p2)
        outputs = net2(p2)
        obj = cross_entropy(y_train, outputs) + rho / 2 * tf.reduce_sum(
            (p2 - q1) *
            (p2 - q1)) + tf.linalg.trace(tf.matmul(tf.transpose(u1), p2 - q1))
Example 22
#==================================================================
#********************  Saving the regressor  **********************
#==================================================================

name  = 'lna28'
addr = '/home/mostafa/workarea_POSH/RF_FrontEnd/reg_files/LNA/'

reg_json=reg.to_json()
with open(addr+'model_'+name+'.json', "w") as json_file:
    json_file.write(reg_json)
reg.save_weights(addr+'reg_'+name+'.h5')

from sklearn.externals import joblib
joblib.dump(sc_X, addr+'scX_'+name+'.pkl') 
joblib.dump(sc_y, addr+'scY_'+name+'.pkl')
pickle.dump( reg.get_weights(), open( addr+'w8_'+name+'.p', "wb" ) )


#==================================================================
#********************  Loading the regressor  *********************
#==================================================================
"""
from sklearn.externals import joblib
from keras.models import model_from_json 
json_file = open('model_vco65.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
reg = model_from_json(loaded_model_json)
reg.load_weights('reg_vco65.h5')

Sc_X = joblib.load('scX_vco65.pkl') 
# training the model #

model.fit(x_train, y_train, batch_size=32, epochs=10, shuffle=True)

##############################
# saving & loading the model #

# save
try:
    model.save('result/model_saved')
    print('Succeeded to save')
except ValueError:
    print('Failed to save.')

# shuffle
weights = model.get_weights()
weights = [np.random.permutation(w.flat).reshape(w.shape) for w in weights]
model.set_weights(weights)

# load
try:
    model = load_model('result/model_saved')
    print('Succeeded to load')
except ValueError:
    print('Failed to load.')

########################
# evaluating the model #

model.evaluate(x_test, y_test, batch_size=1000)
Example 24
# model.add( Conv2D( 512, 3, padding="same" ) )
# model.add( Conv2D( 512, 3, padding="same" ) )
# model.add( Conv2D( 512, 3, padding="same" ) )
# model.add( MaxPooling2D( pool_size=(2, 2) ) )
model.add( Flatten() )
model.add( Dense( 100 ) )
model.add( Dense( 100 ) )
model.add( Dense( 1 ) )
model.add( Activation( "sigmoid" ) )

m = model( tf.ones( ( batch_size, img_width, img_height, 3 ) ) )

# model.summary()
# print( model.get_weights() )

model.compile(
    optimizer="adam",
    loss="binary_crossentropy",
    metrics=[ "accuracy" ]
)

model.fit( train_generator, batch_size=batch_size, epochs=epochs, steps_per_epoch=training_samples, validation_data=validation_generator, validation_steps=validation_samples, verbose=1 )

# score = model.evaluate( test_generator, batch_size=batch_size, epochs=epochs, steps_per_epoch=training_samples, verbose=1 )

# print( score )

model.summary()
print( model.get_weights() )

Example 25
def nn(tr_list, imdb_tr_list, te_list, imdb_te_list):
    TR_SAMPLE_SIZE = len(imdb_tr_list)
    TR_PROBA = len(tr_list)

    for idx in range(len(tr_list)):
        assert len(tr_list[idx]) == TR_SAMPLE_SIZE, "train mismatch samples"

    train_x = np.zeros([TR_SAMPLE_SIZE, TR_PROBA], dtype=np.float64)
    train_y = np.array([])

    for idx in range(TR_SAMPLE_SIZE):
        ll = imdb_tr_list[idx]
        fn = ll[0]
        label = int(ll[1])

        x = []
        for i in range(TR_PROBA):
            x.append(tr_list[i][fn])

        x_ = np.array(x)
        train_x[idx] = x_
        train_y = np.append(train_y, label)

    model = Sequential()
    model.add(
        Dense(
            1,
            activation=None,
            use_bias=False,
            kernel_regularizer=tf.keras.regularizers.l2(L2_ETA[0]),
            #                    kernel_regularizer=tf.keras.regularizers.l1(L1_ETA),
            kernel_constraint=tf.keras.constraints.NonNeg(),
            input_shape=(TR_PROBA, )))
    model.add(Dense(1, activation="sigmoid"))

    #model.summary()
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy', 'binary_crossentropy'])

    hist = model.fit(train_x,
                     train_y,
                     epochs=EPOCH,
                     batch_size=BATCH_SIZE,
                     shuffle=False,
                     verbose=0)

    weights = model.get_weights()

    #print("trained weights: ", weights)

    # prediction

    TE_SAMPLE_SIZE = len(imdb_te_list)
    TE_PROBA = len(te_list)

    for idx in range(len(te_list)):
        assert len(te_list[idx]) == TE_SAMPLE_SIZE, "test mismatch samples"

    x_predict = np.zeros([len(imdb_te_list), TE_PROBA], dtype=np.float64)
    y_actual = list()

    for idx in range(TE_SAMPLE_SIZE):
        ll = imdb_te_list[idx]
        fn = ll[0]
        label = int(ll[1])

        x = []
        for i in range(TE_PROBA):
            x.append(te_list[i][fn])

        x_ = np.array(x)
        x_predict[idx] = x_
        y_actual.append(label)

    y_pred = model.predict(x_predict)

    correct_pred = 0
    wrong_pred = 0

    for idx in range(TE_SAMPLE_SIZE):
        if (y_pred[idx][0] <= 0.5
                and y_actual[idx] == 0) or (y_pred[idx][0] > 0.5
                                            and y_actual[idx] == 1):
            correct_pred = correct_pred + 1
        else:
            wrong_pred = wrong_pred + 1

    assert (correct_pred + wrong_pred) == TE_SAMPLE_SIZE, "mismatch size"

    _acc = float(correct_pred) / float(TE_SAMPLE_SIZE)
    #print("Accuracy: ", float(correct_pred)/float(TE_SAMPLE_SIZE))
    tr_acc = hist.history['acc'][EPOCH - 1] * 100.
    te_acc = _acc * 100.
    return tr_acc, te_acc, weights
Example 26
                               tf.constant(sample["rew"].ravel()),
                               tf.constant(sample["done"].ravel()), discount,
                               tf.constant(env.action_space.n))
        absTD = tf.math.abs(target_Q - Q)
        loss = tf.reduce_mean(loss_func(absTD) * weights)

    grad = tape.gradient(loss, model.trainable_weights)
    optimizer.apply_gradients(zip(grad, model.trainable_weights))
    tf.summary.scalar("Loss vs training step", data=loss, step=n_step)

    if prioritized:
        Q = Q_func(model, tf.constant(sample["obs"]),
                   tf.constant(sample["act"].ravel()),
                   tf.constant(env.action_space.n))
        absTD = tf.math.abs(target_Q - Q)
        rb.update_priorities(sample["indexes"], absTD)

    if done:
        observation = env.reset()
        rb.on_episode_end()
        n_episode += 1

    if n_step % target_update_freq == 0:
        target_model.set_weights(model.get_weights())

    if n_step % eval_freq == eval_freq - 1:
        eval_rew = evaluate(model, eval_env)
        tf.summary.scalar("episode reward vs training step",
                          data=eval_rew,
                          step=n_step)
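
The fragment above synchronises a target network with the online Q-network every target_update_freq steps. A minimal sketch of how such a pair is commonly set up (build_q_network is a hypothetical factory):

import tensorflow as tf

model = build_q_network()                        # hypothetical factory for the online Q-network
target_model = tf.keras.models.clone_model(model)
target_model.set_weights(model.get_weights())    # clone_model copies the architecture only, so sync weights explicitly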
# print(output)
print(model.predict(img))

# print('1st convolutional layer, 1st kernel weights:')
# print(np.squeeze(model.get_weights()[0][:,:,0,0]))
# print('1st convolutional layer, 1st kernel bias:')
# print(np.squeeze(model.get_weights()[1][0]))

# print('fully connected layer weights:')
# print(np.squeeze(model.get_weights()[2]))
# print('fully connected layer bias:')
# print(np.squeeze(model.get_weights()[3][0]))


sgd = optimizers.SGD(lr=1)

model.compile(loss='MSE', optimizer=sgd, metrics=['accuracy'])
history=model.fit(img,output,batch_size=1,epochs=1)
print('model output after:')
print(model.predict(img))

print('1st convolutional layer, 1st kernel weights:')
print(np.squeeze(model.get_weights()[0][:,:,0,0]))
print('1st convolutional layer, 1st kernel bias:')
print(np.squeeze(model.get_weights()[1][0]))

print('fully connected layer weights:')
print(np.squeeze(model.get_weights()[2]))
print('fully connected layer bias:')
print(np.squeeze(model.get_weights()[3][0]))
Example 28
                activation=keras.activations.sigmoid,
                ))
model.add(Dense(3,
                activation=keras.activations.sigmoid,
                ))

model.compile(
              optimizer=tf.train.AdamOptimizer(0.001),
              # loss=keras.losses.categorical_crossentropy,
              loss=keras.losses.mse,
              metrics=[keras.metrics.binary_accuracy]
              )

# This is the process I used to train my weights
model.fit(x_train, y_train, epochs=2000)
myWeights = model.get_weights()
np.set_printoptions(suppress=True)
np.set_printoptions(precision=2)
print('myWeights =', myWeights)

# These are the weights I got, pretty-printed
# myWeights = [
# #     # first layer, 7x8
#     array([[ 1.2 , -1.16, -1.97,  2.16,  0.97,  0.86, -1.2 ,  1.12],
#        [ 1.21, -1.17, -1.97,  2.16,  0.84,  0.76, -1.19,  1.22],
#        [ 1.19, -1.2 , -1.98,  2.15,  0.87,  0.84, -1.19,  1.13],
#        [ 1.21, -1.2 , -1.97,  2.15,  0.89,  0.8 , -1.2 ,  1.16],
#        [ 1.21, -1.12, -1.97,  2.16,  0.99,  0.8 , -1.21,  1.18],
#        [ 1.23, -1.09, -1.98,  2.15,  1.12,  0.81, -1.24,  1.13],
#        [ 1.24, -1.11, -1.99,  2.14,  1.  ,  0.77, -1.23,  1.17]],
#       dtype=float32),
Example 29
class LinearEvent(object):
    """ this is the base clase of the event model """

    def __init__(self, d, var_df0=None, var_scale0=None, optimizer=None, n_epochs=10, init_model=False,
                 kernel_initializer='glorot_uniform', l2_regularization=0.00, batch_size=32, prior_log_prob=None,
                 reset_weights=False, batch_update=True, optimizer_kwargs=None, variance_prior_mode=None, 
                 variance_window=None):
        """

        :param d: dimensions of the input space
        """
        self.d = d
        self.f_is_trained = False
        self.f0_is_trained = False
        self.f0 = np.zeros(d)

        #### ~~~ Variance Prior Parameters ~~~~ ###
        # in practice, only the mode of the variance prior
        # matters at all. Changing the df/scale while maintaining
        # the mode has no effect on the model behavior or the
        # implied variance. (It does affect the magnitude of the
        # log likelihood, but not the dynamic range of the
        # log-likelihoods.) As such, it is convenient to fix the
        # prior df and solve for the scale for some desired variance.
        
        
        # allow for set DF and set scale to override the variance prior mode
        if (var_df0 is not None) and (var_scale0 is not None):
            variance_prior_mode = var_df0 / (var_df0 + 2) * var_scale0
        
        elif variance_prior_mode is None:
            # in simulations, it is often convenient to approximately
            # normalize the stim to unit length, or 
            # X ~ N(0, (1/d) * I)
            variance_prior_mode = 1 / d 
        self.variance_prior_mode = variance_prior_mode

        if var_df0 is None:
            var_df0 = 1
        self.var_df0 = var_df0

        if var_scale0 is None:
            var_scale0 = get_prior_scale(self.var_df0, variance_prior_mode)
        self.var_scale0 = var_scale0
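        # Worked example of the prior relation above (illustration only): since
        # mode = var_df0 / (var_df0 + 2) * var_scale0, a desired mode m with var_df0 = 1
        # implies var_scale0 = m * (var_df0 + 2) / var_df0 = 3 * m, which is presumably
        # what get_prior_scale solves for.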

        # also set a default prior log probability, inferred from the prior variance
        # new way!! evaluate the probability of the trace as given a zero mean gaussian
        # vector with the variance prior mode
        # if prior_log_prob is None:
            # # this is a decent approximation of what a random normalized vector would
            # # under the generative process of X ~ N(0, var_scale0 * I),
            # # which gives (in expectation) unit vectors
            # 
            # # note, norm uses standard deviation, not variance
            # prior_log_prob = norm(0, variance_prior_mode ** 0.5).logpdf(
            #     variance_prior_mode ** 0.5) * d
            
        self.prior_probability = prior_log_prob

        # how many observations do we consider in calculating the variance?
        if variance_window is None:
            variance_window = int(1e6)  # this is plausibly large...
        self.variance_window = variance_window
        
        #### ~~~ END Variance Prior Parameters ~~~~ ###

        self.x_history = [np.zeros((0, self.d))]

        if (optimizer is None) and (optimizer_kwargs is None):
            optimizer = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0, amsgrad=False)
        elif (optimizer is None) and not (optimizer_kwargs is None):
            optimizer = Adam(**optimizer_kwargs)
        elif (optimizer is not None) and (type(optimizer) != str):
            optimizer = optimizer()

        self.compile_opts = dict(optimizer=optimizer, loss='mean_squared_error')
        self.kernel_initializer = kernel_initializer
        self.kernel_regularizer = regularizers.l2(l2_regularization)
        self.n_epochs = int(n_epochs)
        self.batch_size = int(batch_size)

        self.d = d
        self.reset_weights = reset_weights
        self.batch_update = batch_update
        self.training_pairs = []
        self.prediction_errors = np.zeros((0, self.d), dtype=float)
        self.model_weights = None

        # initialize the covariance with the mode of the prior distribution
        self.Sigma = np.ones(d) * var_df0 * var_scale0 / (var_df0 + 2)

        self.is_visited = False  # governs the special case of model's first prediction (i.e. with no experience)

        # switch for inheritance -- don't want to init the model for sub-classes
        if init_model:
            self.init_model()

        # generate a vector to act as a placeholder for time-points
        # prior to the start of the event so as to allow the network 
        # to implicitly learn a hidden state. We'll assume this
        # vector has length approx. equal to 1
        self.filler_vector = np.random.randn(self.d) / np.sqrt(self.d)

    def clear(self):
        delete_object_attributes(self)

    def init_model(self):
        self._compile_model()
        self.model_weights = self.model.get_weights()
        return self.model

    def _compile_model(self):
        self.model = Sequential([
            Dense(self.d, input_shape=(self.d,), use_bias=True, kernel_initializer=self.kernel_initializer,
                  kernel_regularizer=self.kernel_regularizer),
            Activation('linear')
        ])
        self.model.compile(**self.compile_opts)

    def set_model(self, model):
        self.model = model
        self.do_reset_weights()

    def reestimate(self):
        self.do_reset_weights()
        self.estimate()

    def do_reset_weights(self):
        new_weights = [
            self.model.layers[0].kernel_initializer(w.shape)
                for w in self.model.get_weights()
        ]
        self.model.set_weights(new_weights)
        self.model_weights = self.model.get_weights()

    def update(self, X, Xp, update_estimate=True):
        """
        Parameters
        ----------
        X: NxD array-like data of inputs

        y: NxD array-like data of outputs

        Returns
        -------
        None

        """
        if X.ndim > 1:
            X = X[-1, :]  # only consider last example
        assert X.ndim == 1
        assert X.shape[0] == self.d
        assert Xp.ndim == 1
        assert Xp.shape[0] == self.d

        x_example = X.reshape((1, self.d))
        xp_example = Xp.reshape((1, self.d))

        # concatenate the training example to the active event token
        self.x_history[-1] = np.concatenate([self.x_history[-1], x_example], axis=0)

        # also, create a list of training pairs (x, y) for efficient sampling
        #  picks  random time-point in the history
        self.training_pairs.append(tuple([x_example, xp_example]))

        if update_estimate:
            self.estimate()
            self.f_is_trained = True

    def update_f0(self, Xp, update_estimate=True):
        self.update(self.filler_vector, Xp, update_estimate=update_estimate)
        self.f0_is_trained = True

        # precompute f0 for speed
        self.f0 = self._predict_f0()

    def get_variance(self):
        # Sigma is stored as a vector corresponding to the entries of the diagonal covariance matrix
        return self.Sigma

    def predict_next(self, X):
        """
        wrapper for the prediction function that changes the prediction to the identity function
        for untrained models (this is an initialization technique)

        """
        if not self.f_is_trained:
            if np.ndim(X) > 1:
                return np.copy(X[-1, :]).reshape(1, -1)
            return np.copy(X).reshape(1, -1)

        return self._predict_next(X)

    def _predict_next(self, X):
        """
        Parameters
        ----------
        X: 1xD array-like data of inputs

        Returns
        -------
        y: 1xD array of prediction vectors

        """
        if X.ndim > 1:
            X0 = X[-1, :]
        else:
            X0 = X
 
        self.model.set_weights(self.model_weights)
        return self.model.predict(np.reshape(X0, newshape=(1, self.d)))

    def predict_f0(self):
        """
        wrapper for the prediction function that changes the prediction to the identity function
        for untrained models (this is an initialization technique)

        N.B. This answer is cached for speed

        """
        return self.f0

    def _predict_f0(self):
        return self._predict_next(self.filler_vector)

    def log_likelihood_f0(self, Xp):

        if not self.f0_is_trained:
            if self.prior_probability:
                return self.prior_probability
            else: 
                return norm(0, self.variance_prior_mode ** 0.5).logpdf(Xp).sum()

        # predict the initial point (# this has been precomputed for speed)
        Xp_hat = self.predict_f0()

        # return the probability
        return fast_mvnorm_diagonal_logprob(Xp.reshape(-1) - Xp_hat.reshape(-1), self.Sigma)

    def log_likelihood_next(self, X, Xp):
        if not self.f_is_trained:
            if self.prior_probability:
                return self.prior_probability
            else: 
                return norm(0, self.variance_prior_mode ** 0.5).logpdf(Xp).sum()

        Xp_hat = self.predict_next(X)
        LL = fast_mvnorm_diagonal_logprob(Xp.reshape(-1) - Xp_hat.reshape(-1), self.Sigma)
        return Xp_hat, LL

    def log_likelihood_sequence(self, X, Xp):
        if not self.f_is_trained:
            if self.prior_probability:
                return self.prior_probability
            else: 
                return norm(0, self.variance_prior_mode ** 0.5).logpdf(Xp).sum()

        Xp_hat = self.predict_next_generative(X)
        return fast_mvnorm_diagonal_logprob(Xp.reshape(-1) - Xp_hat.reshape(-1), self.Sigma)

    # create a new cluster of scenes
    def new_token(self):
        if len(self.x_history) == 1 and self.x_history[0].shape[0] == 0:
            # special case for the first cluster which is already created
            return
        self.x_history.append(np.zeros((0, self.d)))

    def predict_next_generative(self, X):
        self.model.set_weights(self.model_weights)
        # the LDS is a markov model, so these functions are the same
        return self.predict_next(X)

    def run_generative(self, n_steps, initial_point=None):
        self.model.set_weights(self.model_weights)
        if initial_point is None:
            x_gen = self._predict_f0()
        else:
            x_gen = np.reshape(initial_point, (1, self.d))
        for ii in range(1, n_steps):
            x_gen = np.concatenate([x_gen, self.predict_next_generative(x_gen[:ii, :])])
        return x_gen

    def estimate(self):
        if self.reset_weights:
            self.do_reset_weights()
        else:
            self.model.set_weights(self.model_weights)

        n_pairs = len(self.training_pairs)

        if self.batch_update:
            def draw_sample_pair():
                # draw a random cluster for the history
                idx = np.random.randint(n_pairs)
                return self.training_pairs[idx]
        else:
            # for online sampling, just use the last training sample
            def draw_sample_pair():
                return self.training_pairs[-1]

        # run batch gradient descent on all of the past events!
        for _ in range(self.n_epochs):

            # draw a set of training examples from the history
            x_batch = []
            xp_batch = []
            for _ in range(self.batch_size):

                x_sample, xp_sample = draw_sample_pair()

                # these data aren't
                x_batch.append(x_sample)
                xp_batch.append(xp_sample)

            x_batch = np.reshape(x_batch, (self.batch_size, self.d))
            xp_batch = np.reshape(xp_batch, (self.batch_size, self.d))
            self.model.train_on_batch(x_batch, xp_batch)

        # cache the model weights
        self.model_weights = self.model.get_weights()

        # Update Sigma
        x_train_0, xp_train_0 = self.training_pairs[-1]
        xp_hat = self.model.predict(x_train_0)
        self.prediction_errors = np.concatenate([self.prediction_errors, xp_train_0 - xp_hat], axis=0)
        # remove old observations from consideration of the variance
        t = np.max([0, np.shape(self.prediction_errors)[0] - self.variance_window])
        self.prediction_errors = self.prediction_errors[t:, :]

        if np.shape(self.prediction_errors)[0] > 1:
            self.Sigma = map_variance(self.prediction_errors, self.var_df0, self.var_scale0)
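
A hedged usage sketch of the LinearEvent class above; the dimensionality and vectors are illustrative, and the module's own dependencies (Keras, get_prior_scale, map_variance, fast_mvnorm_diagonal_logprob) are assumed to be importable as in the original source:

import numpy as np

ev = LinearEvent(d=5, init_model=True)
x, xp = np.random.randn(5), np.random.randn(5)
ev.update(x, xp)               # stores the (x, xp) pair and re-estimates the linear map
xp_hat = ev.predict_next(xp)   # (1, 5) prediction for the following observation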
Example 30
class RecurrentLinearEvent(LinearEvent):

    # RNN which is initialized once and then trained using stochastic gradient descent
    # i.e. each new scene is a single example batch of size 1

    def __init__(self, d, var_df0=None, var_scale0=None, t=3,
                 optimizer=None, n_epochs=10, l2_regularization=0.00, batch_size=32,
                 kernel_initializer='glorot_uniform', init_model=False, prior_log_prob=None, reset_weights=False,
                 batch_update=True, optimizer_kwargs=None,variance_prior_mode=None, 
                 variance_window=None):

        LinearEvent.__init__(self, d, var_df0=var_df0, var_scale0=var_scale0,
                             optimizer=optimizer, n_epochs=n_epochs,
                             init_model=False, kernel_initializer=kernel_initializer,
                             l2_regularization=l2_regularization, prior_log_prob=prior_log_prob,
                             reset_weights=reset_weights, batch_update=batch_update, 
                             optimizer_kwargs=optimizer_kwargs, variance_prior_mode=variance_prior_mode,
                             variance_window=variance_window)

        self.t = t
        self.n_epochs = n_epochs

        # list of clusters of scenes:
        # each element of list = history of scenes for given cluster
        # history = N x D tensor, N = # of scenes in cluster, D = dimension of single scene
        #
        self.x_history = [np.zeros((0, self.d))]
        self.batch_size = batch_size

        if init_model:
            self.init_model()

        # cache the initial weights for retraining speed
        self.init_weights = None
        # generate a vector to act as a placeholder for time-points
        # prior to the start of the event so as to allow the network 
        # to implicitly learn a hidden state. We'll assume this
        # vector has length approx. equal to 1
        self.filler_vector = np.random.randn(self.d) / np.sqrt(self.d)

    def do_reset_weights(self):
        # # self._compile_model()
        if self.init_weights is None:
            new_weights = [
                self.model.layers[0].kernel_initializer(w.shape)
                 for w in self.model.get_weights()
            ]
            self.model.set_weights(new_weights)
            self.model_weights = self.model.get_weights()
            self.init_weights = self.model.get_weights()
        else:
            self.model.set_weights(self.init_weights)

    # initialize model once so we can then update it online
    def _compile_model(self):
        self.model = Sequential()
        self.model.add(SimpleRNN(self.d, input_shape=(None, self.d),
                                 activation=None, kernel_initializer=self.kernel_initializer,
                                 kernel_regularizer=self.kernel_regularizer))
        self.model.compile(**self.compile_opts)

    # concatenate current example with the history of the last t-1 examples
    # this is for the recurrent layer
    #
    def _unroll(self, x_example):
        x_train = np.concatenate([self.x_history[-1][-(self.t - 1):, :], x_example], axis=0)
        # x_train = np.concatenate([self.filler_vector, x_train], axis=0)
        x_train = x_train.reshape((1, np.min([x_train.shape[0], self.t]), self.d))
        return x_train
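    # Worked example of _unroll (illustration only): with t = 3 and d = 4, if
    # x_history[-1] already holds 5 scenes, the last 2 are concatenated with the
    # new example and reshaped to (1, 3, 4); with only 1 prior scene the result
    # is (1, 2, 4), i.e. a shorter unrolled window.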

    # predict a single example
    def _predict_next(self, X):
        self.model.set_weights(self.model_weights)
        # Note: this function predicts the next observation conditioned on the training data the model has seen

        if X.ndim > 1:
            X = X[-1, :]  # only consider last example
        assert np.ndim(X) == 1
        assert X.shape[0] == self.d

        x_test = X.reshape((1, self.d))

        # concatenate current example with history of last t-1 examples
        # this is for the recurrent part of the network
        x_test = self._unroll(x_test)
        return self.model.predict(x_test)

    def _predict_f0(self):
        return self.predict_next_generative(self.filler_vector)

    def _update_variance(self):
        if np.shape(self.prediction_errors)[0] > 1:
            self.Sigma = map_variance(self.prediction_errors, self.var_df0, self.var_scale0)

    def update(self, X, Xp, update_estimate=True):
        if X.ndim > 1:
            X = X[-1, :]  # only consider last example
        assert X.ndim == 1
        assert X.shape[0] == self.d
        assert Xp.ndim == 1
        assert Xp.shape[0] == self.d

        x_example = X.reshape((1, self.d))
        xp_example = Xp.reshape((1, self.d))

        # concatenate the training example to the active event token
        self.x_history[-1] = np.concatenate([self.x_history[-1], x_example], axis=0)

        # also, create a list of training pairs (x, y) for efficient sampling
        #  picks  random time-point in the history
        _n = np.shape(self.x_history[-1])[0]
        x_train_example = np.reshape(
                    unroll_data(self.x_history[-1][max(_n - self.t, 0):, :], self.t)[-1, :, :], (1, self.t, self.d)
                )
        self.training_pairs.append(tuple([x_train_example, xp_example]))

        if update_estimate:
            self.estimate()
            self.f_is_trained = True

    def predict_next_generative(self, X):
        self.model.set_weights(self.model_weights)
        X0 = np.reshape(unroll_data(X, self.t)[-1, :, :], (1, self.t, self.d))
        return self.model.predict(X0)

    # optional: run batch gradient descent on all past event clusters
    def estimate(self):
        if self.reset_weights:
            self.do_reset_weights()
        else:
            self.model.set_weights(self.model_weights)

        # get predictions errors for variance estimate *before* updating the 
        # neural networks.  For an untrained model, the prediction should be the
        # origin, deterministically

        # Update Sigma
        x_train_0, xp_train_0 = self.training_pairs[-1]
        xp_hat = self.model.predict(x_train_0)
        self.prediction_errors = np.concatenate([self.prediction_errors, xp_train_0 - xp_hat], axis=0)
                
        # remove old observations from consideration of the variance
        t = np.max([0, np.shape(self.prediction_errors)[0] - self.variance_window])
        self.prediction_errors = self.prediction_errors[t:, :]

        # update the variance
        self._update_variance()


        ## then update the NN
        n_pairs = len(self.training_pairs)

        if self.batch_update:
            def draw_sample_pair():
                # draw a random cluster for the history
                idx = np.random.randint(n_pairs)
                return self.training_pairs[idx]
        else:
            # for online sampling, just use the last training sample
            def draw_sample_pair():
                return self.training_pairs[-1]

        # run batch gradient descent on all of the past events!
        for _ in range(self.n_epochs):

            # draw a set of training examples from the history
            x_batch = np.zeros((0, self.t, self.d))
            xp_batch = np.zeros((0, self.d))
            for _ in range(self.batch_size):

                x_sample, xp_sample = draw_sample_pair()

                x_batch = np.concatenate([x_batch, x_sample], axis=0)
                xp_batch = np.concatenate([xp_batch, xp_sample], axis=0)

            self.model.train_on_batch(x_batch, xp_batch)
        self.model_weights = self.model.get_weights()
if os.path.isfile('models/medical_trial_model.h5') is False:
    try:
        os.mkdir(path)
    except OSError:
        print ("Creation of the directory %s failed" % path)
    else:
        print ("Successfully created the directory %s " % path)
    model.save('models/medical_trial_model.h5')

if os.path.isfile('models/medical_trial_model.h5') is False:
    model.save('models/medical_trial_model.h5')

# 3. model.save_weights()

# Checks first to see if file exists already.
# If not, the weights are saved to disk.
import os.path
if os.path.isfile('models/my_model_weights.h5') is False:
    model.save_weights('models/my_model_weights.h5')

model2 = Sequential([
    Dense(units=16, input_shape=(1,), activation='relu'),
    Dense(units=32, activation='relu'),
    Dense(units=2, activation='softmax')
])
print('*********************')
print('model2.load_weights')
model2.load_weights('models/my_model_weights.h5')
model2.get_weights()
print('*********************')
Example 32
model.compile(loss=keras.losses.categorical_crossentropy,optimizer=keras.optimizers.Adam(lr=.001),metrics=['categorical_accuracy'])

#model.load_weights('Convolutional image classifier_Model4Reg.h5')






'''Prints out loss and accuracy on a test batch'''

#K=keras.models.Model.test_on_batch(model,x=x_test,y=y_test)
#print('Validation Loss: '+str(K[0])+ '\n'+'Validation Accuracy: '+str(K[1]))



# test the model and your weights
# model.fit(bin7, count3, epochs=1)
# model.set_weights(myWeights)
# predict3 = model.predict(bin7)
# np.set_printoptions(suppress=True)
# np.set_printoptions(precision=1)
# print('prediction =', predict3)



Examples = {
    'count3' : [ bin7, count3, model, myWeights ],
    'cifar10': [x_test,y_test,model,model.get_weights()]
}