Example #1
def test_saving_overwrite_option():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    org_weights = model.get_weights()
    new_weights = [np.random.random(w.shape) for w in org_weights]

    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)
    model.set_weights(new_weights)

    with patch('keras.engine.saving.ask_to_proceed_with_overwrite') as ask:
        ask.return_value = False
        save_model(model, fname, overwrite=False)
        ask.assert_called_once()
        new_model = load_model(fname)
        for w, org_w in zip(new_model.get_weights(), org_weights):
            assert_allclose(w, org_w)

        ask.return_value = True
        save_model(model, fname, overwrite=False)
        assert ask.call_count == 2
        new_model = load_model(fname)
        for w, new_w in zip(new_model.get_weights(), new_weights):
            assert_allclose(w, new_w)

    os.remove(fname)
Example #2
def train():

    print('Build model...')
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen, dropout=0.2))
    model.add(LSTM(128, dropout_W=0.2, dropout_U=0.2))  # try using a GRU instead, for fun
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    # try using different optimizers and different optimizer configs
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    print(X_train.shape)
    print(y_train.shape)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test,
                                batch_size=batch_size)
    print('Test score:', score)
    print('Test accuracy:', acc)

    with open("save_weight_lstm.pickle", mode="wb") as f:
        pickle.dump(model.get_weights(),f)
Example #3
def test_saving_overwrite_option_gcs():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    org_weights = model.get_weights()
    new_weights = [np.random.random(w.shape) for w in org_weights]

    with tf_file_io_proxy('keras.engine.saving.tf_file_io') as file_io_proxy:
        gcs_filepath = file_io_proxy.get_filepath(
            filename='test_saving_overwrite_option_gcs.h5')
        # Use a unique filename per test so that tests can run in parallel.
        save_model(model, gcs_filepath)
        model.set_weights(new_weights)

        with patch('keras.engine.saving.ask_to_proceed_with_overwrite') as ask:
            ask.return_value = False
            save_model(model, gcs_filepath, overwrite=False)
            ask.assert_called_once()
            new_model = load_model(gcs_filepath)
            for w, org_w in zip(new_model.get_weights(), org_weights):
                assert_allclose(w, org_w)

            ask.return_value = True
            save_model(model, gcs_filepath, overwrite=False)
            assert ask.call_count == 2
            new_model = load_model(gcs_filepath)
            for w, new_w in zip(new_model.get_weights(), new_weights):
                assert_allclose(w, new_w)

        file_io_proxy.delete_file(gcs_filepath)  # cleanup
Example #4
def test_preprocess_weights_for_loading_for_model(layer):
    model = Sequential([layer])
    weights1 = model.get_weights()
    weights2 = topology.preprocess_weights_for_loading(
        model, convert_weights(layer, weights1),
        original_keras_version='1')
    assert all([np.allclose(x, y, 1e-5)
                for (x, y) in zip(weights1, weights2)])
Example #5
 def init_neural_networks(self):
     print("init start")
     model = Sequential()
     model.add(Dense(input_dim=self.inputSize, output_dim=20, init="he_normal", activation="tanh"))
     model.add(Dense(input_dim=20, output_dim=1, init="he_normal", activation="tanh"))
     model.add(Dense(input_dim=1, output_dim=1, init="he_normal", activation="linear"))
     model.compile(loss='mean_squared_error', optimizer='rmsprop')
     weights = model.get_weights()
     self.learner = model
     print("init end")
Example #6
def test_save_load_weights_gcs():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    org_weights = model.get_weights()

    with tf_file_io_proxy('keras.engine.saving.tf_file_io') as file_io_proxy:
        gcs_filepath = file_io_proxy.get_filepath(
            filename='test_save_load_weights_gcs.h5')
        # Use a unique filename per test so that tests can run in parallel.
        model.save_weights(gcs_filepath)
        model.set_weights([np.random.random(w.shape) for w in org_weights])
        for w, org_w in zip(model.get_weights(), org_weights):
            assert not (w == org_w).all()
        model.load_weights(gcs_filepath)
        for w, org_w in zip(model.get_weights(), org_weights):
            assert_allclose(w, org_w)

        file_io_proxy.delete_file(gcs_filepath)  # cleanup
Example #7
class brain:
    def __init__(self, model):
        if model is None:
            self.model = Sequential()
            self.model.add(
                Dense(8, activation="tanh", input_dim=6,
                      kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=None)))
            self.model.add(
                Dense(3, activation="tanh",
                      kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=None)))
            self.model.compile(loss='mean_squared_error', optimizer='adam')
        else:
            self.model = model

    def getOutputs(self, inputs):
        inputs.append(1)
        return self.model.predict(np.asarray([inputs]))

    def mutate(self, brain1, brain2):
        # Weights alternate [kernel, bias, kernel, bias, ...], so step by 2:
        # cross over the kernels and keep this brain's own biases unchanged.
        newBrain = []
        for i in range(0, len(self.model.get_weights()), 2):
            newWeights = []
            b1weights = brain1.model.get_weights()[i]
            b2weights = brain2.model.get_weights()[i]
            for n in range(len(b1weights)):
                w = []
                for m in range(len(b1weights[0])):
                    r = random()
                    k = 0
                    if random() < 0.1:
                        k = randint(-100, 100) / 100

                    if (r < 0.4):
                        w.append(b1weights[n][m] + k)
                    elif r > 0.6:
                        w.append(b2weights[n][m] + k)
                    else:
                        w.append((b1weights[n][m] + b2weights[n][m]) / 2 + k)

                newWeights.append(w)
            newBrain.append(newWeights)
            newBrain.append(self.model.get_weights()[i + 1])
        self.model.set_weights(newBrain)
Example #8
 def compare_newapi(self, klayer, blayer, input_data, weight_converter=None,
                    is_training=False, rtol=1e-6, atol=1e-6):
     from keras.models import Sequential as KSequential
     from bigdl.nn.keras.topology import Sequential as BSequential
     bmodel = BSequential()
     bmodel.add(blayer)
     kmodel = KSequential()
     kmodel.add(klayer)
     koutput = kmodel.predict(input_data)
     from bigdl.nn.keras.layer import BatchNormalization
     if isinstance(blayer, BatchNormalization):
         k_running_mean = K.eval(klayer.running_mean)
         k_running_std = K.eval(klayer.running_std)
         blayer.set_running_mean(k_running_mean)
         blayer.set_running_std(k_running_std)
     if kmodel.get_weights():
         bmodel.set_weights(weight_converter(klayer, kmodel.get_weights()))
     bmodel.training(is_training)
     boutput = bmodel.forward(input_data)
     self.assert_allclose(boutput, koutput, rtol=rtol, atol=atol)
Example #9
class Brain:
    def __init__(self, model):
        if model is None:
            self.model = Sequential()
            self.model.add(Dense(12, input_dim=6, activation="tanh",
                                 kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=None)))

            # self.model.add(Dense(20, activation="tanh",
            #                      kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=None)))
            # self.model.add(Dense(20, activation="tanh",
            #                      kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=None)))
            # self.model.add(Dense(20, activation="tanh",
            #                      kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=None)))
            self.model.add(Dense(3, activation="tanh",
                                 kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=None)))
            self.model.compile(optimizer='sgd', loss='mean_squared_error')
        else:
            self.model = model

    def getOutputs(self, inputs):
        return self.model.predict(np.asarray([inputs]))

    def breed(self, brain1, brain2):
        newBrain = []
        for i in range(0, len(self.model.get_weights()), 2):
            newWeights = []
            b1weights = brain1.model.get_weights()[i]
            b2weights = brain2.model.get_weights()[i]
            for j in range(len(b1weights)):
                w = []
                for k in range(len(b1weights[0])):
                    r = random()
                    if r > 0.8:
                        genome = choice([b1weights[j][k], b2weights[j][k]])
                        w.append(genome + randint(-200, 200)/1000)
                    else:
                        w.append(choice([b1weights[j][k], b2weights[j][k]]))
                newWeights.append(w)
            newBrain.append(newWeights)
            newBrain.append(self.model.get_weights()[i + 1])
        self.model.set_weights(newBrain)
Example #10
 def test1():
   model = Sequential()
   model.add(Embedding(100,50,input_length=10,mask_zero=True))
   model.add(Sum(50,ave=True))
   model.compile(optimizer='sgd', loss='mse')
   a = model.predict(np.array([range(10)]))
   w = model.get_weights()[0]
   b = w[1:10,:].mean(0)
   if abs((a-b).sum()) < 1e-8:
     print("Behaves as expected")
   else:
     print("Something went wrong")
Example #11
 def compare_layer(self, klayer, zlayer, input_data, weight_converter=None,
                   is_training=False, rtol=1e-6, atol=1e-6):
     """
     Compare forward results for Keras layer against Zoo Keras API layer.
     """
     from keras.models import Sequential as KSequential
     from zoo.pipeline.api.keras.models import Sequential as ZSequential
     zmodel = ZSequential()
     zmodel.add(zlayer)
     kmodel = KSequential()
     kmodel.add(klayer)
     koutput = kmodel.predict(input_data)
     from zoo.pipeline.api.keras.layers import BatchNormalization
     if isinstance(zlayer, BatchNormalization):
         k_running_mean = K.eval(klayer.running_mean)
         k_running_std = K.eval(klayer.running_std)
         zlayer.set_running_mean(k_running_mean)
         zlayer.set_running_std(k_running_std)
     if kmodel.get_weights():
         zmodel.set_weights(weight_converter(klayer, kmodel.get_weights()))
     zmodel.training(is_training)
     zoutput = zmodel.forward(input_data)
     self.assert_allclose(zoutput, koutput, rtol=rtol, atol=atol)
Example #12
def train():

    model = Sequential()
    model.add(Dense(output_dim=100, input_dim=28*28))
    model.add(Activation("relu"))
    model.add(Dense(output_dim=10))
    model.add(Activation("softmax"))

    model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])

    model.fit(X_train,y_train)

    with open("save_weight.pickle", mode="wb") as f:
        pickle.dump(model.get_weights(),f)
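
The matching load step is not shown. A minimal sketch of it, assuming the same Sequential architecture has been rebuilt first (pickle round-trips a list of NumPy arrays without trouble):

import pickle

# Rebuild the same architecture, then restore the pickled weight list.
with open("save_weight.pickle", mode="rb") as f:
    model.set_weights(pickle.load(f))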
Example #13
def test_EarlyStopping_reuse():
    patience = 3
    data = np.random.random((100, 1))
    labels = np.where(data > 0.5, 1, 0)
    model = Sequential((
        Dense(1, input_dim=1, activation='relu'),
        Dense(1, activation='sigmoid'),
    ))
    model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
    stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)
    weights = model.get_weights()

    hist = model.fit(data, labels, callbacks=[stopper])
    assert len(hist.epoch) >= patience

    # This should allow training to go for at least `patience` epochs
    model.set_weights(weights)
    hist = model.fit(data, labels, callbacks=[stopper])
    assert len(hist.epoch) >= patience
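
The snapshot-and-restore pattern in this test (capture get_weights() once, set_weights() to reset) generalizes to any repeated-training experiment. A small helper sketch, with illustrative names:

def snapshot(model):
    # get_weights() returns fresh NumPy arrays, but copying makes the
    # snapshot's immutability explicit.
    return [w.copy() for w in model.get_weights()]

def restore(model, weights):
    model.set_weights(weights)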
Example #14
def train_model(feature_layers, classification_layers, image_list, nb_epoch, nb_classes, img_rows, img_cols, weights=None):
    # Create testset data for cross-val
    num_images = len(image_list)
    test_size = int(0.2 * num_images)
    print("Train size: ", num_images-test_size)
    print("Test size: ", test_size)

    model = Sequential()
    for l in feature_layers + classification_layers:
        model.add(l)

    if weights is not None:
        model.set_weights(weights)

    # let's train the model using SGD + momentum (how original).
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)
    
    print('Using real time data augmentation')
    for e in range(nb_epoch):
        print('-'*40)
        print('Epoch', e)
        print('-'*40)
        print('Training...')
        # batch train with realtime data augmentation
        progbar = generic_utils.Progbar(num_images-test_size)
        for X_batch, Y_batch in flow(image_list[0:-test_size]):
            X_batch = X_batch.reshape(X_batch.shape[0], 3, img_rows, img_cols)
            Y_batch = np_utils.to_categorical(Y_batch, nb_classes)
            loss = model.train_on_batch(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[('train loss', loss)])

        print('Testing...')
        # test time!
        progbar = generic_utils.Progbar(test_size)
        for X_batch, Y_batch in flow(image_list[-test_size:]):
            X_batch = X_batch.reshape(X_batch.shape[0], 3, img_rows, img_cols)
            Y_batch = np_utils.to_categorical(Y_batch, nb_classes)
            score = model.test_on_batch(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[('test loss', score)])
    return model, model.get_weights()
Example #15
def define_autoencoder():
    # encoder
    autoencoder = Sequential()
    autoencoder.add(Conv2D(16, (3, 3), strides=1, activation="relu",
                           padding="same", input_shape=(28, 28, 1)))
    autoencoder.add(MaxPooling2D((2, 2), padding="same"))
    autoencoder.add(Conv2D(8, (3, 3), strides=1,
                           activation="relu", padding="same"))
    autoencoder.add(MaxPooling2D((2, 2), padding="same"))
    # decoder
    autoencoder.add(Conv2D(8, (3, 3), strides=1,
                           activation="relu", padding="same"))
    autoencoder.add(UpSampling2D((2, 2)))
    autoencoder.add(Conv2D(16, (3, 3), strides=1,
                           activation="relu", padding="same"))
    autoencoder.add(UpSampling2D((2, 2)))
    autoencoder.add(Conv2D(1, (3, 3), strides=1,
                           activation="sigmoid", padding="same"))

    autoencoder.compile(optimizer="adam", loss="binary_crossentropy")
    initial_weights = autoencoder.get_weights()
    return initial_weights, autoencoder
Example #16
def _test_equivalence(channel_order=None):

    from kfs.layers.convolutional import Convolution2DEnergy_TemporalBasis
    from keras.models import Sequential
    #from keras.layers import Flatten, Dense
    input_shape = (12, 3, 64, 64)
    if channel_order is None:
        channel_order = K.image_data_format()
    if channel_order == 'channels_last':
        input_shape = (12, 64, 64, 3)


    nn = Sequential()
    nn.add(Convolution2DEnergy_TemporalBasis(8, 16, 4, (5, 5), 7,
                                            padding='same',
                                            input_shape=input_shape,
                                            data_format=channel_order))

    rng = np.random.RandomState(42)
    datums = rng.randn(6, 12, 3, 64, 64).astype('float32')
    if channel_order == 'channels_last':
        datums = datums.transpose(0, 1, 3, 4, 2)


    nn.compile(loss='mse', optimizer='sgd')

    nn2 = Sequential()
    nn2.add(Convolution2DEnergy_TemporalCorrelation(8, 16, 4, (5, 5), 7,
                                            padding='same',
                                            input_shape=input_shape,
                                            data_format=channel_order))
    nn2.compile(loss='mse', optimizer='sgd')
    nn2.set_weights(nn.get_weights())

    pred1 = nn.predict(datums)
    pred2 = nn2.predict(datums)
    assert ((pred1 - pred2) == 0.).all()

    return nn, nn.predict(datums), nn2, nn2.predict(datums)
Example #17
opt = Adam(lr=0.0001)

# compile the model
model.compile(optimizer=opt, loss='binary_crossentropy', 
              metrics=['accuracy',recall_m,precision_m,f1_m])

# add checkpoints
filepath="./models/gamestatesLSTM/weights-improvement-{epoch:02d}-{val_accuracy:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')

# SOS REMEMBER TO CLOSE f
# log the weights to check if they get updated during training
f = open('./models/gamestatesLSTM/weights.csv','w')
writer = csv.writer(f, delimiter='|')
print_weights = LambdaCallback(
    on_epoch_end=lambda epoch, logs: writer.writerow(model.get_weights()))

# This callback will stop the training when there is no improvement in  
# the validation loss for 5 consecutive epochs.  
early_stop = EarlyStopping(monitor='val_loss',min_delta =0.01,
                           patience=10,mode='min')

callbacks_list = [checkpoint,print_weights,early_stop]

# train
history = model.fit(x_train,y_train, validation_data=(x_test,y_test),
                    callbacks=callbacks_list,epochs=30,batch_size=100,
                    verbose=0)

f.close()
Example #18
X_test /= 255
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)

X_wholeset = np.vstack([X_train, X_test])
X_wholeset_tmp = X_wholeset

all_params = []
print('PRETRAINING')
for i in range(len(layer_sizes)-1):
    temp_ae_model = Sequential()
    temp_ae_model.add(DAE(layer_sizes[i], layer_sizes[i+1], activation='sigmoid', corruption_level=0.3))
    temp_ae_model.compile(loss='mean_squared_error', optimizer='adam')
    temp_ae_model.fit(X_wholeset_tmp, X_wholeset_tmp, nb_epoch=nb_pretrain_epochs[i], batch_size=batch_sizes[i])
    X_wholeset_tmp = temp_ae_model.predict(X_wholeset_tmp)
    W, b, bT = temp_ae_model.get_weights()
    all_params.append((W, b, bT))
# create model for fine tuning
final_ae_model = Sequential()
for i in range(len(layer_sizes)-1):
    dense_layer = Dense(layer_sizes[i], layer_sizes[i+1], activation='sigmoid')
    final_ae_model.add(dense_layer)
final_ae_model.add(Dense(layer_sizes[-1], nb_classes, activation='sigmoid'))
final_ae_model.add(Activation('softmax'))
final_ae_model.compile(loss='categorical_crossentropy', optimizer='adam')
# initialize weights
for i in range(len(layer_sizes)-1):
    W, b, bT = all_params[i]
    final_ae_model.layers[i].set_weights([W, b])
# finetune
print('FINETUNING')
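
The excerpt stops before the fine-tuning step itself. Under the same variable names it would presumably continue along these lines (nb_finetune_epochs is hypothetical):

final_ae_model.fit(X_train, y_train, nb_epoch=nb_finetune_epochs,
                   batch_size=batch_sizes[-1])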
Example #19
class KerasRegressionModel(RegressionModel):
    def __init__(self,
                 arity=1,
                 network_structure=(1, ),
                 activation_function="tanh",
                 error_metric="rmse",
                 optimizer_type="nadam",
                 learning_rate=None,
                 loss_function="mse",
                 nb_epoch=1,
                 batch_size=100,
                 early_stopping=False,
                 weight_init_method="normal",
                 graphical_verbose=False,
                 validation_split=0.1,
                 dropout=False,
                 dropout_input_layer_fraction=0.2,
                 dropout_hidden_layer_fraction=0.5,
                 batch_normalization=False,
                 verbose=False,
                 weight_decay=False,
                 weight_decay_parameter=0.001,
                 **kwargs):
        """

        A class to construct arbitrary artificial neural networks using the Keras library (http://keras.io/).
        The module supports state-of-the-art techniques for optimization and regularization of ANNs.

        :param network_structure: A tuple specifying the number of neurons in each layer
        :param activation_function: Activation function used, cf. http://keras.io/activations/
        :param error_metric: Error metric
        :param optimizer_type: Specifies the optimization method used
        :param loss_function: Loss function (built into Keras or custom), cf. http://keras.io/objectives/
        :param nb_epoch: Number of training epochs
        :param batch_size: Batch size used for mini-batch learning
        :param early_stopping: If True, training is interrupted when the loss stops decreasing
        :param weight_init_method: Method of weight initialization, e.g. normal, glorot_normal, uniform
        :param arity: Input dimension
        :param verbose: Verbosity mode; verbose=1 shows a progress bar, verbose=2 logs to the console
        :param graphical_verbose: If True, show graphical (plot) output
        :param dropout: Use dropout layers for regularization
        :param dropout_input_layer_fraction: Fraction of input units to drop
        :param dropout_hidden_layer_fraction: Fraction of hidden-layer units to drop
        :param batch_normalization: Activate batch normalization
        :param weight_decay: Activate weight decay regularization
        :param weight_decay_parameter: Sets the weight decay regularization parameter
        :param kwargs:
        """

        super(KerasRegressionModel, self).__init__()
        #self.logger.info("Compiling ANN...")
        self.__dict__.update(locals())

        # Initialize ANN structure

        self.__model = Sequential()

        self.input_layer_params = {
            "input_shape": (self.arity, ),
            "activation": self.activation_function,
            "output_dim": self.network_structure[0],
            "init": self.weight_init_method
        }

        self.hidden_layer_params = {
            "activation": self.activation_function,
            "init": self.weight_init_method
        }
        if self.weight_decay:
            self.hidden_layer_params["W_regularizer"] = l2(
                weight_decay_parameter)

        self.output_layer_params = {
            "activation": "linear",
            "init": self.weight_init_method
        }

        self.create_input_layer()  # stack up remaining layers.
        self.create_hidden_layers()
        self.create_output_layer()

        # compile the neural network
        self.__model.compile(optimizer=RMSprop(lr=0.001),
                             loss=self.loss_function)
        #self.logger.info("Compilation completed...")
        self.func = self.__model.predict

    def add_layer(self, num_nodes, layer_params, dropout=False):
        self.__model.add(Dense(num_nodes, **layer_params))
        if (dropout):
            self.__model.add(Dropout(self.dropout_hidden_layer_fraction))

    def create_input_layer(self):
        if self.dropout:
            self.__model.add(
                Dropout(self.dropout_input_layer_fraction,
                        input_shape=(self.arity, )))
            del self.input_layer_params["input_shape"]
        self.__model.add(Dense(**self.input_layer_params))
        if self.batch_normalization:
            self.__model.add(BatchNormalization())

    def create_hidden_layers(self):
        for num_nodes in self.network_structure[1:-1]:
            self.add_layer(num_nodes,
                           self.hidden_layer_params,
                           dropout=self.dropout)
            if self.batch_normalization:
                self.__model.add(BatchNormalization())

    def create_output_layer(self):
        self.add_layer(self.network_structure[-1], self.output_layer_params)
        if self.batch_normalization:
            self.__model.add(BatchNormalization())

    @property
    def weights(self):
        return self.__model.get_weights()

    #@doc_inherit
    def fit(self, xfit, yfit):
        self.hist = self.__model.fit(xfit,
                                     yfit,
                                     nb_epoch=self.nb_epoch,
                                     batch_size=self.batch_size,
                                     verbose=self.verbose,
                                     validation_split=self.validation_split
                                     )  #, callbacks=self.callbacks)
        return self

    def __getstate__(self):
        """
        Function to make ANNRegressionModel picklable.
        The weights, the architecture, and the ANN compilation settings are stored in a dictionary.
        :return: A dictionary containing the ANN architecture in JSON format, the weights, and the compilation settings
        """

        state = copy(self.__dict__)
        del state["func"]
        #del state["logger"]
        #del state["_ANNRegressionModel__model"]
        del state["hist"]
        return dict(json_model=self.__model.to_json(),
                    weights=self.__model.get_weights(),
                    config=state)

    def __setstate__(self, d):
        """
        Function to make ANNRegressionModel picklable
        :param d:
        :return:
        """

        self.__dict__ = d["config"]
        self.__model = model_from_json(d["json_model"])
        self.__model.set_weights(d["weights"])
        self.func = self.__model.predict

    def print_summary(self):
        """
        Print a summary of the neural network, including architecture and compilation settings.
        :return:
        """
        self.__model.summary()
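
Typical use of the class, sketched with assumed inputs (x and y as (n, 1) NumPy arrays):

import numpy as np

x = np.linspace(0, 1, 100).reshape(-1, 1)
y = 2 * x + 0.1 * np.random.randn(100, 1)
reg = KerasRegressionModel(arity=1, network_structure=(8, 1), nb_epoch=10)
reg.fit(x, y)
print(reg.weights)  # list of NumPy arrays via the weights property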
Example #20
labelsDepy = np_utils.to_categorical(y_deploy)

model = Sequential()
model.add(
    Dense(40, input_shape=(10, ), activation="relu", W_regularizer=l2(0.001)))
model.add(Dropout(0.5))
model.add(Dense(2, activation="softmax"))
model.load_weights(
    '/Users/salemameen/Desktop/banditsbook/python_valley/valleyModelbest.hdf5')
model.compile(loss='categorical_crossentropy',
              metrics=['accuracy'],
              optimizer='adam')
# Actual modelling

score, accuracy = model.evaluate(X_deploy,
                                 labelsDepy,
                                 batch_size=100,
                                 verbose=0)
print("Test fraction correct (NN-Score) = {:.2f}".format(score))
print("Test fraction correct (NN-Accuracy) = {:.2f}".format(accuracy))

####################################################
#n_samples ,_=Y_test.shape
SamplingTesting = 500
All_weights = model.get_weights()
All_weights_BACKUP = model.get_weights()
FC_weights_3 = All_weights[0]
row, col = shape(FC_weights_3)
SizeWights = row * col
OldAccuracy = accuracy
Example #21
    ])

## Compile our model using a mean squared error (mse) loss function
## and a stochastic gradient descent (sgd) optimizer
model.compile(loss='mse', optimizer='sgd') ## To try our model with an Adam optimizer, simply replace 'sgd' with 'Adam'

## Set our learning rate to 0.001 and print it
model.optimizer.lr.set_value(.001)
print(model.optimizer.lr.get_value())

## Fit our model to the noisy data we create above. Notes:
## The validation split parameter reserves 20% of our data for validation (ie 80% will be used for training)
## I don't really know if using a batch size of 1 makes sense
history = model.fit(x=df['x'], y=df['y'], validation_split=0.2, batch_size=1, epochs=100)

## ---------- Review our weights -------------------
## Save and print our final weights
predicted_m = model.get_weights()[0][0][0]
predicted_b = model.get_weights()[1][0]
print("\nm=%.2f b=%.2f\n" % (predicted_m, predicted_b))

## Create our predicted y's based on the model
df['y_predicted'] = df['x'].apply(lambda x: predicted_m*x + predicted_b)

## Plot the original data with a standard linear regression
ax1 = sns.regplot(x='x', y='y', data=df, label='real')

## Plot our predicted line based on our Keras model's slope and y-intercept
ax2 = sns.regplot(x='x', y='y_predicted', data=df, scatter=False, label='predicted')
ax2.legend(loc="upper left")
Example #22
    code_model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    labels = to_categorical(training['labels'], num_classes=3)
    code_model.fit(training['code'],labels,shuffle=True,epochs=10,batch_size=32,verbose=2)
    predicted_labels=np.argmax(code_model.predict(test['code']),axis=1)
    conf_code_mlp1=confusion_matrix(test['labels'],predicted_labels)
    conf_code_mlp+=conf_code_mlp1

    #code full train
    full_model=Sequential()
    for j in range(num_of_hidden_layers):
        if j==0:
            full_model.add(Dense(num_of_neurons[j],activation='relu',input_dim=len(training['data'][0,:])))
        else:
            full_model.add(Dense(num_of_neurons[j],activation='relu'))
    deneme=full_model.get_weights()
    for klm in range(len(deneme)):
        np.copyto(deneme[klm],weights[klm])
    full_model.set_weights(deneme)
    full_model.add(Dense(32, activation='relu'))
    full_model.add(Dropout(0.2))
    full_model.add(Dense(8, activation='relu'))
    full_model.add(Dropout(0.2))
    full_model.add(Dense(3,activation='softmax'))
    full_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    full_model.fit(training['data'],labels,shuffle=True,epochs=10,batch_size=32,verbose=2)
    predicted_labels=np.argmax(full_model.predict(test['data']),axis=1)
    conf_full_pre1=confusion_matrix(test['labels'],predicted_labels)
    conf_full_pre+=conf_full_pre1

    print(conf_raw_rule1)
Example #23
class DQN_agent:
    def __init__(self, env):
        self.env = env
        self.memory = deque(
            maxlen=20000)  #length of the replay memory is set here
        self.learning_rate = .01  #learning rate used in the optimizer
        self.lr_decay = .95  #learning rate decay
        self.gamma = .95  #discount factor
        self.epsilon = 1.0  #exploration parameter
        self.epsilon_min = .001  #minimum exploration parameter once fully attenuated
        self.decay_rate = .9  #rate at which epsilon (exploration parameter) is attenuated
        self.model = self.create_model()  #create a NN for Q-learning
        self.target_model = self.create_model(
        )  #create another NN to be maintained as target
        self.batch_size = 32  #batch size of the sample from replay memory
        self.tau = .125

    def add_to_memory(
            self, cur_state, action, reward, next_state,
            done):  #add a SARS transition to the replay memory
        self.memory.append([cur_state, action, reward, next_state, done])

    def create_model(self):  #Merely a function that creates a NN model
        self.model = Sequential()
        self.model.add(
            Dense(512,
                  activation='relu',
                  input_dim=self.env.observation_space.shape[0],
                  kernel_initializer='normal'))
        self.model.add(Dropout(.3))
        self.model.add(
            Dense(512, activation='relu', kernel_initializer='normal'))
        self.model.add(Dense(self.env.action_space.n))
        self.model.compile(loss='mean_squared_error',
                           optimizer=Adam(lr=self.learning_rate,
                                          decay=self.lr_decay))
        return self.model

    def act(self, cur_state):  #act in an epsilon greedy manner
        self.epsilon = self.epsilon * self.decay_rate
        self.epsilon = max(self.epsilon, self.epsilon_min)
        if (np.random.uniform(0, 1) < self.epsilon):
            return (np.random.choice(range(self.env.action_space.n))
                    )  #pick a random action
        return np.argmax(
            self.model.predict(cur_state)[0])  #pick in a Q-greedy manner

    def train_using_replay(self):  #train using replay memory
        if (len(self.memory) < self.batch_size):
            return
        samples = random.sample(self.memory, self.batch_size)
        #np.random.choice() needs a one-dimensional list as input; for anything else, use random.sample(list, size)
        #(which requires importing random).
        for sample in samples:
            state, action, reward, nxt_state, done = sample
            target = self.target_model.predict(state)
            if (done):
                target[0][action] = reward
            else:
                target[0][action] = reward + self.gamma * max(
                    self.target_model.predict(nxt_state)[0])
            self.model.fit(state, target, epochs=1, verbose=0)

    def train_target(self):
        weights = self.model.get_weights()
        target_weights = self.target_model.get_weights()
        for i in range(len(weights)):
            target_weights[i] = self.tau * weights[i] + target_weights[i] * (
                1 - self.tau)
        self.target_model.set_weights(target_weights)

    def save_model(self, file_name):
        self.target_model.save(
            file_name)  #Save target model and weights in file with file_name.
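
A minimal driver loop for this agent, sketched against a classic Gym-style environment (old gym API where env.reset() returns the observation and env.step() returns a 4-tuple):

import gym
import numpy as np

env = gym.make('CartPole-v1')
agent = DQN_agent(env)
for episode in range(200):
    state = np.reshape(env.reset(), (1, -1))
    done = False
    while not done:
        action = agent.act(state)
        nxt_state, reward, done, _ = env.step(action)
        nxt_state = np.reshape(nxt_state, (1, -1))
        agent.add_to_memory(state, action, reward, nxt_state, done)
        agent.train_using_replay()
        state = nxt_state
    agent.train_target()  # soft-update the target network once per episode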
Example #24
                  np.array([-0.5])]
'''
output_weights = None

model = Sequential()

model.add(SimpleRNN(input_dim=1, output_dim=2, init='normal',
                    inner_init='orthogonal', activation=steeper_sigmoid,
                    weights=RNNlayer_weights,
                    return_sequences=True))

model.add(Dense(2, 1, init='normal', activation=steeper_sigmoid, weights=output_weights))

model.compile(loss='binary_crossentropy', optimizer='Adagrad')

initialWeights = model.get_weights()

history = model.fit(X_train3D, Y_train3D, batch_size=batchsize, nb_epoch=1000, show_accuracy=True)

score = model.evaluate(X_test3D, Y_test3D, show_accuracy=True)

print("score (loss, accuracy):")
print(score)

print("predicted output:")
print(model.predict(X_test3D, verbose=1))
print("actual output:")
print(Y_test)
print("actual input:")
print(X_test)
print('initial weights:')
Example #25
    def fit_lstm(self, train):
        # reshape training into [samples, timesteps, features]
        # timesteps is 1 as there is one sample per day
        X, y = train[:, 0:self.n_lag * self.number_of_indicators], \
               train[:, self.n_lag * self.number_of_indicators:]
        # design network
        model = Sequential()
        # source https://machinelearningmastery.com/how-to-develop-lstm-models-for-time-series-forecasting/
        if self.model_type == "vanilla":
            X = X.reshape(X.shape[0], self.n_lag, self.number_of_indicators)
            model.add(LSTM(self.number_of_indicators, batch_input_shape=(self.n_batch, self.n_lag, self.number_of_indicators), stateful=True)) #batch_input_shape=(self.n_batch, X.shape[1], X.shape[2])
            model.add(Dense(y.shape[1]))
        elif self.model_type == "stacked":
            # 2 hidden layers, but can be modified
            X = X.reshape(X.shape[0], self.n_lag, self.number_of_indicators)
            model.add(LSTM(self.number_of_indicators, batch_input_shape=(self.n_batch, self.n_lag, self.number_of_indicators), #batch_input_shape=(self.n_batch, X.shape[1], X.shape[2])
                           return_sequences=True, stateful=True))
            model.add(LSTM(int(self.number_of_indicators * 2/3 + y.shape[1])))
            model.add(Dense(y.shape[1]))
        elif self.model_type == "bi":
            X = X.reshape(X.shape[0], self.n_lag, self.number_of_indicators)
            model.add(Bidirectional(LSTM(self.number_of_indicators, stateful=True),batch_input_shape=(self.n_batch, self.n_lag, self.number_of_indicators))) #batch_input_shape=(self.n_batch, X.shape[1], X.shape[2])
            model.add(Dense(y.shape[1]))

        elif self.model_type == "cnn":
            X = X.reshape(X.shape[0], 1, self.n_lag, self.number_of_indicators)
            model.add(TimeDistributed(Conv1D(filters=64, kernel_size=1),
                                      batch_input_shape=(self.n_batch, None, self.n_lag, self.number_of_indicators))) #batch_input_shape=(None, X.shape[1], X.shape[2])
            model.add(TimeDistributed(MaxPooling1D(pool_size=2)))
            model.add(TimeDistributed(Flatten()))
            model.add(LSTM(self.number_of_indicators))
            model.add(Dense(y.shape[1]))

        elif self.model_type == "conv":
            X = X.reshape(X.shape[0], 1, 1, self.n_lag, self.number_of_indicators)
            model.add(ConvLSTM2D(filters=64, kernel_size=(1, 2),
                                 batch_input_shape=(self.n_batch, 1, 1, self.n_lag, self.number_of_indicators)))
            model.add(Flatten())
            model.add(Dense(y.shape[1]))
        else:
            raise ValueError("self.model_type is not one of the supported types")

        model.compile(loss='mean_squared_error', optimizer='adam')
        print("Model Type: ", self.model_type)
        print("train X size:", len(X), " shape:", X.shape, "train y size:", len(y), " shape: ", y.shape)
        #print("train X data", X)
        #print("train y data", y)
        # fit network
        print("Training model with batch size", self.n_batch)
        model.summary()
        #print("X shape:", X.shape, " y shape:", y.shape)
        for i in range(self.n_epochs):
            model.fit(X, y, epochs=1, batch_size=self.n_batch, verbose=0, shuffle=False)
            model.reset_states()

        #self.save_plot_model(model, note="fullbatch")
        # source https://machinelearningmastery.com/use-different-batch-sizes-training-predicting-python-keras/
        # Create a new model with batch size 1 and give the trained weight, this allows the model
        # to be used to predict 1 step instead of batches
        n_batch = 1
        new_model = Sequential()
        # source https://machinelearningmastery.com/how-to-develop-lstm-models-for-time-series-forecasting/
        if self.model_type == "vanilla":
            new_model.add(LSTM(self.number_of_indicators, batch_input_shape=(n_batch, self.n_lag, self.number_of_indicators), stateful=True)) #batch_input_shape=(self.n_batch, X.shape[1], X.shape[2])
            new_model.add(Dense(y.shape[1]))
        elif self.model_type == "stacked":
            # 2 hidden layers, but can be modified
            new_model.add(LSTM(self.number_of_indicators, batch_input_shape=(n_batch, self.n_lag, self.number_of_indicators), #batch_input_shape=(self.n_batch, X.shape[1], X.shape[2])
                           return_sequences=True, stateful=True))
            new_model.add(LSTM(int(self.number_of_indicators * 2/3 + y.shape[1])))
            new_model.add(Dense(y.shape[1]))
        elif self.model_type == "bi":
            new_model.add(Bidirectional(LSTM(self.number_of_indicators, stateful=True),batch_input_shape=(n_batch, self.n_lag, self.number_of_indicators))) #batch_input_shape=(self.n_batch, X.shape[1], X.shape[2])
            new_model.add(Dense(y.shape[1]))

        elif self.model_type == "cnn":
            new_model.add(TimeDistributed(Conv1D(filters=64, kernel_size=1),
                                      batch_input_shape=(n_batch, None, self.n_lag, self.number_of_indicators))) #batch_input_shape=(None, X.shape[1], X.shape[2])
            new_model.add(TimeDistributed(MaxPooling1D(pool_size=2)))
            new_model.add(TimeDistributed(Flatten()))
            new_model.add(LSTM(self.number_of_indicators))
            new_model.add(Dense(y.shape[1]))

        elif self.model_type == "conv":
            new_model.add(ConvLSTM2D(filters=64, kernel_size=(1, 2),
                                     batch_input_shape=(n_batch, 1, 1, self.n_lag, self.number_of_indicators)))
            new_model.add(Flatten())
            new_model.add(Dense(y.shape[1]))
        else:
            raise ValueError("self.model_type is not one of the supported types")

        new_model.set_weights(model.get_weights())

        print("\n\nNew model with batch size 1 for prediction")
        new_model.summary()
        return new_model
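
A one-step forecast with the returned batch-size-1 model might then look like this sketch (valid for the 'vanilla', 'stacked' and 'bi' variants; 'forecaster' and 'train' are assumed):

model = forecaster.fit_lstm(train)
x = train[0, :forecaster.n_lag * forecaster.number_of_indicators]
x = x.reshape(1, forecaster.n_lag, forecaster.number_of_indicators)
yhat = model.predict(x, batch_size=1)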
Example #26
new_model.add(
    LSTM(200,
         batch_input_shape=(32, None, 1),
         stateful=True,
         return_sequences=True)
)  #, stateful=True, )) #dropout=0.1, recurrent_dropout=0.1,
new_model.add(LSTM(
    200, batch_input_shape=(32, None, 1),
    stateful=True))  #, stateful=True, )) #dropout=0.1, recurrent_dropout=0.1,
#new_model.add(LSTM(300,input_shape=(None,1), return_sequences=True, stateful=True))
new_model.add(Dense(7))
new_model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=rms,
                  metrics=['accuracy'])
new_model.reset_states()
new_model.set_weights(model.get_weights())
predictions = new_model.predict(entireData3)
predictions = np.argmax(predictions, axis=1)

futureElement = predictions.reshape(len(predictions), 1, 1)

futureElements = []
futureElements.append(futureElement)

for i in range(4):

    futureElement = new_model.predict(futureElement)
    futureElement = np.argmax(futureElement, axis=1)
    futureElement = futureElement.reshape(len(futureElement), 1, 1)
    futureElements.append(futureElement)
Example #27
class DQN_network:
    def __init__(self, output_n):
        self.model = Sequential()
        self.model.add(
            Conv2D(32,
                   8,
                   strides=(4, 4),
                   padding='valid',
                   activation='relu',
                   input_shape=config.network_input_shape,
                   data_format='channels_first'))
        self.model.add(
            Conv2D(filters=64,
                   kernel_size=4,
                   strides=(2, 2),
                   padding='valid',
                   activation='relu',
                   data_format='channels_first'))
        self.model.add(
            Conv2D(filters=64,
                   kernel_size=3,
                   strides=(1, 1),
                   padding='valid',
                   activation='relu',
                   data_format='channels_first'))
        self.model.add(Flatten())
        self.model.add(Dense(512, activation='relu'))
        self.model.add(Dense(output_n))
        rms = keras.optimizers.RMSprop(lr=config.learning_rate,
                                       epsilon=0.01,
                                       decay=0.0)
        self.model.compile(loss='mean_squared_error',
                           optimizer=rms,
                           metrics=['accuracy'])

    def predict_action(self, state):
        state = np.reshape(state, config.network_batch_shape)
        state = np.float32(state / 255.0)
        return np.argmax(self.model.predict(state))

    def predict(self, state):
        state = np.reshape(state, config.network_batch_shape)
        state = np.float32(state / 255.0)
        return self.model.predict(state)

    def output(self, state):
        state = np.reshape(state, config.network_batch_shape)
        state = np.float32(state / 255.0)
        return self.model.predict(state)

    def train(self, state, true_target):
        state = np.reshape(state, config.network_batch_shape)
        state = np.float32(state / 255.0)
        self.model.fit(state, true_target, epochs=1, verbose=0)

    def save_weights(self, file_name):
        self.model.save_weights(file_name)

    def load_weights(self, file_name):
        self.model.load_weights(file_name)

    def get_weights(self):
        return self.model.get_weights()

    def set_weights(self, weights):
        self.model.set_weights(weights)
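
The get_weights/set_weights pass-throughs on this class are what enable the usual DQN target-network sync. A short sketch of the intended use (the output_n value is illustrative):

online = DQN_network(output_n=4)
target = DQN_network(output_n=4)
target.set_weights(online.get_weights())  # hard copy of all layer weights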
Example #28
class Network(object):
    """
    Base class for the various neural networks.
    """
    def __init__(self):
        self.metrics = ()
        self.model = Sequential()

    def first_layer_output(self, x):
        weights = self.get_layer_weights(1)
        W = weights[0]
        b = weights[1]

        return np.dot(x, W) + b

    def predict_on_batch(self, x):
        return self.model.predict_on_batch(x)

    def get_weights(self, layer=None):
        if layer is None:
            return self.model.get_weights()
        return self.model.layers[layer].get_weights()

    def weight_shapes(self):
        return self.get_weights()[0].shape, self.get_weights()[1].shape

    def set_layer_weights(self, layer, weights):
        self.model.layers[layer].set_weights(
            [weights, self.get_weights(layer)[1]])

    def set_layer_bias(self, layer, bias):
        self.model.layers[layer].set_weights(
            [self.get_weights(layer)[0], bias])

    def set_layer_parameters(self, layer, weights, bias):
        self.model.layers[layer].set_weights([weights, bias])

    def get_layer_weights(self, layer):
        return self.model.get_layer(index=layer).get_weights()

    def train_once(self, data, batch_size):
        self.model.fit(data[0], data[1], epochs=1, batch_size=batch_size)

    def train_on_generator(self, training_set_generator, batches_per_epoch,
                           epochs, verbose):
        h = self.model.fit_generator(training_set_generator,
                                     batches_per_epoch,
                                     epochs,
                                     verbose=verbose)
        loss = h.history['loss'][epochs - 1]
        acc = h.history['categorical_accuracy'][epochs - 1]
        self.metrics = '{0:.3g}'.format(loss), '{0:.3g}'.format(acc)

    def save(self, relative_path, filename=None):
        if filename is None:
            filename = 'model'

        absolute_path = ''.join([path(), relative_path, filename])
        network_out = ''.join([absolute_path, '.yaml'])
        weight_out = ''.join([absolute_path, '.h5'])

        model_yaml = self.model.to_yaml()
        with open(network_out, 'w') as yaml_file:
            yaml_file.write(model_yaml)
        self.model.save_weights(weight_out)

    def load(self, relative_path, filename):
        absolute_path = ''.join([path(), relative_path, filename])
        network = ''.join([absolute_path, '.yaml'])
        weights = ''.join([absolute_path, '.h5'])

        with open(network, 'r') as yaml_file:
            loaded_model_yaml = yaml_file.read()

        self.model = model_from_yaml(loaded_model_yaml)
        self.model.load_weights(weights)
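
A hypothetical save/load round trip for this base class, assuming path() resolves to a writable directory and a layer has been added first:

from keras.layers import Dense

net = Network()
net.model.add(Dense(4, input_dim=2, activation='relu'))
net.save('models/', filename='demo')  # writes demo.yaml and demo.h5
net.load('models/', 'demo')           # rebuilds the model from YAML + weights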
Example #29
    def test_gru_layer(self):
        i = 0
        numerical_err_models = []
        shape_err_models = []
        numerical_failure = 0
        for base_params in self.base_layer_params:
            base_params = dict(zip(self.params_dict.keys(), base_params))
            for gru_params in self.gru_layer_params:
                gru_params = dict(zip(self.gru_params_dict.keys(), gru_params))
                model = Sequential()
                if keras.__version__[:2] == '2.':
                    model.add(
                        GRU(
                            base_params['output_dim'],
                            input_shape=(base_params['input_dims'][1],base_params['input_dims'][2]),
                            activation=base_params['activation'],
                            recurrent_activation=gru_params['inner_activation'],
                            return_sequences=base_params['return_sequences'],
                            go_backwards=base_params['go_backwards'],
                            unroll=base_params['unroll'],
                        )
                    )
                else:
                    model.add(
                        GRU(
                            base_params['output_dim'],
                            input_length=base_params['input_dims'][1],
                            input_dim=base_params['input_dims'][2],
                            activation=base_params['activation'],
                            inner_activation=gru_params['inner_activation'],
                            return_sequences=base_params['return_sequences'],
                            go_backwards=base_params['go_backwards'],
                            unroll=base_params['unroll'],
                        )
                    )
                model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
                mlkitmodel = _get_mlkit_model_from_path(model)
                input_data = generate_input(base_params['input_dims'][0], base_params['input_dims'][1],
                                            base_params['input_dims'][2])
                
                activations_to_test_with_numpy = {'linear', 'relu'}
                if base_params['activation'] in activations_to_test_with_numpy or gru_params['inner_activation'] in activations_to_test_with_numpy:
                    keras_preds = get_numpy_prediction_gru(model, input_data).flatten()                    
                else:
                    keras_preds = model.predict(input_data).flatten()
                
                input_data = np.transpose(input_data, [1, 0, 2])
                coreml_preds = mlkitmodel.predict({'data': input_data})['output'].flatten()
                if K.tensorflow_backend._SESSION:
                    import tensorflow as tf
                    tf.reset_default_graph()
                    K.tensorflow_backend._SESSION.close()
                    K.tensorflow_backend._SESSION = None
                try:
                    self.assertEquals(coreml_preds.shape, keras_preds.shape)
                except AssertionError:
                    print("Shape error:\nbase_params: {}\n gru_params: {}\nkeras_preds.shape: {}\ncoreml_preds.shape: {}".format(
                        base_params, gru_params, keras_preds.shape, coreml_preds.shape))
                    shape_err_models.append(base_params)
                    i += 1
                    continue
                try:
                    max_denominator = np.maximum(np.maximum(np.abs(coreml_preds), np.abs(keras_preds)), 1.0)
                    relative_error = coreml_preds/max_denominator - keras_preds/max_denominator
                    # use a separate index so the outer model counter `i` is not clobbered
                    for idx in range(len(relative_error)):
                        self.assertLessEqual(relative_error[idx], 0.01)
                except AssertionError:
                    print("===============Assertion error:\n base_params: {}\n gru_params: {}\n\n keras_preds: {}\n\n coreml_preds: {}\n\n\n keras_preds: {}\n\n\n coreml_preds: {}\n".format(base_params,
                                                                                                        gru_params,    
                                                                                                        keras_preds/max_denominator, 
                                                                                                        coreml_preds/max_denominator,
                                                                                                        keras_preds,
                                                                                                        coreml_preds))
                    numerical_failure += 1
                    numerical_err_models.append(base_params)
                i += 1

        self.assertEquals(shape_err_models, [], msg='Shape error models {}'.format(shape_err_models))
        self.assertEquals(numerical_err_models, [], msg='Numerical error models {}'.format(numerical_err_models))
Example #30
        if (rede != 0 and transferenciaDeConhecimento):
            modelo.set_weights(pesos)

        log = modelo.fit(dados['XT'],
                         dados['YT'],
                         validation_data=(dados['XV'], dados['YV']),
                         batch_size=1,
                         epochs=100,
                         verbose=1)

        modelo.save(caminhoModelo)

        modelos.append(modelo)

        pesos = modelo.get_weights()

        if (transferenciaDeConhecimento):
            fig.suptitle(
                'Accuracy per Epoch with Knowledge Transfer',
                fontsize=16)
        else:
            fig.suptitle('Accuracy per Epoch', fontsize=20)

        plt.subplot(231 + rede)
        plt.plot(log.history['acc'])
        plt.plot(log.history['val_acc'])
        plt.title('RNA' + str(rede))
        plt.ylabel('Accuracy')
        plt.xlabel('Epochs')
        plt.grid(True)
Example #31
    def add_hist(self, var, history):
        """Add further fitting history to a certain variable."""
        ind = self.history['var'].index(var)
        for key in self.history.keys():
            if key != 'var':
                self.history[key][ind] = np.concatenate(
                    (self.history[key][ind], history.history[key])).tolist()

    def save(self, fname):
        with open(fname, 'w') as file:
            file.write(json.dumps(self.history))


#--------------------

for i in range(3, 11):
    apphist_layers = AppendedHistory(varname='weights')
    for j in range(10):
        one_hl = Sequential()
        one_hl.add(Dense(2**i, input_dim=x_train.shape[1], activation='relu'))
        one_hl.add(Dense(5, input_dim=x_train.shape[1], activation='softmax'))
        np.save('/output/weights_{}-{}'.format(2**i, j), one_hl.get_weights())
        one_hl.compile(loss='categorical_crossentropy',
                       optimizer='adam',
                       metrics=['acc'])
        hist_one_hl_layers = one_hl.fit(x_train, y_train, epochs=300, batch_size=128, \
                   verbose=0, validation_data=(x_test, y_test))
        apphist_layers.append_hist(j, hist_one_hl_layers)
    apphist_layers.save('/output/apphist_layers_{}.txt'.format(2**i))
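
np.save on a list of arrays stores it as an object array, so restoring one of these snapshots needs allow_pickle=True. A sketch using the filename pattern from above:

import numpy as np

weights = np.load('/output/weights_8-0.npy', allow_pickle=True)
one_hl.set_weights(list(weights))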
Example #32
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import Adam
import numpy as np

l0 = Dense(1, input_shape=[1])
m = Sequential([l0])

m.compile(loss='mse', optimizer=Adam(0.1))

celsius_q = np.array([-40, -10, 0, 8, 15, 22, 38], dtype=float)
fahrenheit_a = np.array([-40, 14, 32, 46, 59, 72, 100], dtype=float)

m.summary()

hist = m.fit(celsius_q, fahrenheit_a, epochs=10000, verbose=False)

print("Learned weights: {}".format(m.get_weights()))

print("{} degrees C = {} degrees F".format(0, m.predict([0])))

# Plot loss function
import matplotlib.pyplot as plt
plt.plot(hist.history['loss'])
plt.xlabel('Epoch#')
plt.ylabel('Loss')
plt.show()
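
Since degrees Fahrenheit satisfy F = 1.8*C + 32, the single Dense unit's learned kernel and bias should land near those two constants. A quick check:

w, b = [v.flatten()[0] for v in m.get_weights()]
print("kernel ~ 1.8? {:.3f}   bias ~ 32? {:.3f}".format(w, b))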
Example #33
def test_preprocess_weights_for_loading_for_model(layer):
    model = Sequential([layer])
    weights1 = model.get_weights()
    weights2 = saving.preprocess_weights_for_loading(
        model, convert_weights(layer, weights1), original_keras_version='1')
    assert all([np.allclose(x, y, 1e-5) for (x, y) in zip(weights1, weights2)])
Example #34
    def test_function(self):
        # Define loss tensor?
        # Define weight tensors?
        # Compute gradient of loss wrt weights
        # Name scopes?
        '''

        log(a | s) * (R - V)
        # Inputs:  states, action mask, returns, values
        # Outputs: loss?
        # Updates: gradient

        Compute objective function
        Compute gradient of weights wrt objective
        update weights
        '''
        model = Sequential([
            Dense(16, input_dim=4, activation='relu'),
            Dense(8, activation='relu'),
            Dense(units=2, activation='linear')
        ])
        model.compile(loss='mse', optimizer='sgd')
        orig_weights = model.get_weights()

        # Train model normally on a single input
        x = np.random.random((1, 4))
        y_true = np.random.random((1, 2))
        train_loss = model.train_on_batch(x, y_true)

        # Undo any weight updates made
        model.set_weights(orig_weights)

        # Build a Keras function that replicates the training function
        model = model.model  # unwrap the inner training Model from the Sequential wrapper
        input = model._feed_inputs + model._feed_targets + model._feed_sample_weights
        output = [model.total_loss]
        update = model.optimizer.get_updates(
            params=model._collected_trainable_weights, loss=model.total_loss)

        f = K.function(inputs=input, outputs=output, updates=update)

        # Run the function on a single input
        sample_weight = np.ones((y_true.shape[0], ), dtype=K.floatx())
        f_loss = f([x, y_true, sample_weight])

        # Both methods should have accomplished the same thing
        self.assertEqual(train_loss, f_loss)

        # Create placeholder tensors for new arrays
        # Wrap loss function in closure
        # Define new train function

        def objective(y_pred, mask, advantage):
            y_pred = K.print_tensor(y_pred, 'Q(s,a) = ')
            mask = K.print_tensor(mask, 'Mask = ')
            advantage = K.print_tensor(advantage, 'Advantage = ')
            return K.print_tensor(
                -K.sum(K.log(y_pred) * mask, axis=-1) * advantage, 'loss=')

        mask = K.placeholder(shape=model.output.shape, name='mask')
        advantage = K.placeholder(ndim=1, name='advantage')
        loss = objective(model.output, mask, advantage)
        g = K.function(inputs=model._feed_inputs + [mask, advantage],
                       outputs=[loss],
                       updates=model.optimizer.get_updates(
                           params=model._collected_trainable_weights,
                           loss=loss))

        mask = np.array([0, 1.]).reshape((1, -1))
        advantage = np.array([2.])

        t1 = model.predict(x)
        t0 = g([x, mask, advantage])
        t2 = model.predict(x)

        pass
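
        # Added note (sketch): a quick way to confirm that g() applied its
        # optimizer updates is that the predictions before (t1) and after
        # (t2) should differ, e.g. `assert not np.allclose(t1, t2)`.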
model.fit(xTrain,
          yTrain,
          batch_size=1,
          epochs=100,
          verbose=2,
          validation_data=(xTest, yTest),
          callbacks=[tensorbrd])
duration = (dt.datetime.now() - start)
print("\nDauer: " + str(duration))

#Step 5: Evaluierung
print('Klassifikationgüte: ' + repr(model.evaluate(xTest, yTest)[1]))

#Step 6: Sicherung des berechneten Modells als csv und rtf
#Ermittlung der weights und biases
result = model.get_weights()
weights_h1 = result[0]
biases_b1 = result[1]
weights_out = result[2]
biases_out = result[3]

print('\n--------------Saving model (csv)-------------')
print('Saving Data/NNweights_h1.csv')
savetxt('Data/NNweights_h1.csv', weights_h1, fmt='%10.8f', delimiter=' ')
print('Saving Data/NNbiases_b1.csv')
savetxt('Data/NNbiases_b1.csv', biases_b1, fmt='%10.8f', delimiter=' ')
print('Saving Data/NNweights_out.csv')
savetxt('Data/NNweights_out.csv', weights_out, fmt='%10.8f', delimiter=' ')
print('Saving Data/NNbiases_out.csv')
savetxt('Data/NNbiases_out.csv', biases_out, fmt='%10.8f', delimiter=' ')
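
# Added round-trip sketch (illustrative): the CSVs saved above can be loaded
# back into a model of identical architecture via set_weights.
from numpy import loadtxt
model.set_weights([loadtxt('Data/NNweights_h1.csv'),
                   loadtxt('Data/NNbiases_b1.csv'),
                   loadtxt('Data/NNweights_out.csv'),
                   loadtxt('Data/NNbiases_out.csv')])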
model.add( Dense(input_dim=cols, output_dim=1, init='uniform') )


''' compile '''
sgd = SGD( lr=1e-3, decay=1e-4, momentum=0 )
model.compile( loss='mean_squared_error',optimizer=sgd )


''' fit (train) '''
n_epochs = 100000
model.fit( X_test, Y_test, nb_epoch=n_epochs, verbose=0 )

#save the model
json_string = model.to_json()
open('lin_reg_model.json', 'w').write(json_string)
model.save_weights('lin_reg_weights.h5')
print "Saved model and weights..."


''' evaluate (test) '''
#score = model.evaluate( X_test, Y_true, verbose=0 )
pred = model.predict( X_test )
print "Prediction \t Y_true \t Pred. Error"
print np.hstack([pred,Y,np.abs(pred-Y)])
print "Model weights = ", model.get_weights()


''' LS solution '''
#theta = np.dot( np.linalg.inv( np.dot(np.transpose(X_test), X_test) ), np.dot(np.transpose(X_test),Y_test) )
#print theta
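
#Added sketch: numpy's lstsq solves the same least-squares problem more
#stably than the explicit normal-equation inverse above.
#theta, residuals, rank, sv = np.linalg.lstsq(X_test, Y_test)
#print "LS theta = ", theta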
Example #37
0
    def build_generator(self):
        # Generator
        # 1) 32 * window_size Conv1D layers with RELU and Dropout

        noise_shape = self.input_shape

        model = Sequential()

        model.add(
            Conv1D(hidden_filters_1,
                   128,
                   padding="same",
                   strides=1,
                   input_shape=noise_shape,
                   activation='relu',
                   dilation_rate=10,
                   name='gen_conv1d_1'))
        model.add(Dropout(dropout_rate, name='gen_dropout_1'))

        model.add(
            Conv1D(hidden_filters_1,
                   256,
                   padding="same",
                   strides=1,
                   activation='relu',
                   dilation_rate=5,
                   name='gen_conv1d_2'))
        model.add(Dropout(dropout_rate, name='gen_dropout_2'))

        model.add(
            Conv1D(hidden_filters_1,
                   128,
                   padding="same",
                   strides=1,
                   dilation_rate=1,
                   activation='relu',
                   name='gen_conv1d_3'))
        model.add(Dropout(dropout_rate, name='gen_dropout_3'))

        # 2) 1 * 16 Conv1D layers with Linear
        # NOTE: All same padding
        model.add(
            Conv1D(output_filters,
                   output_kernel_size,
                   padding='same',
                   strides=1,
                   activation='linear',
                   name='gen_conv1d_output'))

        print "Generator"
        model.summary()

        noise = Input(shape=noise_shape)
        img = model(noise)

        # load weights for generator if specified
        if model_path:
            print "-" * 50
            print model.get_weights()
            model.load_weights(model_path, by_name=True)
            print "-" * 50
            print model.get_weights()

        return Model(noise, img)
Example #38
0
class MyLSTM(object):
    def __init__(self, input_size, num_hidden_layers, hidden_layer_sizes, output_size,
                 epochs=50, batch_size=1, fit_verbose=2):
        self.input_size = input_size
        self.num_hidden_layers = num_hidden_layers
        self.hidden_layer_sizes = hidden_layer_sizes
        self.output_size = output_size
        self.epochs = epochs
        self.batch_size = batch_size
        self.verbose = fit_verbose

        self.build_model()

    def build_model(self):
        self.model = Sequential()
        self.model.add(LSTM(self.hidden_layer_sizes[0], input_shape=(None, self.input_size), return_sequences=True))
        for i in range(1, self.num_hidden_layers - 1):
            self.model.add(LSTM(self.hidden_layer_sizes[i], return_sequences=True))
        self.model.add(LSTM(self.hidden_layer_sizes[-1]))
        self.model.add(Dense(self.output_size))
        self.model.compile(loss='mean_squared_error', optimizer='adam')

    def predict(self, data):
        """
            Runs the data in the data parameter through the network and
            returns a list of predicted values.

             data - a matrix of data (explanatory variables) to be sent through the LSTM
        """
        return self.model.predict(data)


    def get_weights(self):
        """
            Returns the weights for each layer in the network (list of arrays).
        """
        return self.model.get_weights()


    def set_weights(self, weights):
        """
            Sets the weights of the network.
        """
        self.model.set_weights(weights)


    def save_model_weights(self, filename):
        """
            Saves the model weights to a file. File name should have extension
            'h5'.
        """
        self.model.save_weights(filename)


    def load_model_weights(self, filename):
        """
            Loads the model weights from a file. File name should have extension
            'h5'.
        """
        self.model.load_weights(filename)


    def train(self, train_x, train_y, optimizer='adam'):
        """
            Trains the model using the Adam optimization algorithm (more to be
            implemented later). Creates a 'history' attr of the LSTM.

            train_x - a matrix of explanatory variables for training
            train_y - a matrix of dependent variables to train on
            optimizer - optimization algorithm (Adam is the only one implemented)
         """
        self.history = self.model.fit(train_x, train_y, epochs=self.epochs, batch_size=self.batch_size,
                                      verbose=self.verbose, shuffle=False)
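
# Added usage sketch (shapes and hyperparameters are assumptions): an LSTM
# expects input shaped (samples, timesteps, features).
if __name__ == '__main__':
    import numpy as np
    net = MyLSTM(input_size=3, num_hidden_layers=2, hidden_layer_sizes=[8, 4],
                 output_size=1, epochs=2, batch_size=1, fit_verbose=0)
    x = np.random.random((10, 5, 3))  # 10 sequences, 5 steps, 3 features
    y = np.random.random((10, 1))
    net.train(x, y)
    print(net.predict(x).shape)       # -> (10, 1)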
Example #39
0
    print(ite, loss)


# In[16]:

### Create the output file
f = open("vectors.txt", "w")

### Write the vocabulary size and the dimensionality of the feature vectors
f.write( " ".join([str(V-1), str(dim)]) )
f.write("\n")


# In[17]:

vectors = cbow.get_weights()[0]

### Write out the word feature vectors learned during training
for word, i in tokenizer.word_index.items():
    f.write(word)
    f.write(" ")
    f.write(" ".join(map(str, list(vectors[i,:]))))
    f.write("\n")
f.close()


# In[18]:

w2v = Word2Vec.load_word2vec_format('./vectors.txt', binary=False)
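
### Added sketch: with the vectors loaded, nearest-neighbour queries work as
### usual ('word' below stands for any token in the vocabulary):
# print(w2v.most_similar(positive=['word'], topn=5))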

Example #40
0
class Learner(object):
    def __init__(self, n, l, L, N=0, state=True):
        print "New Learner"
        self.stop_on_error = False
        self.output_interference = True

        self.n = n  # number of dimensions in feature vectors
        self.l = l  # nodes per level
        self.L = L  # number of items
        self.LL = L + 2  # number of items (including start/end markers)
        self.N = N  # number of end competitors
        self.M = np.zeros(self.n)  # memory trace
        self.shuffle_items()

        self.model = Sequential()
        if state:
            self.model.add(
                Bidirectional(LSTM(l, return_sequences=True, stateful=state),
                              batch_input_shape=(1, 1, n)))
        else:
            self.model.add(
                Bidirectional(LSTM(l, return_sequences=True),
                              input_shape=(1, n)))
        #self.model.add(BatchNormalization())
        #self.model.add(Activation('tanh'))
        #self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(Bidirectional(LSTM(l, stateful=state)))
        #self.model.add(BatchNormalization())
        #self.model.add(LeakyReLU(alpha=0.2))
        #self.model.add(Activation('tanh'))
        self.model.add(Dense(n))
        self.model.compile(loss='cosine_proximity',
                           optimizer='adam')  #mse, #rmsprop
        self.initial_weights = self.model.get_weights()

    def shuffle_items(self):
        self.items = np.random.normal(0, 1.0 / self.n,
                                      (self.LL, self.n))  # items
        self.x = self.items[0]  # start token
        self.y = self.items[-1]  # end token

    def reset(self, weights=None):
        # https://github.com/fchollet/keras/issues/341
        self.shuffle_items()
        if weights is None:
            weights = self.initial_weights
        weights = [
            np.random.permutation(w.flat).reshape(w.shape) for w in weights
        ]
        self.model.set_weights(weights)

    def seq_trial(self, epochs=1, batch=1):
        for e in range(epochs):
            self.model.reset_states()
            self.model.fit(self.items.reshape(self.LL, 1, self.n),
                           np.roll(self.items, -1, axis=0),
                           nb_epoch=1,
                           batch_size=batch,
                           shuffle=False,
                           verbose=0)

    def deblur(self, a, j):
        opts = self.items[j:]
        d = opts.dot(a)
        i = np.argmax(d)
        return opts[i], i + j

    def serial_anticipation(self):
        r = np.zeros(self.LL)
        r[0] = 1.0
        for i in range(self.LL - 1):
            j = i + 1
            f = self.items[i]
            g_ = self.probe(f, j)
            g, i_ = self.deblur(g_, j)
            if j == i_:
                r[j] = 1.0
        return r

    def probe(self, f, j):
        g_ = self.model.predict(f.reshape(1, 1, self.n))[0]
        if self.output_interference:
            self.model.fit(self.items[j - 1].reshape(1, 1, self.n),
                           g_.reshape(1, self.n),
                           nb_epoch=1,
                           batch_size=1,
                           shuffle=False,
                           verbose=0)
        g_ = self.model.predict(f.reshape(1, 1, self.n))[0]
        return g_
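
# Added usage sketch (parameter values are assumptions):
# learner = Learner(n=20, l=16, L=5)
# learner.seq_trial(epochs=10)
# print learner.serial_anticipation()  # 1.0 where the next item was recalled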
Example #41
0
def run():
    # params
    numbats = 1 # 100
    epochs = 5000 #20
    lr = 2./numbats #0.0001 # for SGD
    lr2 = 0.01
    evalinter = 1


    dims = 5#100
    wreg = 0.0# 0.00001

    datafileprefix = "../../data/"
    #datafileprefix = "../../data/nycfilms/"
    tensorfile = "toy.ssd"
    #tensorfile = "tripletensor.ssd"

    # get the data and split
    start = datetime.now()
    data = loaddata(datafileprefix+tensorfile)
    data.threshold(0.5)
    maxentid = max(data.maxid(1), data.maxid(2))
    #data.shiftids(0, maxentid+1)

    vocabsize = data.maxid(0)+1
    data = data.keys.lok

    trainX = data[:, [1, 0]]
    labels = data[:, 2]
    trainY = np.zeros((labels.shape[0], vocabsize)).astype("float32")
    trainY[np.arange(labels.shape[0]), labels] = 1
    batsize=int(math.ceil(data.shape[0]*1.0/numbats))

    print "source data loaded in %f seconds" % (datetime.now() - start).total_seconds()

    # train model
    print "training model"
    start = datetime.now()
    model = Sequential()
    model.add(Embedding(vocabsize, dims, W_regularizer=l2(wreg)))
    model.add(GRU(dims, activation="tanh", ))
    model.add(Dense(vocabsize, W_regularizer=l2(wreg), activation="softmax"))
    opt = SGD(lr=lr2, decay=1e-6, momentum=0.9, nesterov=True)
    opt = Adadelta()
    model.compile(optimizer=opt, loss="categorical_crossentropy")
    w = model.get_weights()
    print "model %s defined in %f" % (model.__class__.__name__, (datetime.now() - start).total_seconds())

    start = datetime.now()
    losses = LossHistory()
    model.fit(trainX, trainY, nb_epoch=epochs, batch_size=batsize, verbose=1, callbacks=[losses])
    print "model trained in %f" % (datetime.now() - start).total_seconds()

    print model.predict(np.asarray([[0, 10]]).astype("int32"))

    #print losses.losses
    plt.plot(losses.losses, "r")
    plt.show(block=False)

    save(model)

    embed()
recog1.add(Merge([recog_left,recog_right],mode = 'ave'))
recog1.add(Dense(784))

#### HERE***
recog11=Sequential()
layer=Dense(64,init='glorot_uniform',input_shape=(784,))
layer.trainable=False
recog11.add(layer)
layer2=Dense(784, activation='sigmoid',init='glorot_uniform')
layer2.trainable=False
recog11.add(layer2)
recog11.layers[0].W.set_value(np.ones((784,64)).astype(np.float32))

recog11.compile(loss='mean_squared_error', optimizer=sgd,metrics = ['mae'])

recog11.get_weights()[0].shape

gan_input = Input(batch_shape=(1,784))

gan_level2 = recog11(recog1(gan_input))

GAN = Model(gan_input, gan_level2)
GAN.compile(loss='mean_squared_error', optimizer='adam',metrics = ['mae'])

GAN.fit(x_train_orig[0].reshape(1,784), x_train_orig[0].reshape((1,784)), 
        batch_size=30, nb_epoch=100,verbose=1)

### UNIQUE BLUEPRINT
a=GAN.predict(x_train[0].reshape(1,784),verbose=1)

plt.figure(figsize=(10, 10))
Example #43
0
        Dense(d,
              activation=dnn_met,
              kernel_regularizer=keras.regularizers.l1_l2(l1=0.001, l2=0.001)))
    dp.add(Dropout(0.4))  # play with this number, such as 0.4, 0.6, 0.7
    dp.add(
        Dense(1,
              activation='relu',
              kernel_regularizer=keras.regularizers.l1_l2(l1=0.001, l2=0.001)))
    dp.compile(loss=dnn_loss, optimizer=keras.optimizers.Adam())
    dp.fit(Xnew_train,
           y_train,
           epochs=dnn_epoch,
           batch_size=bs,
           verbose=dnn_verb)

    weights = dp.get_weights()
    w3 = np.matmul(weights[1], weights[2]).reshape(d, )
    w1 = np.multiply(weights[0][:d], w3)
    w2 = np.multiply(weights[0][d:], w3)
    W = w1**2 - w2**2

    t = np.sort(np.concatenate(([0], abs(W))))

    ratio = [
        float(sum(W <= -tt)) / float(max(1, sum(W >= tt))) for tt in t[:d]
    ]
    ind = np.where(np.array(ratio) <= q)[0]
    if len(ind) == 0:
        T = float('inf')
    else:
        T = t[ind[0]]
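
    # Added selection step (sketch): given the data-dependent threshold T,
    # a knockoff-style filter keeps the features whose statistic clears it.
    selected = np.where(W >= T)[0]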
Example #44
0
    tnf = K.tensorflow_backend
    y_true = tnf._to_tensor(y_true, dtype=tf.float32)
    y_pred = tnf._to_tensor(y_pred, dtype=tf.float32)
    return K.mean(K.abs((y_true - y_pred) / y_true))


model.compile(loss=['mean_squared_error'],
              metrics=['mean_squared_error', r_score_metric, max_error, mape],
              optimizer='adam')

model_1.compile(
    loss=['mean_squared_error'],
    metrics=['mean_squared_error', r_score_metric, max_error, mape],
    optimizer='adam')

initial_weights = model.get_weights()
initial_weights_1 = model_1.get_weights()
models = [model, model_1]
initial_weights_vec = [initial_weights, initial_weights_1]
#%%
# Compile model

#
#model = KerasRegressor(build_fn = create_model(filter_depth, filter_depth2, filter_depth3,a,a2,a3,am,
#                 am2,am3,n_dense_1, k = k),
#                        epochs = epochs,
#                        batch_size = batch_size,
#                        verbose = 1)
#

#seed = 1337
Example #45
0
import sys
import json

from keras.models import Sequential
from keras.layers.core import Dense, Activation

def save_str(file, data):
	f = open(file, 'w')
	f.write(data)
	f.close()

model = Sequential()

model.add(Dense(input_dim = 4, output_dim = 6))
model.add(Activation('relu'))

model.add(Dense(output_dim = 3))
model.add(Activation('softmax'))

wg = [x.tolist() for x in model.get_weights()]

save_str(sys.argv[1], model.to_json())
save_str(sys.argv[2], json.dumps(wg))
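
# Added round-trip sketch: the two JSON artifacts can be restored with
# model_from_json plus set_weights (paths are the argv values above).
# from keras.models import model_from_json
# import numpy as np
# m = model_from_json(open(sys.argv[1]).read())
# m.set_weights([np.array(w) for w in json.load(open(sys.argv[2]))])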
Example #46
0
    tnf = K.tensorflow_backend
    y_true = tnf._to_tensor(y_true, dtype=tf.float32)
    y_pred = tnf._to_tensor(y_pred, dtype=tf.float32)
    return K.mean(K.abs((y_true - y_pred) / y_true))


model.compile(loss=['mean_squared_error'],
              metrics=['mean_squared_error', r_score_metric, max_error, mape],
              optimizer='adam')

model_2.compile(
    loss=['mean_squared_error'],
    metrics=['mean_squared_error', r_score_metric, max_error, mape],
    optimizer='adam')

initial_weights = model.get_weights()
initial_weights_2 = model_2.get_weights()
models = [model, model_2]
initial_weights_vec = [initial_weights, initial_weights_2]
#%%
# Compile model

#
#model = KerasRegressor(build_fn = create_model(filter_depth, filter_depth2, filter_depth3,a,a2,a3,am,
#                 am2,am3,n_dense_1, k = k),
#                        epochs = epochs,
#                        batch_size = batch_size,
#                        verbose = 1)
#

#seed = 1337
class SupervisedModel(Model):

    """
    Class representing an abstract Supervised Model
    """

    def __init__(self, layers=list([]), activation='relu', out_activation='linear', dropout=0,
                 l1_reg=0, l2_reg=0, **kwargs):

        """
        :param layers: 
        :param activation: 
        :param out_activation: 
        :param dropout: 
        :param l1_reg: 
        :param l2_reg: 
        :param kwargs: Model's parameters
        """

        self.layers = layers
        self.activation = expand_arg(self.layers, activation)
        self.out_activation = out_activation
        self.dropout = expand_arg(self.layers, dropout)
        self.l1_reg = expand_arg(self.layers, l1_reg)
        self.l2_reg = expand_arg(self.layers, l2_reg)
        super().__init__(**kwargs)

    def validate_params(self):
        super().validate_params()
        assert self.layers and len(self.layers) > 0, 'Model must have at least one hidden layer'
        assert all([0 <= d <= 1 for d in self.dropout]), 'Invalid dropout value'
        assert all([f in valid_act_functions for f in self.activation]), 'Invalid activation function'
        assert self.out_activation in valid_act_functions, 'Invalid output activation function'
        assert all([x >= 0 for x in self.l1_reg]), 'Invalid l1_reg value'
        assert all([x >= 0 for x in self.l2_reg]), 'Invalid l2_reg value'

    def build_model(self, input_shape, n_output=1, metrics=None):

        """ Creates the computational graph for the Supervised Model.
        :param input_shape:
        :param n_output: number of output values.
        :param metrics:
        :return: self
        """

        self._model = Sequential(name=self.name)

        self._create_layers(input_shape, n_output)

        self._model.compile(optimizer=self.get_optimizer(), loss=self.loss_func, metrics=metrics)

    def _create_layers(self, input_shape, n_output):
        pass

    def fit(self, x_train, y_train, x_valid=None, y_valid=None, valid_split=0.):

        """ Fit the model to the data.
        :param x_train: Training data. shape(n_samples, n_features)
        :param y_train: Training labels. shape(n_samples, n_classes)
        :param x_valid:
        :param y_valid:
        :param valid_split:
        :return: self
        """

        x_train = self._check_x_shape(x_train)
        y_train = self._check_y_shape(y_train)

        self.build_model(x_train.shape, y_train.shape[-1])

        if x_valid is not None and y_valid is not None:
            x_valid = self._check_x_shape(x_valid)
            y_valid = self._check_y_shape(y_valid)
            valid_data = (x_valid, y_valid)
        else:
            valid_data = None

            # By default, use 10% of the training data for validation
            if self.early_stopping and valid_split == 0.:
                valid_split = 0.1

        self._train_step(x_train, y_train, valid_data, valid_split)

    def _train_step(self, x_train, y_train, valid_data=None, valid_split=0.):
        self._model.fit(x=x_train,
                        y=y_train,
                        batch_size=self.batch_size,
                        epochs=self.nb_epochs,
                        shuffle=False,
                        validation_data=valid_data,
                        validation_split=valid_split,
                        callbacks=self._callbacks,
                        verbose=self.verbose)

    def predict(self, x):

        """ Predict the labels for the test set.
        :param x: Testing data. shape(n_test_samples, n_features)
        :return: labels
        """

        x = self._check_x_shape(x)

        if self.loss_func == 'binary_crossentropy' or self.loss_func == 'categorical_crossentropy':
            return self._model.predict_classes(x, batch_size=self.batch_size, verbose=self.verbose)

        return self._model.predict(x=x, batch_size=self.batch_size, verbose=self.verbose)

    def predict_proba(self, x):

        """ Predict classes probabilities.
        :param x: Testing data. shape(n_test_samples, n_features)
        :return: probabilities
        """

        if self.loss_func not in ('binary_crossentropy', 'categorical_crossentropy'):
            raise TypeError('Model is not configured to predict class probabilities. '
                            'Please use "binary_crossentropy" or '
                            '"categorical_crossentropy" as the loss function!')
        
        x = self._check_x_shape(x)

        probs = self._model.predict_proba(x, batch_size=self.batch_size, verbose=self.verbose)

        # check if binary classification
        if probs.shape[1] == 1:
            # first column is probability of class 0 and second is of class 1
            probs = np.hstack([1 - probs, probs])
        return probs

    def score(self, x, y):

        """ Evaluate the model on (x, y).
        :param x: Input data
        :param y: Target values
        :return:
        """

        x = self._check_x_shape(x)
        y = self._check_y_shape(y)

        loss = self._model.evaluate(x=x, y=y, batch_size=self.batch_size, verbose=self.verbose)

        if isinstance(loss, list):
            return loss[0]
        return loss

    def get_model_parameters(self):

        """ Return the model parameters in the form of numpy arrays.
        :return: model parameters
        """

        return self._model.get_weights()

    def get_config(self):
        conf = super().get_config()
        layers = []
        for l in self.layers:
            if isinstance(l, int):
                layers.append(l)
            else:
                layers.append(l.to_json()['model'])
        conf['layers'] = layers
        return conf

    @classmethod
    def from_config(cls, config):
        layers = []
        for l in config['layers']:
            if isinstance(l, dict):
                layers.append(model_from_config(l))
            else:
                layers.append(l)
        config['layers'] = layers
        return cls(**config)

    def _check_x_shape(self, x):
        return x

    def _check_y_shape(self, y):
        y = np.array(y)

        if len(y.shape) == 1:
            if self.loss_func == 'categorical_crossentropy':
                return to_categorical(y)

            return np.reshape(y, (y.shape[0], 1))

        return y
Example #48
0
model.add(Dense(1, activation="sigmoid", kernel_initializer="uniform"))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# model = Sequential()
# model.add(Dense(12, input_dim=8, activation="relu", kernel_initializer="uniform"))
# model.add(Dense(8, activation="relu", kernel_initializer="uniform"))
# model.add(Dense(1, activation="sigmoid", kernel_initializer="uniform"))
#
# model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# serialize model to JSON
model_json = model.to_json()
model_weights = model.get_weights()
# print()
model_weights_json = pd.Series(model_weights).to_json(orient='values')
# print(type(model_weights))
# print(type(model_weights[0]))
# for i in model_weights:
#     print(type(i))
# print(type(model_weights))
# model.set_weights(model_weights)
# print(model.summary())
# def weights_from_json(model_weights_json):
# 	json_load = json.loads(model_weights_json)
# 	model_weights_list = np.array(json_load)
# 	model_weights = []
# 	for i in model_weights_list:
# 		model_weights.append(np.array(i,dtype=np.float32))
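
# A working version of the helper sketched above (added; assumes the json and
# numpy imports):
def weights_from_json(model_weights_json):
    json_load = json.loads(model_weights_json)
    return [np.array(w, dtype=np.float32) for w in json_load]

# model.set_weights(weights_from_json(model_weights_json))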
Example #49
0
In [47]: regr.intercept_
Out[47]: 152.91886182616167
"""

###Linear Regression with keras
# Initialize Network
model = Sequential()
model.add(Dense(1, input_dim=1,init='uniform'))
model.add(Activation('linear'))
model.compile(loss='mse', optimizer='sgd')

model.fit(diabetes_X_train, diabetes_Y_train, nb_epoch=20000, batch_size=64,verbose=False)

"""
Took about 100 seconds on my chromebook without using multiple CPUs
In [68]: model.get_weights()
Out[68]: 
[array([[ 936.47363281]], dtype=float32),
 array([ 152.80149841], dtype=float32)]
"""

#Make lines and plot for both
w1,w0 = model.get_weights()
tt = np.linspace(np.min(diabetes_X[:, 0]), np.max(diabetes_X[:, 0]), 10)
nn_line = w0+w1*tt
lreg_line = regr.intercept_+regr.coef_*tt 

plt.plot(diabetes_X[:,0],diabetes['target'],'kx',tt,lreg_line,'r-',tt,nn_line[0],'b--')
plt.show()
Example #50
0
# Plot the test images with their predicted classes
for i, ax in enumerate(axes.flat):
    a = i
    im = np.reshape(X_test_k[a], (28, 28))
    ax.imshow(im, cmap='binary')
    ax.text(0.95,
            0.05,
            'n={0}'.format(model.predict_classes(X_test_k[i:i + 1])),
            ha='right',
            transform=ax.transAxes,
            color='blue')

    ax.set_xticks([])
    ax.set_yticks([])

wts = np.array(model.get_weights())
#print(wts)
print(wts.shape)
#print(wts[0].shape)
#print(wts[1].shape)
#print(wts[2].shape)
#print(wts[3].shape)
#print(wts[2])

W1, b1, W2, b2 = model.get_weights()
print(W1, b1, W2, b2)
print(W1.shape)
print(b1.shape)
print(W2.shape)
print(b2.shape)
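
# Added check (sketch): with one hidden layer, these four arrays reproduce
# the forward pass by hand, assuming a ReLU hidden layer and softmax output
# (an assumption about this model):
# h = np.maximum(0, X_test_k.dot(W1) + b1)
# logits = h.dot(W2) + b2
# manual_pred = np.argmax(logits, axis=1)  # softmax is monotonic, argmax suffices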
Example #51
0
def model(X_train, Y_train, X_test, Y_test):
    '''
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.optimizers import RMSprop

    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=["accuracy"])

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model.to_yaml(), 'weights': pickle.dumps(model.get_weights())}
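
# Added driver sketch: this function follows the hyperas convention (the
# {{...}} markers are template fields); a typical entry point, assuming a
# matching data() function, is:
# from hyperas import optim
# from hyperopt import Trials, tpe
# best_run, best_model = optim.minimize(model=model, data=data,
#                                       algo=tpe.suggest, max_evals=10,
#                                       trials=Trials())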
minibatch_size = 50
model.fit(X, Y, epochs=20, batch_size=minibatch_size)

# Check the results
X_, Y_ = shuffle(X, Y)
classes = model.predict_classes(X_[0:10], batch_size=minibatch_size)
prob = model.predict_proba(X_[0:10], batch_size=1)

print('classified:')
print(np.argmax(model.predict(X_[0:10]), axis=1) == classes)
print()
print('output probability:')
print(prob)

# Plot the decision boundaries
print(model.get_weights())
w = model.get_weights()[0]
b = model.get_weights()[1]


def border(x, c1, c2):
    # boundary where the logits of classes c1 and c2 are equal:
    # w[0,c1]*x + w[1,c1]*y + b[c1] == w[0,c2]*x + w[1,c2]*y + b[c2]
    return ((w[0, c1] - w[0, c2]) * x + b[c1] - b[c2]) / (w[1, c2] - w[1, c1])


plt.plot(X1[:, 0], X1[:, 1], "o")
plt.plot(X2[:, 0], X2[:, 1], "o")
plt.plot(X3[:, 0], X3[:, 1], "o")
plt.plot([-2, 5], [border(-2, 0, 1), border(5, 0, 1)])
plt.plot([-2, 12], [border(-2, 1, 2), border(12, 1, 2)])
plt.show()
Example #53
0
from keras.optimizers import SGD, Adam, RMSprop
import sklearn.metrics as metrics

model = Sequential()
model.add(Dense(4,input_shape=(nrows*ncols,)))
model.add(Activation('sigmoid'))
model.add(Dense(5))
model.add(Activation('softmax'))

sgd = SGD()
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

h = model.fit(train_flat, train_labs, batch_size = 32, nb_epoch=4, validation_data = (test_flat,test_labs), verbose=1)

#W1,b1 = model.get_weights()
W1,b1,W2,b2 = model.get_weights()
num_param = 1024*4 + 4 + 4*5 + 5

sx, sy = (4,1)
f, con = plt.subplots(sx,sy, sharex='col', sharey='row')
con = con.reshape(sx,sy)
for xx in range(sx):
    for yy in range(sy):
        con[xx,yy].pcolormesh(W1[:,sy*xx+yy].reshape(nrows,ncols), cmap=plt.cm.hot) 

preds = np.argmax(model.predict(test_flat),axis=1)
labs = np.argmax(test_labs,axis=1)
conf = metrics.confusion_matrix(labs,preds)
predsp = model.predict_proba(test_flat)
aic = 2* num_param - 2*metrics.log_loss(np.argmax(test_labs,axis=1),predsp)
Example #54
0
######################
train_X, train_Y = make_regression(n_features=1, noise=5.0, random_state=0)

#######################
### Construct model ###
#######################
model = Sequential()
model.add(Dense(1, activation='linear', input_shape=(1,)))

optimizer = SGD(lr=learning_rate)
model.compile(loss='mse', optimizer=optimizer)

###################
### Train model ###
###################
model.fit(train_X, train_Y, batch_size=1, nb_epoch=nb_epoch)

######################
### Evaluate model ###
######################
model.get_weights()
weight = model.get_weights()[0][0, 0]
bias = model.get_weights()[1][0]
print("W=", weight, "b=", bias)

x = np.linspace(-2.5, 2.5, 100)
y = x * weight + bias
plt.plot(x, y, c='r')
plt.scatter(train_X, train_Y)
plt.show()
Example #55
0
class KerasWrapper(object):
    def __init__(self, parameters=Parameters()):
        self.params = parameters
        self.data_cleaner = DataCleaner()
        self.pre_processor = PreProcessor()
        self.load_data()
        self.model_constructed = False
        self.train_completed = False

    def clean_data(self):
        self.data_cleaner.load_data()
        self.data_cleaner.clean()
        self.data_cleaner.save_cleaned()

    def load_data(self):
        self.data = pd.read_csv(self.params.file_path)
        self.raw_data = self.data.copy(deep=True)
        self.data_cleaner.load_data()

    def __prepare_data(self):

        self.data = self.pre_processor.convert_objects_to_categorical(self.data, self.params.converting_columns)
        self.data = self.pre_processor.normalize_data(self.data, self.params.converting_columns)
        self.data.fillna(-1, inplace=True)

        self.inputs = self.data[self.params.input_params]
        self.outputs = self.data[self.params.output_params]

        excluded_input_data = self.inputs.drop(self.inputs.index[self.params.excluded_rows])
        excluded_output_data = self.outputs.drop(self.outputs.index[self.params.excluded_rows])
        X = excluded_input_data.values
        y = excluded_output_data.values
        y = np_utils.to_categorical(y)

        return X, y

    def create_model(self, summary=True):

        X, y = self.__prepare_data()

        if (self.model_constructed):
            self.model.set_weights(self.network_weights)
            return X, y

        dimof_input = X.shape[1]
        dimof_output = np.max(y) + 1

        if (summary):
            print('dimof_input: ', dimof_input)
            print('dimof_output: ', dimof_output)
            print('batch_size: ', self.params.batch_size)
            print('dimof_middle: ', self.params.nodes)
            print('dropout: ', self.params.dropout)
            print('countof_epoch: ', self.params.epochs)
            print('verbose: ', self.params.verbose)
            print()

        self.model = Sequential()
        self.model.add(
            Dense(self.params.nodes, input_dim=dimof_input, init='uniform', activation=self.params.activation))
        self.model.add(BatchNormalization(beta_init='uniform'))
        self.model.add(
            Dense(self.params.nodes * 2, input_dim=dimof_input, init='uniform', activation=self.params.activation))
        self.model.add(Dropout(self.params.dropout))
        self.model.add(Dense(dimof_output, input_dim=dimof_input, init='uniform', activation='softmax'))
        self.model.compile(loss='mse', optimizer='sgd', metrics=['accuracy'])

        weight_ref = self.model.get_weights()
        self.network_weights = [w.copy() for w in weight_ref]

        if (summary):
            self.model.summary()

        self.model_constructed = True

        return X, y

    def start_train(self, input_data, output_data):
        callbacks = []
        if (self.params.early):
            callbacks.append(
                EarlyStopping(patience=self.params.patience, verbose=self.params.verbose, monitor='val_loss'))
        fit = self.model.fit(input_data, output_data, validation_split=0.2,
                             batch_size=self.params.batch_size, nb_epoch=self.params.epochs,
                             verbose=self.params.verbose, shuffle=True, callbacks=callbacks)
        self.train_completed = True
        return fit

    def evaluate(self, input, output):
        loss, accuracy = self.model.evaluate(input, output, verbose=self.params.verbose)
        return loss, accuracy

    def run_for_all_characters(self):
        self.raw_data['death'] = np.nan
        self.raw_data['live'] = np.nan
        self.raw_data.sort_values('popularity', ascending=False)
        index = self.raw_data.head(100).index.tolist()
        self.params.excluded_rows = []

        # index = self.raw_data.index.tolist()

        for i in index:
            self.params.excluded_rows.append(i)
            self.start_whole_process()
            self.prediction()
            self.params.excluded_rows = []
        self.params.excluded_rows = []

    def prediction(self):

        predictions = []
        for i in self.params.excluded_rows:
            chosen_class = self.model.predict_classes(
                self.inputs.iloc[i].values.reshape((1, len(self.params.input_params))),
                verbose=0)
            probability = self.model.predict_proba(
                self.inputs.iloc[i].values.reshape((1, len(self.params.input_params))),
                verbose=0)
            character = str(self.raw_data['name'][i])

            # rounding on 2 decimals
            death = int((probability[0][0] * 100) + 0.5) / 100.0
            life = int((probability[0][1] * 100) + 0.5) / 100.0

            self.raw_data.set_value(i, 'death', 0)
            self.raw_data.set_value(i, 'live', 0)

            data = (i, character, str(death), str(life))
            predictions.append(data)
            self._prediction_summary(chosen_class, probability, character)

        return predictions

    def start_whole_process(self):
        X, y = self.create_model()
        self.start_train(X, y)
        return self.evaluate(X, y)

    def _prediction_summary(self, chosen_class, probability, character):
        print('Name: ' + character)
        print('Dead: ' + str(probability[0][0]) + ' %')
        print('Alive: ' + str(probability[0][1]) + ' %')
        print('Chosen class: ' + str(chosen_class))
        print(30 * '-')
Example #56
0
    #model.add(Dense(60, init='glorot_uniform'))
    #model.add(Activation('tanh'))
    #model.add(Dense(30, input_dim=60, init='glorot_uniform'))
    #model.add(Activation('tanh'))
    #model.add(Dense(25, input_dim=20, init='glorot_uniform'))
    #model.add(Activation('tanh'))
    #model.add(Dense(15, input_dim=25, init='glorot_uniform'))
    #model.add(Activation('tanh'))
    model.add(Dense(10, init='glorot_uniform'))
    model.add(Activation('tanh'))
    model.add(Dense(1, init='glorot_uniform'))
    model.add(Activation('linear'))
    sgd = SGD(lr=0.01, decay=0.0, momentum=0.0, nesterov=False)
    rmsprop = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
    L = model.compile(loss='mse',optimizer=rmsprop)
    print model.get_weights()
    H = model.fit(data_train, pCO2_list_train,nb_epoch=1600,callbacks=[earlyStopping],batch_size=20,validation_data=(data_eval,pCO2_list_eval))
    score = model.evaluate(data_val, pCO2_list_val, batch_size=20)
    print model.get_weights()
    print model.summary()
    print model.get_config()

    preds1 = model.predict(data_train)
    preds_list = preds1[:,0]
    pCO2_matrix = preds_list * npy.nanstd(pCO2_list) + npy.nanmean(pCO2_list)
    pCO2_list_test = pCO2_list_train * npy.nanstd(pCO2_list) + npy.nanmean(pCO2_list)

    print 'TEST!!!!!!!!!!!!!!!!!!!'
    print 'TOTAL'
    print '!' * 90
    print 'Validation'
model.add(Dense(6, input_dim = 10, W_regularizer=l2(0.005)))
model.add(Activation('relu'))
model.add(Dense(6, input_dim = 6, W_regularizer=l2(0.005)))
model.add(Activation('relu'))

model.add(Dense(6, input_dim = 6, W_regularizer=l2(0.005)))
model.add(Activation('relu'))
model.add(Dense(1, input_dim = 6, W_regularizer=l2(0.005)))
model.add(Activation('linear'))

model.compile(loss="mean_squared_error", optimizer="rmsprop")
train_x_set, train_y_set, test_x_set, test_y_set = load_data()

model.fit(train_x_set, train_y_set, batch_size=3500, nb_epoch=5000, validation_split=0.05)
print(model.get_weights())
predicted = model.predict(test_x_set)
rmse = np.sqrt(((predicted - test_y_set) ** 2).mean())


print("预测值:")
print(predicted.T)
print("实际:")
print(test_y_set.T)
print(rmse)

#num = 0



#for i in range(len(test_y_set)):
np.random.shuffle(data)
X = data[:, :-1]
y = data[:, -1]

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)

model = Sequential()

model.add(Dense(14, activation='relu', input_dim=len(names)))
# model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))


# sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

tbCallBack = keras.callbacks.TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)

model.fit(X_train, y_train, epochs=200, callbacks=[tbCallBack])

score = model.evaluate(X_test, y_test)

model.save_weights('weights')

# Save weights for external usage
df = pd.DataFrame(model.get_weights())
df.to_csv("weights.csv", header=False)

Example #59
0
model.add(Dense(input_dim=X_train.shape[1], output_dim=50, init = 'uniform',activation='tanh',bias = True))
#%%
model.add(Dense(input_dim=50,output_dim=50,init = 'uniform', activation='tanh', bias = True))
#%%
#model.add(Dense(input_dim=50, output_dim = 25,init = 'uniform',activation = 'tanh', bias = True))
#%%
model.add(Dense(input_dim=50, output_dim=y_train.shape[1], init = 'uniform', activation='softmax'))
sgd = SGD(lr=0.001, decay=1e-5, momentum=.9)
model.compile(loss='categorical_crossentropy', optimizer=sgd)

#%%
layer = model.layers

print layer
#%%
weight = model.get_weights()
print weight
#%%
model.fit(X_train, y_train, nb_epoch=50,  batch_size=300, verbose=0, validation_split=0.1, 
          show_accuracy=True)
          
#%%
print (theano.config.floatX)
print (theano.config.device)
#%%
from sklearn.metrics import accuracy_score, confusion_matrix

y_train_pred = model.predict_classes(X_train, verbose=0)
accS = accuracy_score(Y_train, y_train_pred)
conf = confusion_matrix(Y_train, y_train_pred)
#%%
training_data = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 1], [1, 1, 0],
                          [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 1]],
                         "float32")

#the eight expected target values in the same order
target_data = np.array([[-1], [-1], [-1], [-1], [1], [1], [1], [1]], "float32")

#create the model of the neural network
model = Sequential()

#output layer with 1 neuron and tanh activation
model.add(Dense(1, input_dim=3, activation='tanh'))

#configure the learning process
model.compile(loss='mean_squared_error',
              optimizer='adam',
              metrics=['accuracy'])

#training the neural network
model.fit(training_data, target_data, nb_epoch=5000, verbose=1)

#plot the model of the neural network
plot_model(model,
           to_file='model_plot.png',
           show_shapes=True,
           show_layer_names=True)

print(model.evaluate(training_data, target_data))
print(model.get_weights())
print(model.predict_classes(training_data))  #print the result
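
# Added verification sketch: the single tanh unit can be reproduced directly
# from the weights; predict_classes thresholds the output at 0.5.
W, b = model.get_weights()
manual = np.tanh(training_data.dot(W) + b)
print((manual > 0.5).astype(int).ravel())  # mirrors predict_classes above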