def get_dae_clf():
    # model1: convolutional denoising autoencoder (DAE)
    model1 = Sequential()

    # shift inputs from [-0.5, 0.5] to [0, 1] for the sigmoid stack
    model1.add(Lambda(lambda x_: x_ + 0.5, input_shape=(28, 28, 1)))

    # Encoder
    model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model1.add(AveragePooling2D((2, 2), padding="same"))
    model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))

    # Decoder
    model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model1.add(UpSampling2D((2, 2)))
    model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model1.add(Conv2D(1, (3, 3), activation='sigmoid', padding='same', activity_regularizer=regs.l2(1e-9)))

    # shift the reconstruction back to [-0.5, 0.5]
    model1.add(Lambda(lambda x_: x_ - 0.5))

    model1.load_weights("./dae/mnist")
    model1.compile(loss='mean_squared_error', metrics=['mean_squared_error'], optimizer='adam')

    # model2: CNN classifier applied to the denoised images
    model2 = Sequential()

    model2.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
    model2.add(Activation('relu'))
    model2.add(Conv2D(32, (3, 3)))
    model2.add(Activation('relu'))
    model2.add(MaxPooling2D(pool_size=(2, 2)))

    model2.add(Conv2D(64, (3, 3)))
    model2.add(Activation('relu'))
    model2.add(Conv2D(64, (3, 3)))
    model2.add(Activation('relu'))
    model2.add(MaxPooling2D(pool_size=(2, 2)))

    model2.add(Flatten())
    model2.add(Dense(200))
    model2.add(Activation('relu'))
    model2.add(Dropout(0.5))
    model2.add(Dense(200))
    model2.add(Activation('relu'))
    model2.add(Dense(10))

    model2.load_weights("./models/mnist")

    # cross-entropy on logits: the softmax is folded into the loss
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct, logits=predicted)

    model2.compile(loss=fn, optimizer=SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True), metrics=['accuracy'])

    # stacked model: denoise first, then classify
    model = Sequential()
    model.add(model1)
    model.add(model2)
    model.compile(loss=fn, optimizer=SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True), metrics=['accuracy'])

    return model
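A minimal usage sketch (an assumption, not part of the source): evaluate the stacked model with the MNIST wrapper used elsewhere in these examples, whose arrays are scaled to [-0.5, 0.5] as the Lambda shifts expect.

# Hypothetical usage of get_dae_clf():
data = MNIST()  # assumed wrapper with [-0.5, 0.5]-scaled validation arrays
clf = get_dae_clf()
loss, acc = clf.evaluate(data.validation_data, data.validation_labels)
print("stacked DAE+classifier accuracy: {:.3f}".format(acc))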
Example #2
def build_models(seq_len=12, num_classes=4, load_weights=False):
    # DST-Net: ResNet50
    resnet = ResNet50(weights='imagenet', include_top=False)
    for layer in resnet.layers:
        layer.trainable = False
    resnet.load_weights('model/resnet.h5')
    # DST-Net: Conv3D + Bi-LSTM
    inputs = Input(shape=(seq_len, 7, 7, 2048))
    # conv1_1, conv3D and flatten
    conv1_1 = TimeDistributed(Conv2D(128, (1, 1), activation='relu'))(inputs)
    conv3d = Conv3D(64, (3, 3, 3), strides=1, padding='same', activation='relu')(conv1_1)
    flatten = Reshape(target_shape=(seq_len, 7 * 7 * 64))(conv3d)
    # 2 Layers Bi-LSTM
    bilstm_1 = Bidirectional(LSTM(128, dropout=0.5,
                                  return_sequences=True))(flatten)
    bilstm_2 = Bidirectional(LSTM(128, dropout=0.5,
                                  return_sequences=False))(bilstm_1)
    outputs = Dense(num_classes, activation='softmax')(bilstm_2)
    dstnet = Model(inputs=inputs, outputs=outputs)
    dstnet.compile(loss='categorical_crossentropy',
                   optimizer=SGD(lr=0.001, momentum=0.9, nesterov=True))
    # load models
    if load_weights:
        dstnet.load_weights('model/dstnet.h5')
    return resnet, dstnet
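A hedged inference sketch: ResNet50 with include_top=False maps each 224x224 frame to a 7x7x2048 feature map, and a sequence of seq_len such maps feeds DST-Net. The frame batch and the weight files under model/ are assumptions.

import numpy as np

resnet, dstnet = build_models(seq_len=12, num_classes=4, load_weights=True)
# hypothetical batch of 12 preprocessed 224x224 RGB frames
frames = np.random.rand(12, 224, 224, 3).astype('float32')
feats = resnet.predict(frames)              # (12, 7, 7, 2048)
probs = dstnet.predict(feats[np.newaxis])   # (1, num_classes)
print(probs.argmax(axis=-1))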
def get_clf():
    """
    Standard neural network training procedure.
    """
    model = Sequential()

    model.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(200))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(200))
    model.add(Activation('relu'))
    model.add(Dense(10))

    model.load_weights("./models/mnist")

    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct, logits=predicted)

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    return model
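Note that the final Dense(10) emits logits; the softmax is folded into the loss, so probabilities must be recovered explicitly at inference time. A small sketch (predict_proba is a hypothetical helper, not part of the source):

import numpy as np

def predict_proba(model, x):
    # numerically stable softmax over the model's logits
    logits = model.predict(x)
    e = np.exp(logits - logits.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)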
Example #4
    def __init__(self, dataset, model):
        super(CNNCertBase, self).__init__(dataset, model)

        self.num_classes = get_num_classes(dataset)

        input_shape = get_input_shape(dataset)
        new_input_shape = (input_shape[1], input_shape[2], input_shape[0])
        self.k_model = sequential_torch2keras(model_transform(self.model, input_shape), dataset)

        global graph
        global sess
        with sess.as_default():
            with graph.as_default():
                # Save the transformed Keras model to a temporary place so that the tool can read from file
                # The tool can only init model from file...
                sgd = SGD(lr=0.01, decay=1e-5, momentum=0.9, nesterov=True)
                # fn is assumed to be the module-level logits cross-entropy
                # loss shared by these examples
                self.k_model.compile(loss=fn,
                                     optimizer=sgd,
                                     metrics=['accuracy'])
                self.k_model.save('tmp/tmp.pt')

                self.new_model = nl.CNNModel('tmp/tmp.pt', new_input_shape)
                self.weights = self.new_model.weights
                self.biases = self.new_model.biases

                # print(self.new_model.summary())
                try:
                    check_consistency(self.model, self.k_model, input_shape)
                except Exception:
                    raise Exception("Somehow the transformed model behaves differently from the original model.")

        self.LP = False
        self.LPFULL = False
        self.method = "ours"
        self.dual = False
Example #5
def train(data,
          file_name,
          params,
          num_epochs=50,
          batch_size=128,
          train_temp=1,
          init=None,
          lr=0.01,
          decay=1e-5,
          momentum=0.9):
    """
    Train a n-layer simple network for MNIST and CIFAR
    """

    # create a Keras sequential model
    model = Sequential()
    # reshape the input (28*28*1) or (32*32*3) to 1-D
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    # dense layers (the hidden layer)
    for param in params:
        model.add(Dense(param))
        # ReLU activation
        model.add(Activation('relu'))
    # the output layer, with 10 classes
    model.add(Dense(10))

    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    # initiate the SGD optimizer with given hyper parameters
    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    model.summary()
    print("Traing a {} layer model, saving to {}".format(
        len(params) + 1, file_name))
    # run training with given dataset, and print progress
    history = model.fit(data.train_data,
                        data.train_labels,
                        batch_size=batch_size,
                        validation_data=(data.validation_data,
                                         data.validation_labels),
                        epochs=num_epochs,
                        shuffle=True)

    # save model to a file
    if file_name is not None:
        model.save(file_name)

    return {'model': model, 'history': history}
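A hedged example call, assuming the MNIST data wrapper these scripts use elsewhere (train/validation arrays on a data object):

# Hypothetical: train a 3-hidden-layer MLP on MNIST and keep the history.
result = train(MNIST(), "models/mnist_mlp", params=[256, 256, 256],
               num_epochs=10)
model, history = result['model'], result['history']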
    def train_inception(self, x_train, y_train, learn_rate=0.001, epoch=5,
                        batch_size=128, validation_split_size=0.2,
                        train_callbacks=()):

        history = LossHistory()

        X_train, X_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=validation_split_size)

        # this is the model we will train
        self.classifier = Model(inputs=self.base_model.input, outputs=self.predictions)

        # first: train only the top layers (which were randomly initialized)
        # i.e. freeze all convolutional InceptionV3 layers
        for layer in self.base_model.layers:
            layer.trainable = False

        opt = RMSprop(lr=learn_rate)

        # compile the model (should be done *after* setting layers to non-trainable)
        self.classifier.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])

        earlyStopping = EarlyStopping(monitor='val_loss', patience=3, verbose=0, mode='auto')

        # train the model on the new data for a few epochs
        self.classifier.fit_generator(...)

        # at this point, the top layers are well trained and we can start fine-tuning
        # convolutional layers from inception V3. We will freeze the bottom N layers
        # and train the remaining top layers.

        # let's visualize layer names and layer indices to see how many layers
        # we should freeze:
        for i, layer in enumerate(self.base_model.layers):
            print(i, layer.name)

        # we chose to train the top 2 inception blocks, i.e. we will freeze
        # the first 249 layers and unfreeze the rest:
        for layer in self.classifier.layers[:249]:
            layer.trainable = False
        for layer in self.classifier.layers[249:]:
            layer.trainable = True

        # we need to recompile the model for these modifications to take effect;
        # we use SGD with a low learning rate

        opt_sgd = SGD(lr=0.0001, momentum=0.9)
        
        self.classifier.compile(optimizer=opt_sgd, loss='categorical_crossentropy', metrics=['accuracy'])

        # we train our model again (this time fine-tuning the top 2 inception
        # blocks alongside the top Dense layers)
        self.classifier.fit_generator(...)

        self.classifier.fit(X_train, y_train,
                            batch_size=batch_size,
                            epochs=epoch,
                            verbose=1,
                            validation_data=(X_valid, y_valid),
                            callbacks=[history, *train_callbacks, earlyStopping])
        fbeta_score = self._get_fbeta_score(self.classifier, X_valid, y_valid)
        return [history.train_losses, history.val_losses, fbeta_score]
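The two fit_generator(...) calls above are placeholders in the source. One plausible shape for them, assuming a Keras ImageDataGenerator pipeline (all names here are illustrative):

from keras.preprocessing.image import ImageDataGenerator

# Hypothetical stand-in for the elided fit_generator(...) call,
# given a compiled `classifier` and the split arrays from above:
datagen = ImageDataGenerator(horizontal_flip=True)
classifier.fit_generator(
    datagen.flow(X_train, y_train, batch_size=batch_size),
    steps_per_epoch=len(X_train) // batch_size,
    epochs=epoch,
    validation_data=(X_valid, y_valid))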
def train(data, file_name, params, num_epochs=50, batch_size=256,
          train_temp=1, init=None, lr=0.01, decay=1e-5, momentum=0.9,
          activation="relu", optimizer_name="sgd"):
    """
    Train an n-layer simple network for MNIST and CIFAR
    """
    
    # create a Keras sequential model
    model = Sequential()
    # reshape the input (28*28*1) or (32*32*3) to 1-D
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    # dense layers (the hidden layer)
    n = 0
    for param in params:
        n += 1
        model.add(Dense(param, kernel_initializer='he_uniform'))
        # chosen activation; arctan is not a built-in Keras activation,
        # so it is added via a Lambda layer
        if activation == "arctan":
            model.add(Lambda(lambda x: tf.atan(x), name=activation+"_"+str(n)))
        else:
            model.add(Activation(activation, name=activation+"_"+str(n)))
    # the output layer, with 10 classes
    model.add(Dense(10, kernel_initializer='he_uniform'))
    
    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted/train_temp)

    if optimizer_name == "sgd":
        # initiate the SGD optimizer with given hyper parameters
        optimizer = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
    elif optimizer_name == "adam":
        optimizer = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=decay, amsgrad=False)

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn,
                  optimizer=optimizer,
                  metrics=['accuracy'])
    
    model.summary()
    print("Traing a {} layer model, saving to {}".format(len(params) + 1, file_name))
    # run training with given dataset, and print progress
    history = model.fit(data.train_data, data.train_labels,
              batch_size=batch_size,
              validation_data=(data.validation_data, data.validation_labels),
              epochs=num_epochs,
              shuffle=True)
    

    # save model to a file
    if file_name is not None:
        model.save(file_name)
        print('model saved to', file_name)

    return {'model': model, 'history': history}
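For instance, a hedged call exercising the arctan branch with Adam (the MNIST wrapper is assumed):

# Hypothetical: two arctan hidden layers trained with Adam.
train(MNIST(), "models/mnist_arctan", params=[1024, 1024],
      activation="arctan", optimizer_name="adam", lr=0.001)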
Example #8
def train(data,
          file_name,
          params,
          num_epochs=50,
          batch_size=128,
          train_temp=1,
          init=None,
          lr=0.01,
          decay=1e-5,
          momentum=0.9):
    """
    Train a 2-layer simple network for MNIST and CIFAR
    """
    # create a Keras sequential model
    model = Sequential()
    # reshape the input (28*28*1) or (32*32*3) to 1-D
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    # first dense layer (the hidden layer)
    model.add(Dense(params[0]))
    # Keras' softplus has no sharpness parameter, so build
    # softplus(10*x)/10 (alpha = 10) out of two Lambda layers:
    # scale the input up by 10 ...
    model.add(Lambda(lambda x: x * 10))
    # ... apply the standard softplus ...
    model.add(Activation('softplus'))
    # ... and scale the output back down by 10
    model.add(Lambda(lambda x: x * 0.1))
    # the output layer, with 10 classes
    model.add(Dense(10))

    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    # initiate the SGD optimizer with given hyper parameters
    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    # run training with given dataset, and print progress
    model.fit(data.train_data,
              data.train_labels,
              batch_size=batch_size,
              validation_data=(data.validation_data, data.validation_labels),
              epochs=num_epochs,
              shuffle=True)

    # save model to a file
    if file_name is not None:
        model.save(file_name)

    return model
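The Lambda/softplus/Lambda trio computes softplus(10*x)/10, a sharpened softplus that approaches ReLU as alpha grows. A quick NumPy check of that identity (an illustrative sketch, separate from the training code):

import numpy as np

def softplus_alpha(x, alpha=10.0):
    # softplus(alpha * x) / alpha, as assembled by the three layers above
    return np.log1p(np.exp(alpha * x)) / alpha

print(softplus_alpha(np.array([-1.0, 0.0, 1.0])))  # ~[0.0, 0.069, 1.0]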
Example #9
def train(data,
          file_name,
          params,
          num_epochs=50,
          batch_size=128,
          train_temp=1,
          init=None):
    """
    Standard neural network training procedure.
    """
    model = Sequential()

    print(data.train_data.shape)

    model.add(Conv2D(params[0], (3, 3), input_shape=data.train_data.shape[1:]))
    model.add(Activation('relu'))
    model.add(Conv2D(params[1], (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(params[2], (3, 3)))
    model.add(Activation('relu'))
    model.add(Conv2D(params[3], (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(params[4]))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(params[5]))
    model.add(Activation('relu'))
    model.add(Dense(10))

    if init is not None:
        model.load_weights(init)

    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    model.fit(data.train_data,
              data.train_labels,
              batch_size=batch_size,
              validation_data=(data.validation_data, data.validation_labels),
              epochs=num_epochs,
              shuffle=True)

    if file_name is not None:
        model.save(file_name)

    return model
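A hedged example call with the parameter layout the function expects (four conv widths followed by two dense widths; the MNIST wrapper is assumed):

# Hypothetical: the standard MNIST configuration used in these examples.
model = train(MNIST(), "models/mnist_cnn",
              params=[32, 32, 64, 64, 200, 200], num_epochs=50)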
Example #10
def train(inputs,
          labels,
          num_classes=10,
          model=None,
          params=None,
          num_epochs=50,
          batch_size=128,
          train_temp=1,
          init=None,
          lr=0.01,
          decay=1e-5,
          momentum=0.9):
    """
    Train a n-layer simple network for MNIST and CIFAR
    """

    # # create a Keras sequential model
    if model is None:
        nlayer_model = NLayerModel(params, num_classes=num_classes)
        model = nlayer_model.model

    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    # initiate the SGD optimizer with given hyper parameters
    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    model.summary()
    print('=================')
    print("training")
    print('=================')
    # run training with given dataset, and print progress
    # (note: validation_data here reuses the training inputs)
    history = model.fit(inputs,
                        labels,
                        batch_size=batch_size,
                        validation_data=(inputs, labels),
                        epochs=num_epochs,
                        shuffle=True)

    # # save model to a file
    # if file_name != None:
    #     model.save(file_name)
    print('=================')
    print('finished training')
    print('=================')
    # return the model that was actually trained, along with its history
    return {'model': model, 'history': history}
Example #11
def convert(file_name, new_name, cifar=False):
    if not cifar:
        eq_weights, new_params = get_weights(file_name)
        data = MNIST()
    else:
        eq_weights, new_params = get_weights(file_name, inp_shape=(32, 32, 3))
        data = CIFAR()
    model = Sequential()
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    for param in new_params:
        model.add(Dense(param))
        model.add(Lambda(lambda x: tf.nn.relu(x)))
    model.add(Dense(10))

    # copy the equivalent weights into the new model, layer by layer
    for i in range(len(eq_weights)):
        try:
            print(eq_weights[i][0].shape)
        except Exception:
            pass
        model.layers[i].set_weights(eq_weights[i])

    sgd = SGD(lr=0.01, decay=1e-5, momentum=0.9, nesterov=True)

    # fn is assumed to be the module-level logits cross-entropy loss
    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    model.save(new_name)
    acc = model.evaluate(data.validation_data, data.validation_labels)[1]
    printlog("Converting CNN to MLP")
    nlayer = file_name.split('_')[-3][0]
    filters = file_name.split('_')[-2]
    kernel_size = file_name.split('_')[-1]
    printlog(
        "model name = {0}, numlayer = {1}, filters = {2}, kernel size = {3}".
        format(file_name, nlayer, filters, kernel_size))
    printlog("Model accuracy: {:.3f}".format(acc))
    printlog("-----------------------------------")
    return acc
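The log lines above imply file names of the form <name>_<n>layer_<filters>_<kernel_size>; a hedged example call under that assumed convention:

# Hypothetical: convert a saved 4-layer CNN into its equivalent MLP.
acc = convert("models/mnist_cnn_4layer_5_3", "models/mnist_mlp_4layer_5_3")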
Example #12
import numpy as np
from tensorflow.contrib.keras.api.keras.models import Sequential
from tensorflow.contrib.keras.api.keras.layers import Dense, Activation
from tensorflow.contrib.keras.api.keras.optimizers import SGD
import tensorflow.contrib.lite as lite

# a tiny XOR network
model = Sequential()
model.add(Dense(8, input_dim=2))
model.add(Activation('tanh'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.1))
model.fit(
    np.array([[0, 0], [0, 1.0], [1.0, 0], [1.0, 1.0]]),
    np.array([[0.0], [1.0], [1.0], [0.0]]),
    batch_size=1, epochs=300)
model.save('xor_model.h5')

# convert the saved Keras model to TensorFlow Lite
converter = lite.TFLiteConverter.from_keras_model_file("xor_model.h5")
tflite_model = converter.convert()
with open("xor_model.tflite", "wb") as f:
    f.write(tflite_model)
def train_autoencoder(data,
                      codec,
                      batch_size=1000,
                      epochs=1000,
                      saveFilePrefix=None,
                      use_tanh=True,
                      train_imagenet=False,
                      aux_num=0):
    """Train autoencoder

    python3 train_CAE.py -d mnist --compress_mode 1 --epochs 10000 --save_prefix mnist

    """

    ckptFileName = saveFilePrefix + "ckpt"

    encoder_model_filename = saveFilePrefix + "encoder.json"
    decoder_model_filename = saveFilePrefix + "decoder.json"
    encoder_weight_filename = saveFilePrefix + "encoder.h5"
    decoder_weight_filename = saveFilePrefix + "decoder.h5"

    if os.path.exists(decoder_weight_filename):
        print("Load the pre-trained model.")
        codec.decoder.load_weights(decoder_weight_filename)
    elif os.path.exists(ckptFileName):
        print("Load the previous checkpoint")
        codec.decoder.load_weights(ckptFileName)

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    codec.decoder.compile(loss='mse', optimizer=sgd)

    checkpointer = ModelCheckpoint(filepath=ckptFileName,
                                   verbose=1,
                                   save_best_only=True)

    if train_imagenet:
        # train using Keras imageGenerator
        print("Training using imageGenerator:")
        codec.decoder.fit_generator(
            data.train_generator_flow,
            steps_per_epoch=100,
            epochs=epochs,
            validation_data=data.validation_generator_flow,
            validation_steps=100,
            callbacks=[checkpointer])
    else:
        # in-memory training
        x_train = data.validation_data
        # append constant images (0, +0.5, -0.5) as auxiliary samples
        if aux_num > 0:
            x_train = np.concatenate(
                (x_train, np.zeros((aux_num, ) + x_train.shape[1:])), axis=0)
            x_train = np.concatenate(
                (x_train, 0.5 * np.ones((aux_num, ) + x_train.shape[1:])),
                axis=0)
            x_train = np.concatenate(
                (x_train, -0.5 * np.ones((aux_num, ) + x_train.shape[1:])),
                axis=0)

        y_train = x_train
        x_test = data.test_data
        y_test = x_test
        print("In-memory training:")
        print("Shape of training data:{}".format(x_train.shape))
        print("Shape of testing data:{}".format(x_test.shape))

        codec.decoder.fit(x_train,
                          y_train,
                          batch_size=batch_size,
                          validation_data=(x_test, y_test),
                          epochs=epochs,
                          shuffle=True,
                          callbacks=[checkpointer])

    print("Checkpoint is saved to {}\n".format(ckptFileName))

    model_json = codec.encoder.to_json()
    with open(encoder_model_filename, "w") as json_file:
        json_file.write(model_json)
    print(
        "Encoder specification is saved to {}".format(encoder_model_filename))

    codec.encoder.save_weights(encoder_weight_filename)
    print("Encoder weight is saved to {}\n".format(encoder_weight_filename))

    model_json = codec.decoder.to_json()
    with open(decoder_model_filename, "w") as json_file:
        json_file.write(model_json)

    print(
        "Decoder specification is saved to {}".format(decoder_model_filename))

    codec.decoder.save_weights(decoder_weight_filename)
    print("Decoder weight is saved to {}\n".format(decoder_weight_filename))
Example #14
def train_cnn_7layer(data,
                     file_name,
                     params,
                     num_epochs=50,
                     batch_size=256,
                     train_temp=1,
                     init=None,
                     lr=0.01,
                     decay=1e-5,
                     momentum=0.9,
                     activation="relu",
                     optimizer_name="sgd"):
    """
    Train a 7-layer cnn network for MNIST and CIFAR (same as the cnn model in Clever)
    mnist: 32 32 64 64 200 200 
    cifar: 64 64 128 128 256 256
    """

    # create a Keras sequential model
    model = Sequential()

    print("training data shape = {}".format(data.train_data.shape))

    # define model structure
    model.add(Conv2D(params[0], (3, 3), input_shape=data.train_data.shape[1:]))
    model.add(Activation(activation))
    model.add(Conv2D(params[1], (3, 3)))
    model.add(Activation(activation))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(params[2], (3, 3)))
    model.add(Activation(activation))
    model.add(Conv2D(params[3], (3, 3)))
    model.add(Activation(activation))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(params[4]))
    model.add(Activation(activation))
    model.add(Dropout(0.5))
    model.add(Dense(params[5]))
    model.add(Activation(activation))
    model.add(Dense(10))

    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    if optimizer_name == "sgd":
        # initiate the SGD optimizer with given hyper parameters
        optimizer = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
    elif optimizer_name == "adam":
        optimizer = Adam(lr=lr,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=None,
                         decay=decay,
                         amsgrad=False)

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn, optimizer=optimizer, metrics=['accuracy'])

    model.summary()
    print("Traing a {} layer model, saving to {}".format(
        len(params) + 1, file_name))
    # run training with given dataset, and print progress
    history = model.fit(data.train_data,
                        data.train_labels,
                        batch_size=batch_size,
                        validation_data=(data.validation_data,
                                         data.validation_labels),
                        epochs=num_epochs,
                        shuffle=True)

    # save model to a file
    if file_name is not None:
        model.save(file_name)
        print('model saved to', file_name)

    return {'model': model, 'history': history}
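A hedged call using the MNIST widths listed in the docstring (the MNIST wrapper is assumed):

# Hypothetical: the docstring's MNIST configuration.
train_cnn_7layer(MNIST(), "models/mnist_cnn_7layer",
                 params=[32, 32, 64, 64, 200, 200])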
Example #15
import tensorflow.contrib.keras.api.keras as keras
from tensorflow.contrib.keras.api.keras.models import Sequential
from tensorflow.contrib.keras.api.keras.layers import Dense, Dropout, Activation
from tensorflow.contrib.keras.api.keras.optimizers import SGD

import numpy as np

x_train = np.random.random((1000, 20))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(1000, 1)),
                                     num_classes=10)
x_test = np.random.random((100, 20))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)),
                                    num_classes=10)

model = Sequential()
model.add(Dense(64, activation='relu', input_dim=20))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=20, batch_size=128)
score = model.evaluate(x_test, y_test, batch_size=128)
y_pred = model.predict(np.random.random((1, 20)), batch_size=128)
print(y_pred)
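Since the model was compiled with metrics=['accuracy'], evaluate returns a [loss, accuracy] pair, e.g.:

print("test loss = {:.4f}, test accuracy = {:.4f}".format(*score))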