Code example #1
    def _pretrain_layers(self, pretrain_data, batch_size=64, epochs=10, verbose=0):
        """
        Pretrain layers using stacked auto-encoders
        Parameters
        ----------
        pretrain_data : 2d array_like, (N, D)
            Data to use for pretraining. Can be the same as used for training
        batch_size : int, optional
        epochs : int, optional
        verbose : int, optional
            Verbosity level. Passed to Keras fit method
        Returns
        -------
            None. Layers trained in place
        """
        if verbose:
            print('{time}: Pretraining {num_layers:d} layers'.format(time=datetime.datetime.now(), num_layers=len(self._all_layers)))

        for ind, end_layer in enumerate(self._all_layers):
            # print('Pre-training layer {0:d}'.format(ind))
            # Build an autoencoder from the first ind+1 layers and train it
            cur_layers = self._all_layers[0:ind+1]
            ae = models.Sequential(cur_layers)

            decoder = layers.Dense(pretrain_data.shape[1], activation='linear')
            ae.add(decoder)

            ae.compile(loss='mean_squared_error', optimizer='rmsprop')
            ae.fit(pretrain_data, pretrain_data, batch_size=batch_size, epochs=epochs,
                   verbose=verbose)

        self.model = models.Sequential(self._all_layers)

        if verbose:
            print('{time}: Finished pretraining'.format(time=datetime.datetime.now()))
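This first example pretrains greedily: each prefix of the encoder stack is wrapped in a throwaway autoencoder with a fresh linear decoder and fit to reconstruct the input. Below is a minimal, self-contained sketch of the same scheme, assuming the `models`/`layers` aliases used throughout these snippets (e.g. `from tensorflow.contrib.keras import models, layers`); the function name, layer sizes, and random data are illustrative, not from the original project.

import numpy as np

def pretrain_stacked_ae(encoder_layers, data, batch_size=64, epochs=10):
    # Train each encoder prefix as an autoencoder that reconstructs `data`
    for ind in range(len(encoder_layers)):
        ae = models.Sequential(encoder_layers[:ind + 1])
        ae.add(layers.Dense(data.shape[1], activation='linear'))  # throwaway decoder
        ae.compile(loss='mean_squared_error', optimizer='rmsprop')
        ae.fit(data, data, batch_size=batch_size, epochs=epochs, verbose=0)
    return models.Sequential(encoder_layers)

encoder = [layers.Dense(128, activation='relu', input_shape=(784,)),
           layers.Dense(64, activation='relu'),
           layers.Dense(32, activation='relu')]
model = pretrain_stacked_ae(encoder, np.random.rand(256, 784), epochs=1)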
Code example #2
    def model_definition(self):
        self.model = models.Sequential()

        self.model.add(
            layers.Conv2D(64, (5, 5),
                          activation='relu',
                          input_shape=self.input_shape))
        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Conv2D(128, (3, 3), activation='relu'))
        self.model.add(layers.AveragePooling2D())
        self.model.add(layers.Conv2D(128, (1, 1), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Flatten())
        self.model.add(layers.Dense(3072, activation='relu'))
        self.model.add(layers.Dropout(0.5))
        self.model.add(layers.Dense(128, activation='relu'))
        self.model.add(layers.Dropout(0.5))
        self.model.add(layers.Dense(3, activation='softmax'))

        adamax = optimizers.Adamax()
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=adamax,
                           metrics=['acc'])
Code example #3
def classifier_model():

    model = models.Sequential()
    model.add(
        layers.Conv2D(NUM_FILTERS_1, [3, 3],
                      strides=(2, 2),
                      padding='same',
                      activation='relu',
                      input_shape=(28, 28, 1),
                      kernel_initializer=initializers.glorot_normal(),
                      bias_initializer=initializers.Zeros()))
    model.add(
        layers.Conv2D(NUM_FILTERS_2, [3, 3],
                      strides=(2, 2),
                      padding='same',
                      activation='relu',
                      kernel_initializer=initializers.glorot_normal(),
                      bias_initializer=initializers.Zeros()))
    model.add(layers.Flatten())
    model.add(
        layers.Dense(NUM_CLASSES,
                     kernel_initializer=initializers.glorot_normal(),
                     bias_initializer=initializers.Zeros()))

    return model
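Note that the final Dense layer above has no activation, so the model emits raw logits. A hedged training setup that pairs it with a from-logits loss might look like this; the constant values and the modern tf.keras loss class are assumptions on our part, not shown in the snippet:

NUM_FILTERS_1, NUM_FILTERS_2, NUM_CLASSES = 32, 64, 10  # assumed values
model = classifier_model()
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# model.fit(x_train, y_train, epochs=5)  # x_train: (N, 28, 28, 1), y_train: int labels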
Code example #4
    def olliNetwork(self):
        self.model = models.Sequential()

        self.model.add(
            layers.Conv2D(64, (5, 5),
                          activation='relu',
                          input_shape=(48, 48, 1)))
        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Conv2D(128, (4, 4), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Flatten())
        self.model.add(layers.Dense(3072, activation='relu'))
        self.model.add(layers.Dropout(0.5))
        self.model.add(layers.Dense(128, activation='relu'))
        self.model.add(layers.Dropout(0.5))
        self.model.add(layers.Dense(3, activation='softmax'))
Code example #5
    def fit(self, training_data, training_betas=None, epochs=10, verbose=0):
        """
        Train the neural network model using provided `training_data`
        Parameters
        ----------
        training_data : 2d array_like (N, D)
            Data on which to train the tSNE model
        training_betas : 1d array_like (N,), optional
            Widths for the Gaussian kernels. If `None` (the usual case), they are calculated
            from `training_data` and `self.perplexity`; they can also be provided explicitly.
        epochs: int, optional
        verbose: int, optional
            Default 0. Verbosity level. Passed to Keras fit method

        Returns
        -------
        None. Model trained in place
        """

        assert training_data.shape[1] == self.num_inputs, \
            "Input training data must have `num_inputs` features per row"

        self._training_betas = training_betas
        self._epochs = epochs

        if self._training_betas is None:
            training_betas = self._calc_training_betas(training_data,
                                                       self.perplexity)
            self._training_betas = training_betas

        if self.do_pretrain:
            self._pretrain_layers(training_data,
                                  batch_size=self._batch_size,
                                  epochs=epochs,
                                  verbose=verbose)
        else:
            self.model = models.Sequential(self._all_layers)

        self.model.compile(self._optimizer, self._loss_func)

        train_generator = self._make_train_generator(training_data,
                                                     training_betas,
                                                     self._batch_size)

        batches_per_epoch = int(training_data.shape[0] // self._batch_size)

        if verbose:
            print('{time}: Beginning training on {epochs} epochs'.format(
                time=datetime.datetime.now(), epochs=epochs))
        self.model.fit_generator(train_generator,
                                 steps_per_epoch=batches_per_epoch,
                                 epochs=epochs,
                                 verbose=verbose)

        if verbose:
            print('{time}: Finished training on {epochs} epochs'.format(
                time=datetime.datetime.now(), epochs=epochs))
Code example #6
File: utils.py Project: qkuang/tf_gbds
def get_network(name,
                input_dim,
                output_dim,
                hidden_dim,
                num_layers,
                PKLparams=None,
                batchnorm=False,
                is_shooter=False,
                row_sparse=False,
                add_pklayers=False,
                filt_size=None):
    """Return a NN with the specified parameters and a list of PKBias layers.
    """
    with tf.variable_scope(name):
        M = models.Sequential(name=name)
        PKbias_layers = []
        M.add(layers.InputLayer(input_shape=(None, input_dim), name="Input"))
        if batchnorm:
            M.add(layers.BatchNormalization(name="BatchNorm"))
        if filt_size is not None:
            M.add(
                layers.ZeroPadding1D(padding=(filt_size - 1, 0),
                                     name="ZeroPadding"))
            M.add(
                layers.Conv1D(filters=hidden_dim,
                              kernel_size=filt_size,
                              padding="valid",
                              activation=tf.nn.relu,
                              name="Conv1D"))

        for i in range(num_layers):
            with tf.variable_scope("PK_Bias"):
                if is_shooter and add_pklayers:
                    if row_sparse:
                        PK_bias = PKRowBiasLayer(M,
                                                 PKLparams,
                                                 name="PKRowBias_%s" % (i + 1))
                    else:
                        PK_bias = PKBiasLayer(M,
                                              PKLparams,
                                              name="PKBias_%s" % (i + 1))
                    PKbias_layers.append(PK_bias)
                    M.add(PK_bias)

            if i == num_layers - 1:
                M.add(
                    layers.Dense(output_dim,
                                 activation="linear",
                                 name="Dense_%s" % (i + 1)))
            else:
                M.add(
                    layers.Dense(hidden_dim,
                                 activation="relu",
                                 name="Dense_%s" % (i + 1)))

        return M, PKbias_layers
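A hypothetical call, leaving the PKBias machinery off (those flags default to False): this yields a plain three-layer MLP from 10-dimensional inputs to 2-dimensional outputs, plus an empty list of bias layers. The scope name and dimensions are ours.

net, pk_layers = get_network("gen_net", input_dim=10, output_dim=2,
                             hidden_dim=64, num_layers=3)
# pk_layers == [] because is_shooter/add_pklayers were left False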
Code example #7
def create_model(dropout_rate):
    model = models.Sequential()
    conv_base = applications.VGG16(include_top=False,
                                   input_shape=(150, 150, 3),
                                   weights='imagenet')
    conv_base.trainable = False
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dropout(dropout_rate))
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    return model
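A hedged usage sketch for create_model above; the compile settings are our assumption, as the snippet stops at construction. Because the VGG16 base is frozen, only the new Dense head trains, so a small learning rate is typical:

model = create_model(dropout_rate=0.5)
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=2e-5),
              metrics=['acc'])
# model.fit(train_images, train_labels, epochs=30, batch_size=20)  # hypothetical data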
Code example #8
def test_pkrowbiaslayer():
    batch_size = 16
    num_inputs = 8
    NN = models.Sequential()
    NN.add(layers.InputLayer(batch_input_shape=(batch_size, num_inputs)))
    params = {'a': 1, 'b': 1}
    nbatches = 4
    Input = np.random.randn(1, num_inputs)

    l = PKRowBiasLayer(NN, params)
    assert isinstance(l, layers.Layer)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        npt.assert_array_equal(l.mode.eval(), np.zeros(4))
        alpha = np.tile((params['a'] + num_inputs / 2), (4, 1))
        npt.assert_array_equal(l.alpha.eval(), alpha)
        beta = np.tile(params['b'], (4, 1))
        npt.assert_array_equal(l.beta.eval(), beta)

        sigma = np.log(np.exp(l.unc_sig.eval()) + 1)
        npt.assert_allclose(l.sigma.eval(), sigma, atol=1e-5, rtol=1e-4)

        gamma = l.mu.eval() + tf.random_normal(shape=[4, num_inputs],
                                               seed=1234).eval() * sigma
        npt.assert_allclose(l.gamma.eval(), gamma, atol=1e-5, rtol=1e-4)

        gamma = l.mu.eval() + tf.random_normal(shape=(4, num_inputs),
                                               seed=1234).eval() * sigma
        Input += np.dot(np.zeros(4).reshape((1, -1)), gamma)
        npt.assert_allclose(l.call(Input).eval(), Input, atol=1e-5, rtol=1e-4)

        beta = params['b'] + 0.5 * (l.mu.eval()**2 + sigma**2).sum(
            axis=1, keepdims=True)  # coord_update
        ELBO = (-0.5 * (l.mu.eval()**2 + sigma**2) * (alpha / beta) + 0.5 *
                (psi(alpha) - np.log(beta)) - 0.5 * np.log(2 * np.pi)).sum()
        ELBO += ((params['a'] - 1) * (psi(alpha) - np.log(beta)) -
                 params['b'] * (alpha / beta) +
                 params['a'] * np.log(params['b']) -
                 gammaln(params['a'])).sum()
        ELBO += (0.5 * np.log(2 * np.pi) + 0.5 + np.log(sigma)).sum()
        ELBO += (alpha - np.log(beta) + gammaln(alpha) +
                 (1 - alpha) * psi(alpha)).sum()
        npt.assert_allclose(l.get_ELBO(nbatches).eval(),
                            ELBO / nbatches,
                            atol=1e-5,
                            rtol=1e-4)
Code example #9
def create_model(dropout_rate):
    model = models.Sequential()
    model.add(
        layers.Conv2D(32, (3, 3), activation='relu',
                      input_shape=(150, 150, 3)))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dropout(dropout_rate))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    return model
Code example #10
File: neural_model_new.py Project: jimobuwu/Colab
def build_model(input_img, model_weights_file):
    _, h, w, d = input_img.shape
    input_arr = np.zeros((1, h, w, d))

    # load the pretrained VGG weights from the .mat file
    global vgg_layers
    vgg_rawnet = scipy.io.loadmat(model_weights_file)
    vgg_layers = vgg_rawnet['layers'][0]

    model = models.Sequential()
    model.add(conv_layer("conv1_1", 0))

    # model.compile(
    #     optimizer=tf.keras.optimizers.Adam,
    #     #? can a custom loss function be supplied here?
    #     loss=)
    model.fit(input_arr)  # will fail until the model is compiled and targets are provided
Code example #11
File: MCFF.py Project: amunozh/bcquit
def classifier_model():  #Building of the CNN
    model = models.Sequential()

    model.add(
        layers.Conv2D(1, [2, 40],
                      input_shape=(1, 40, 173),
                      strides=(1, 1),
                      padding='valid',
                      activation='relu'))
    #
    #model.add(layers.MaxPool1D(pool_size=2, strides=2, padding='valid'))
    #
    model.add(
        layers.Conv2D(1, [2, 20],
                      strides=(1, 1),
                      padding='valid',
                      activation='relu',
                      kernel_initializer=initializers.glorot_normal(),
                      bias_initializer=initializers.Zeros()))
    #
    # model.add(layers.MaxPool1D(pool_size=2, strides=2, padding='valid'))
    #
    #
    model.add(
        layers.Conv2D(1, [2, 10],
                      strides=(3, 3),
                      padding='valid',
                      activation='relu',
                      kernel_initializer=initializers.glorot_normal(),
                      bias_initializer=initializers.Zeros()))
    #
    # model.add(layers.MaxPool1D(pool_size=2, strides=2, padding='valid'))
    #
    model.add(layers.Flatten())
    #
    model.add(
        layers.Dense(1,
                     kernel_initializer=initializers.glorot_normal(),
                     bias_initializer=initializers.Zeros()))

    model.summary()
    return model
Code example #12
def generate_model():
    conv_base = tf.contrib.keras.applications.VGG16(include_top=False,
                                                    weights='imagenet',
                                                    input_shape=(IMG_WIDTH,
                                                                 IMG_HEIGHT,
                                                                 3))
    conv_base.trainable = True
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(
        layers.Dense(HIDDEN_SIZE,
                     name='dense',
                     kernel_regularizer=regularizers.l2(L2_LAMBDA)))
    model.add(layers.Dropout(rate=0.3, name='dropout'))
    model.add(
        layers.Dense(NUM_CLASSES, activation='softmax', name='dense_output'))
    model = multi_gpu_model(model, gpus=NUM_GPUS)
    model.summary()
    return model
Code example #13
File: ex2.py Project: engissa/Machine-Learning-Lab
def classifier_model():  # linear stack of layers

    ################################################################################
    ############################    YOUR CODE HERE   ################################

    # Define a Sequential model
    model = models.Sequential()
    # The first two layers are convolutional layers. For the first layer, we must specify the input shape.
    model.add(
        layers.Conv2D(
            NUM_FILTERS_1,
            3,
            strides=(2, 2),
            activation='relu',
            padding='same',
            input_shape=(28, 28, 1)))  # add a 2-D convolutional layer
    # A conv layer is defined by its number of filters and the filter size (3x3 here).
    # The stride of 2 is the amount the filter shifts at each step, so the
    # 28x28 input comes out as 14x14.
    # padding='same' zero-pads the borders so no edge pixels are dropped.
    # activation='relu' specifies the nonlinearity.
    # Only the first layer needs input_shape; the kernel and bias
    # initializers are left at their Keras defaults here.

    model.add(
        layers.Conv2D(NUM_FILTERS_2,
                      3,
                      strides=(2, 2),
                      activation='relu',
                      padding='same'))
    # also a convolutional layer
    # 3x3 filters , 2x2 strides
    # we don't specify the input shape
    # The final layer is a Dense layer, which expects 1-D input, so we must
    # first flatten the output of the previous layer.
    model.add(
        layers.Flatten()
    )  # reduce the feature maps to one dimension so a Dense layer can follow
    # (converts the multi-dimensional activations into a vector)
    model.add(layers.Dense(10))
    return model
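A quick shape check of the reasoning in those comments: with padding='same' and a stride of 2, each convolution halves the spatial size, so 28x28 becomes 14x14 and then 7x7 before flattening. The filter counts below are placeholder values for the exercise's constants:

NUM_FILTERS_1, NUM_FILTERS_2 = 32, 64  # assumed values
m = classifier_model()
for layer in m.layers:
    print(layer.name, layer.output_shape)
# expected: (None, 14, 14, 32), (None, 7, 7, 64), (None, 3136), (None, 10)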
Code example #14
def classifier_model():  #Building of the CNN
    model = models.Sequential()

    model.add(
        layers.Conv2D(1, [2, 10],
                      input_shape=(40, 44, 1),
                      strides=(1, 1),
                      padding='valid',
                      activation='relu',
                      data_format='channels_last'))

    model.add(
        layers.MaxPooling2D(pool_size=(2, 2),
                            strides=None,
                            padding='valid',
                            data_format=None))

    model.add(
        layers.Conv2D(1, [2, 6],
                      strides=(1, 1),
                      padding='valid',
                      activation='relu'))

    model.add(
        layers.MaxPooling2D(pool_size=(2, 2),
                            strides=None,
                            padding='valid',
                            data_format=None))

    model.add(
        layers.Conv2D(1, [2, 3],
                      strides=(1, 1),
                      padding='valid',
                      activation='relu'))

    model.add(layers.Flatten())

    model.add(layers.Dense(1))

    model.summary()
    return model
Code example #15
def build_model(input_seq_len, output_seq_len, num_samples, multi_gpus=False):

    RNN = layers.LSTM
    encoder_layers = 1
    decoder_layers = 2
    hidden_dim = 200
    model = models.Sequential()

    model.add(
        layers.TimeDistributed(layers.Dense(100, activation='relu'),
                               input_shape=(input_seq_len, 1)))
    for _ in range(encoder_layers):
        model.add(RNN(hidden_dim, return_sequences=True))
    model.add(RNN(hidden_dim, return_sequences=False))

    model.add(layers.RepeatVector(output_seq_len))
    for _ in range(decoder_layers):
        model.add(RNN(hidden_dim, return_sequences=True))
    model.add(layers.TimeDistributed(layers.Dense(1)))

    decay = 1. / num_samples
    optimizer = optimizers.Adam(lr=0.1, decay=decay)

    def score_func(y_true, y_pred):
        y_true = tf.reduce_sum(y_true, axis=1)
        y_pred = tf.reduce_sum(y_pred, axis=1)

        mae = tf.reduce_sum(tf.abs(y_true - y_pred))
        score = mae / tf.reduce_sum(y_true)
        return score

    if multi_gpus:
        model = keras.utils.multi_gpu_model(model, gpus=2)

    model.compile(loss='mean_squared_error',
                  optimizer=optimizer,
                  metrics=['mae'])

    print('model input shape: {0}'.format(model.input_shape))
    print('model output shape: {0}'.format(model.output_shape))
    return model
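A hypothetical invocation: forecast 24 output steps from 96-step input windows, with X shaped (num_samples, 96, 1) and Y shaped (num_samples, 24, 1). Note that score_func is defined above but never passed to compile; adding metrics=[score_func] would report it during training.

model = build_model(input_seq_len=96, output_seq_len=24, num_samples=1000)
# model.fit(X, Y, batch_size=32, epochs=10)  # X: (N, 96, 1), Y: (N, 24, 1)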
Code example #16
def keras():
    from tensorflow.contrib.keras import models, layers, metrics, activations, losses, optimizers

    dnn_model = models.Sequential()

    dnn_model.add(layers.Dense(units=13, input_dim=13, activation="relu"))
    dnn_model.add(layers.Dense(units=13, activation="relu"))
    dnn_model.add(layers.Dense(units=13, activation="relu"))
    dnn_model.add(layers.Dense(units=3, activation="softmax"))

    dnn_model.compile(
        optimizer="adam",
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy"]
    )

    dnn_model.fit(scaled_x_train, y_train, epochs=200)

    preds = dnn_model.predict_classes(scaled_x_test)

    print(classification_report(y_test, preds))
Code example #17
def test_pkbiaslayer():
    batch_size = 16
    num_inputs = 8
    NN = models.Sequential()
    NN.add(layers.InputLayer(batch_input_shape=(batch_size, num_inputs)))
    params = {'k': 1}
    nbatches = 4
    Input = np.random.randn(1, num_inputs)

    l = PKBiasLayer(NN, params)
    assert isinstance(l, layers.Layer)
    assert l.draw_on_every_output

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        npt.assert_array_equal(l.mode.eval(), np.zeros(4))
        s = np.exp(l.log_s.eval())
        npt.assert_allclose(l.s.eval(), s, atol=1e-5, rtol=1e-4)

        biases = l.m.eval() + tf.random_normal(shape=[4, num_inputs],
                                               seed=1234).eval() * s
        npt.assert_allclose(l.biases.eval(), biases, atol=1e-5, rtol=1e-4)

        Input += np.dot(np.zeros(4).reshape((1, -1)), biases)
        npt.assert_allclose(l.call(Input).eval(), Input, atol=1e-5, rtol=1e-4)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        s = np.exp(l.log_s.eval())
        biases = l.m.eval() + tf.random_normal(shape=[4, num_inputs],
                                               seed=1234).eval() * s
        ELBO_tf = (tf.reduce_sum(-tf.abs(tf.constant(biases)) / l.k -
                                 tf.log(tf.constant(2.0) * l.k)))
        ELBO_tf += tf.reduce_sum(tf.log(l.s))
        ELBO_np = (-abs(biases) / params['k'] - np.log(2 * params['k'])).sum()
        ELBO_np += np.log(s).sum()
        npt.assert_allclose((ELBO_tf / nbatches).eval(),
                            ELBO_np / nbatches,
                            atol=1e-5,
                            rtol=1e-4)
Code example #18
# data: csv_feature[n], labels: csv_label[n]

arr = os.listdir(directory)  # read the names of all the data files into a list
arr = sorted(arr)  # sort the file names alphabetically
csv_feature = [None] * int(len(arr) / 2)  # file names containing the data (features)
csv_label = [None] * int(len(arr) / 2)  # file names containing the classes (labels)
csv_feature, csv_label = arrs_filenames(
    directory, csv_feature, csv_label)  # loads the data file names (features)

dnn_keras_model = models.Sequential()
dnn_keras_model.add(layers.Dense(units=20, input_dim=8, activation='relu'))
dnn_keras_model.add(layers.Dense(units=20, activation='relu'))
# dnn_keras_model.add(layers.Dense(units=15, activation='relu'))  # extra layer
dnn_keras_model.add(layers.Dense(units=3, activation='softmax'))
dnn_keras_model.compile(optimizer='adam',
                        loss='sparse_categorical_crossentropy',
                        metrics=['accuracy'])

x_data, labels, feat_cols = feat_and_labe(
    csv_feature, csv_label)  # function that builds the data arrays (defined above)
X_train, X_test, y_train, y_test = train_test_split(
    x_data, labels, test_size=0.4, random_state=101)  # training and test sets
Code example #19
plt.savefig('classes_samples.png')  # save the figure with the sample spectrograms
# sys.exit()

# download and use the ImageNet weights for the convolutional base (InceptionV3)
conv_base = tf.contrib.keras.applications.InceptionV3(
    include_top=False,
    weights='imagenet',
    input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)  # 3 for the RGB channels
)
conv_base.summary()  # print info about the conv base

# NETWORK
# final layers on top of the conv base
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())

model.add(
    layers.Dense(512,
                 name='dense_1',
                 kernel_regularizer=regularizers.l2(L2_LAMBDA)))
model.add(layers.Activation(activation='relu', name='activation_1'))

model.add(layers.Dense(NUM_CLASSES, activation='softmax', name='dense_output'))
model.summary()

conv_base.trainable = False
model.summary()
Code example #20
    def _init_model(self):
        """Initialize the Keras model"""
        self.model = models.Sequential(self._all_layers)
Code example #21
def test_DLGMLayer():

    xDim = 2
    yDim = 5

    mu_nn = layers.Input((None, yDim))
    mu_nn_d = (layers.Dense(
        xDim * xDim,
        activation="linear",
        kernel_initializer=tf.orthogonal_initializer())(mu_nn))
    mu_net = models.Model(inputs=mu_nn, outputs=mu_nn_d)

    u_nn = layers.Input((None, yDim))
    u_nn_d = (layers.Dense(
        xDim * xDim,
        activation="linear",
        kernel_initializer=tf.orthogonal_initializer())(u_nn))
    u_net = models.Model(inputs=u_nn, outputs=u_nn_d)

    unc_d_nn = layers.Input((None, yDim))
    unc_d_nn_d = (layers.Dense(
        xDim * xDim,
        activation="linear",
        kernel_initializer=tf.orthogonal_initializer())(unc_d_nn))
    unc_d_net = models.Model(inputs=unc_d_nn, outputs=unc_d_nn_d)

    Data = np.random.randn(10, 5).astype(np.float32)

    rec_nets = ({'mu_net': mu_net, 'u_net': u_net, 'unc_d_net': unc_d_net})

    NN = models.Sequential()
    inputlayer = layers.InputLayer(batch_input_shape=(10, 5))
    NN.add(inputlayer)

    lm = DLGMLayer(NN, 4, rec_nets=rec_nets, k=-1)
    lm.calculate_xi(tf.constant(Data.astype(np.float32)))
    lm.get_ELBO(tf.constant(10.0))

    num_units = 4

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        W = lm.W.eval()
        b = lm.b.eval()
        G = lm.G.eval()
        batch_u = lm.batch_u.eval()
        batch_unc_d = lm.batch_unc_d.eval()
        batch_mu = lm.batch_mu.eval()
        batch_Tr_C_lm = lm.batch_Tr_C.eval()
        batch_ld_C_lm = lm.batch_ld_C.eval()
        batch_R_lm = lm.batch_R.eval()
        get_ELBO_lm = lm.get_ELBO(tf.constant(10.0)).eval()
        activation_lm = lm.call(tf.constant(Data, dtype=tf.float32),
                                use_rec_model=True).eval()

    batch_Tr_C = []
    batch_ld_C = []
    batch_R = []

    batch_u = batch_u.astype(np.float32)
    batch_unc_d = batch_unc_d.astype(np.float32)
    for i in range(batch_u.shape[0]):
        u = batch_u[i]
        unc_d = batch_unc_d[i]
        d = np.log1p(np.exp(np.maximum(unc_d, -15.0)), dtype=np.float32)
        D_inv = np.diag(1.0 / d)
        eta = 1.0 / (u.T.dot(D_inv).dot(u) + 1.0)
        C = D_inv - eta * D_inv.dot(u).dot(u.T).dot(D_inv)
        Tr_C = np.trace(C)
        ld_C = np.log(eta) - np.log(d).sum()  # eq 20 in DLGM
        # coeff = ((1 - T.sqrt(eta)) / (u.T.dot(D_inv).dot(u)))
        # simplified coefficient below is more stable as u -> 0
        # original coefficient from paper is above
        coeff = eta / (1.0 + np.sqrt(eta))
        R = np.sqrt(D_inv) - coeff * D_inv.dot(u).dot(u.T).dot(np.sqrt(D_inv))

        batch_Tr_C.append(Tr_C)
        batch_ld_C.append(ld_C)
        batch_R.append(R)

    batch_Tr_C = np.array(batch_Tr_C)
    batch_ld_C = np.array(batch_ld_C)
    batch_R = np.array(batch_R)

    npt.assert_allclose(batch_Tr_C_lm, batch_Tr_C, atol=1e-3, rtol=1e-4)
    npt.assert_allclose(batch_ld_C_lm, batch_ld_C, atol=1e-3, rtol=1e-4)
    npt.assert_allclose(batch_R_lm, batch_R, atol=1e-3, rtol=1e-4)

    KL_div = (0.5 * (np.sqrt((batch_mu**2).sum(axis=1)).sum() +
                     batch_Tr_C.sum() - batch_ld_C.sum() - 10.0))
    weight_reg = ((0.5 / -1) * np.sqrt((W**2).sum()) * np.sqrt((G**2).sum()))

    get_ELBO_np = -(weight_reg + KL_div)

    npt.assert_allclose(get_ELBO_np, get_ELBO_lm, atol=1e-5, rtol=1e-4)

    test_rand = np.random.normal(size=(batch_R.shape[0], num_units))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        batch_mu = lm.batch_mu.eval()
        batch_xi = (batch_mu + np.squeeze(
            np.matmul(lm.batch_R.eval(), np.expand_dims(test_rand, axis=2))))

        test_batch_xi = (lm.batch_mu + tf.squeeze(
            tf.matmul(lm.batch_R,
                      tf.expand_dims(tf.constant(test_rand, tf.float32), -1))))

        activation = np.matmul(np.maximum(Data, 0), W) + b
        xi = batch_xi
        activation += np.matmul(xi, G)

        inputs = tf.constant(Data, dtype=tf.float32)
        activation_lm = tf.matmul(lm.nonlinearity(inputs), lm.W) + lm.b
        activation_lm += tf.matmul(tf.constant(xi, tf.float32), lm.G)
        activation_lm = activation_lm.eval()

        npt.assert_allclose(batch_xi,
                            test_batch_xi.eval(),
                            atol=1e-5,
                            rtol=1e-4)
        npt.assert_allclose(activation_lm, activation, atol=1e-3, rtol=1e-4)
Code example #22
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
x_train_scaled = scaler.fit_transform(X_train)
x_test_scaled = scaler.transform(X_test)

"""
-----------------------------------------------------------------------------------------
TensorFlow Keras
-----------------------------------------------------------------------------------------
"""

#import keras
from tensorflow.contrib.keras import models

#create the keras dnn model
keras_dnn = models.Sequential()

#add model layers
from tensorflow.contrib.keras import layers
keras_dnn.add(layers.Dense(units=13, input_dim=13, activation='relu'))
keras_dnn.add(layers.Dense(units=13, activation='relu'))
keras_dnn.add(layers.Dense(units=13, activation='relu'))
keras_dnn.add(layers.Dense(units=3, activation='softmax'))

#compile the model
from tensorflow.contrib.keras import losses, optimizers, metrics
keras_dnn.compile(optimizer='adam',
                  loss=losses.sparse_categorical_crossentropy,
                  metrics=['accuracy'])

#train the model
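The snippet breaks off at the training comment; a natural continuation (our sketch, not the original author's code) would fit on the scaled features prepared at the top:

keras_dnn.fit(x_train_scaled, y_train, epochs=200)  # hypothetical continuation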
Code example #23
File: core.py Project: wangmn93/VaDE
    def _pretrain_layers(self,
                         pretrain_data,
                         batch_size=64,
                         epochs=10,
                         verbose=0):
        """
        Pretrain layers using stacked auto-encoders
        Parameters
        ----------
        pretrain_data : 2d array_like, (N, D)
            Data to use for pretraining. Can be the same as used for training
        batch_size : int, optional
        epochs : int, optional
        verbose : int, optional
            Verbosity level. Passed to Keras fit method
        Returns
        -------
            None. Layers trained in place
        """
        if verbose:
            print('{time}: Pretraining {num_layers:d} layers'.format(
                time=datetime.datetime.now(),
                num_layers=len(self._all_layers)))

        # for ind, end_layer in enumerate(self._all_layers):
        #     # print('Pre-training layer {0:d}'.format(ind))
        #     # Create AE and training
        #     cur_layers = self._all_layers[0:ind+1]
        #     ae = models.Sequential(cur_layers)
        #
        #     decoder1 = layers.Dense(2000, activation='relu')
        #     decoder2 = layers.Dense(500, activation='relu')
        #     decoder3 = layers.Dense(500, activation='relu')
        #     decoder4 = layers.Dense(784, activation='sigmoid')
        #     ae.add(decoder1)
        #     ae.add(decoder2)
        #     ae.add(decoder3)
        #     ae.add(decoder4)
        # from keras.callbacks import Callback
        # import my_utils
        # def epochBegin(epoch):
        #
        #
        #
        #     acc = my_utils.cluster_acc(np.argmax(gamma, axis=1), Y)
        #     global accuracy
        #     accuracy += [acc[0]]
        #     if epoch > 0:
        #         # print ('acc_gmm_on_z:%0.8f'%acc_g[0])
        #         print('acc_p_c_z:%0.8f' % acc[0])
        #
        #
        # class EpochBegin(Callback):
        #     def on_epoch_begin(self, epoch, logs={}):
        #         epochBegin(epoch)

        from keras.optimizers import SGD
        pretrain_optimizer = SGD(lr=0.001, momentum=0.9)  # defined but unused; 'adam' is used below
        ae = models.Sequential(self._all_layers)
        decoder1 = layers.Dense(2000, activation='relu')
        decoder2 = layers.Dense(500, activation='relu')
        decoder3 = layers.Dense(500, activation='relu')
        decoder4 = layers.Dense(784, activation='sigmoid')
        ae.add(decoder1)
        ae.add(decoder2)
        ae.add(decoder3)
        ae.add(decoder4)
        ae.compile(loss='binary_crossentropy', optimizer='adam')
        ae.fit(pretrain_data,
               pretrain_data,
               batch_size=batch_size,
               epochs=epochs,
               verbose=verbose)

        self.model = models.Sequential(self._all_layers)

        if verbose:
            print('{time}: Finished pretraining'.format(
                time=datetime.datetime.now()))
Code example #24
    def _init_model(self):
        """Initialize the loss function and the Keras model"""
        self._init_loss_func()
        self.model = models.Sequential(self._all_layers)
Code example #25
def create_model(max_features, n_hidden):
    model = models.Sequential()
    model.add(layers.Embedding(input_dim=max_features, output_dim=n_hidden))
    model.add(layers.LSTM(n_hidden))
    model.add(layers.Dense(1, activation='sigmoid'))
    return model
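A hedged usage sketch for create_model above: binary classification over integer-encoded, padded text sequences (the IMDB reviews dataset would fit; the data source and settings are our assumptions).

model = create_model(max_features=10000, n_hidden=32)
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
# x_train: (N, maxlen) int word indices, y_train: (N,) 0/1 labels
# model.fit(x_train, y_train, epochs=10, batch_size=128, validation_split=0.2)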