Code Example #1
def final3D(path):
    checkpoint_dir = os.path.dirname(path)
    model = Sequential()
    #encoder
    #input volume: (NUM_CHANNELS, IMG_HEIGHT, IMG_WIDTH, 1)
    model.add(
        InputLayer(input_shape=(config.NUM_CHANNELS, config.IMG_HEIGHT,
                                config.IMG_WIDTH, 1)))

    model.add(
        Conv3D(128,
               3,
               padding="same",
               kernel_regularizer=tf.keras.regularizers.l2(1e-10)))
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(MaxPool3D(pool_size=(1, 2, 2)))

    model.add(
        Conv3D(64,
               3,
               padding="same",
               kernel_regularizer=tf.keras.regularizers.l2(1e-10)))
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(MaxPool3D(pool_size=(2, 2, 2)))

    model.add(
        Conv3D(32,
               1,
               padding="same",
               kernel_regularizer=tf.keras.regularizers.l2(1e-10)))
    model.add(BatchNormalization())
    model.add(ReLU())

    model.add(
        Conv3D(64,
               3,
               padding="same",
               strides=(2, 2, 2),
               kernel_regularizer=tf.keras.regularizers.l2(1e-10)))
    model.add(BatchNormalization())
    model.add(ReLU())
    # model.add(UpSampling3D(size=(2,2,2)))

    model.add(
        Conv3D(128,
               3,
               padding="same",
               strides=(1, 2, 2),
               kernel_regularizer=tf.keras.regularizers.l2(1e-10)))
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(UpSampling3D(size=(1, 2, 2)))
    model.add(Conv3D(1, 3, activation='sigmoid', padding='same'))

    model.compile(loss='mean_squared_error', optimizer=Adam())
    cp_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=path,
        # monitor='val_loss',
        save_weights_only=True,
        # save_best_only=True,
        verbose=1,
        save_freq='epoch')

    latest = tf.train.latest_checkpoint(checkpoint_dir)
    print(latest)
    if latest is not None:
        model.load_weights(latest)
        print('weights loaded')
    model.save(path + 'model.h5')

    return model, cp_callback
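
A minimal usage sketch (not from the original source; the checkpoint prefix and training data are assumptions):

# Sketch: assumes config.* is set and X_train has shape
# (n, config.NUM_CHANNELS, config.IMG_HEIGHT, config.IMG_WIDTH, 1) scaled to [0, 1].
model, cp_callback = final3D('checkpoints/final3d_')
model.fit(X_train, X_train,  # autoencoder: the input is its own target
          epochs=10,
          batch_size=4,
          callbacks=[cp_callback])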
Code Example #2
File: test_saving.py  Project: wazzed/alibi-detect
                             OutlierVAE, OutlierVAEGMM, OutlierProphet,
                             SpectralResidual, OutlierSeq2Seq, OutlierAE)
from alibi_detect.utils.saving import save_detector, load_detector

input_dim = 4
latent_dim = 2
n_gmm = 2
threshold = 10.
samples = 5
seq_len = 10
p_val = .05
X_ref = np.random.rand(samples * input_dim).reshape(samples, input_dim)

# define encoder and decoder
encoder_net = tf.keras.Sequential([
    InputLayer(input_shape=(input_dim, )),
    Dense(5, activation=tf.nn.relu),
    Dense(latent_dim, activation=None)
])

decoder_net = tf.keras.Sequential([
    InputLayer(input_shape=(latent_dim, )),
    Dense(5, activation=tf.nn.relu),
    Dense(input_dim, activation=tf.nn.sigmoid)
])

kwargs = {'encoder_net': encoder_net, 'decoder_net': decoder_net}

preprocess_kwargs = {'model': UAE(encoder_net=encoder_net)}

gmm_density_net = tf.keras.Sequential([
Code Example #3
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense, SimpleRNN, Conv1D, InputLayer

number_of_steps = 50
series = time.generate_time_series(batch_size=10000,
                                   number_of_steps=number_of_steps + 1)

X_train, y_train = series[:7000, :number_of_steps], series[:7000, -1]
X_valid, y_valid = series[7000:9000, :number_of_steps], series[7000:9000, -1]
X_test, y_test = series[9000:, :number_of_steps], series[9000:, -1]

np.random.seed(42)
tf.random.set_seed(42)

model = Sequential([
    InputLayer(input_shape=[number_of_steps, 1]),
    # WaveNet-style stack: one causal Conv1D per dilation rate; dilation_rate
    # must be a single int per layer, so the list builds one layer per rate
    *[Conv1D(filters=20,
             kernel_size=2,
             padding='causal',
             activation='relu',
             dilation_rate=rate) for rate in (1, 2, 4, 8) * 2],
    Conv1D(filters=10, kernel_size=1),
    Flatten(),
    Dense(1)  # predict the single next value, matching y_train's shape
])

model.compile(optimizer='adam', loss='mse')

history = model.fit(x=X_train,
                    y=y_train,
                    epochs=20,
                    validation_data=(X_valid, y_valid))
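
A short follow-up sketch (not in the original excerpt): score the fitted network on the held-out split defined above.

test_mse = model.evaluate(X_test, y_test)
print('test MSE:', test_mse)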
Code Example #4
from tensorflow.keras import Sequential
from tensorflow.keras.layers import InputLayer, Dense
import numpy as np
import gym
import matplotlib.pylab as plt

# Build simple sequential model with one flat hidden layer
model = Sequential()
model.add(InputLayer(batch_input_shape=(1, 5)))
model.add(Dense(10, activation='sigmoid'))
model.add(Dense(2, activation='linear'))
model.compile(optimizer='adam', loss='mse', metrics=['mae'])

# Gym initialization
env = gym.make('NChain-v0')
num_episodes = 100

y = 0.95
eps = 0.5
decay_factor = 0.999
r_avg_list = []

for i in range(num_episodes):
    game_step = 0
    eps *= decay_factor
    r_sum = 0
    done = False

    s = env.reset()

    print("Episode {} of {}".format(i + 1, num_episodes))
Code Example #5
    def build_model(self) -> None:
        """
        Build and compile a sequential ML model using TensorFlow to detect
        a single character with convolutional layers, max pooling layers,
        and fully-connected layers.
        """

        # build ML model to detect a single character by using CNN, max pooling
        # and fully connected layers
        model = Sequential()

        # add input layer that receives a gray-scale image of predefined size
        model.add(InputLayer(input_shape=consts.IMAGE_SIZE + (1, )))
        # add convolutional layers with zero-padding (in order to retain
        # image size) and rectified linear unit activations
        model.add(
            Conv2D(filters=32,
                   kernel_size=(3, 3),
                   activation=relu,
                   padding='same'))
        model.add(
            Conv2D(filters=64,
                   kernel_size=(3, 3),
                   activation=relu,
                   padding='same'))
        # add max pooling layer to reduce image dimensions by a factor of 2 (reduce
        # total size by 4) and remove unwanted noise
        model.add(MaxPooling2D(pool_size=(2, 2), strides=2))

        # add another set of convolutional and max pooling layers, while
        # increasing the amount of filters per layer
        model.add(
            Conv2D(filters=128,
                   kernel_size=(3, 3),
                   activation=relu,
                   padding='same'))
        # add dropout layer to prevent overfitting
        model.add(Dropout(0.15))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=2))

        model.add(
            Conv2D(filters=256,
                   kernel_size=(3, 3),
                   activation=relu,
                   padding='same'))
        model.add(Dropout(0.15))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=2))

        # flatten the output from max pooling layer (2D output) into a
        # one-dimensional tensor
        model.add(Flatten())

        # add fully-connected layer after flattening and another dropout layer that
        # randomly turns off some percentage of neurons
        model.add(Dense(units=256, activation=relu))
        model.add(Dropout(0.40625))

        # add final output layer with 36 nodes (26 letters + 10 digits) with
        # activation of softmax to determine the probability of the input image
        # being some character
        model.add(Dense(units=36, activation=softmax))

        model.summary()

        # compile the model topography, set loss function, optimizer, and learning rate
        model.compile(optimizer=Adam(learning_rate=self.LR),
                      loss=categorical_crossentropy,
                      metrics=['accuracy'])

        self._model = model
        self._model_built = True
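
A hypothetical driver for the method above (the instance and data names are assumptions, not from the source):

# Sketch: `detector` is an instance of the enclosing class; X has shape
# (n,) + consts.IMAGE_SIZE + (1,) scaled to [0, 1], y is one-hot over 36 classes.
detector.build_model()
history = detector._model.fit(X, y,
                              validation_split=0.1,
                              epochs=20,
                              batch_size=64)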
Code Example #6
def make_q_function():
    return tf.keras.models.Sequential([InputLayer(input_shape),
                                       *model_fn(),
                                       Dense(self.n_actions)])
Code Example #7
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, InputLayer
from alibi_detect.cd.tensorflow import UAE, HiddenOutput

n, n_features, n_classes, latent_dim, n_hidden = 100, 10, 5, 2, 7
shape = (n_features, )
X = np.random.rand(n * n_features).reshape(n, n_features).astype('float32')

encoder_net = tf.keras.Sequential(
    [InputLayer(input_shape=(n_features, )),
     Dense(latent_dim)])

tests_uae = [encoder_net, latent_dim]
n_tests_uae = len(tests_uae)


@pytest.fixture
def uae_params(request):
    return tests_uae[request.param]


@pytest.mark.parametrize('uae_params', list(range(n_tests_uae)), indirect=True)
def test_uae(uae_params):
    enc = uae_params
    if isinstance(enc, tf.keras.Sequential):
        encoder_net, enc_dim = enc, None
    elif isinstance(enc, int):
        encoder_net, enc_dim = None, enc
    X_enc = UAE(encoder_net=encoder_net, shape=X.shape[1:], enc_dim=enc_dim)(X)
Code Example #8
X_train = X_train / 255.0
X_test = X_test / 255.0

X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
Y_train = np_utils.to_categorical(Y_train, TOTAL_CLASS)
Y_test = np_utils.to_categorical(Y_test, TOTAL_CLASS)

print('====== Data Shaping info ======')
print('X train shape:', X_train.shape)
print('Y train shape:', Y_train.shape)
print('X test shape:', X_test.shape)
print('Y test shape:', Y_test.shape)

model = Sequential()
model.add(InputLayer(input_shape=(28, 28, 1)))
model.add(Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu'))
model.add(MaxPool2D(padding='same', pool_size=(2, 2)))
model.add(Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu'))
model.add(MaxPool2D(padding='same', pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(TOTAL_CLASS, activation='softmax'))
model.summary()

sample = X_train[my_sample]
sample = sample.reshape(1, 28, 28, 1)
pred = model.predict(sample)
print('Prediction before learning:', np.argmax(pred), '\n')
print('Label is:', np.argmax(Y_train[my_sample]), '\n')
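
The excerpt stops before training; a minimal sketch of the steps that would normally follow (the epoch and batch-size values are assumptions):

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, Y_train, epochs=5, batch_size=128, validation_data=(X_test, Y_test))

pred = model.predict(sample)
print('Prediction after learning:', np.argmax(pred))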
Code Example #9
#%% Data augmentation
def normal_augmentation(input, output, seed, std=0.2, times=5):
    np.random.seed(seed)  # make the augmentation reproducible
    aug_input = np.vstack([(input * std * np.random.normal(size=input.shape) + input) for i in range(times)])
    aug_input = np.vstack([input, aug_input])
    aug_output = np.vstack([output for i in range(times + 1)])
    return aug_input, aug_output
X_aug, Y_C_aug = normal_augmentation(X_train, Y_C_train, seed = 1, times = 20)
X_aug, Y_T_aug = normal_augmentation(X_train, Y_T_train, seed = 1, times = 20)

#%% Create model
batch_size = 100
n_hidden = 100
n_in = 2
model_c = Sequential()
model_c.add(InputLayer(input_shape=(time_len,2)))
model_c.add(BatchNormalization())
#model_c.add(LSTM(n_hidden, batch_input_shape =(None, time_len, 2),
model_c.add(LSTM(n_hidden, dropout=0.2, kernel_initializer = 'glorot_uniform',return_sequences=False))
#model_c.add(LSTM(n_hidden, dropout=0.2,kernel_initializer = 'glorot_uniform',return_sequences=True))
#model_c.add(TimeDistributed(Dense(1)))
model_c.add(Dense(1))
model_c.add(Activation("relu"))
ADAM = optimizers.Adam()
model_c.compile(loss='mean_squared_error',optimizer=ADAM)
model_c.summary()


#%%
model_t = Sequential()
model_t.add(InputLayer(input_shape=(time_len,2)))
Code Example #10
def deep_network_basic(in_n, hidden, out_n, hidden_activation,
                       output_activation, lrate, dropout, l1, l2, metrics_):
    model = Sequential()

    # Construct model
    # First, check for Lx regularization
    if dropout is None:
        dropout = 0

    if l1 != 0 or l2 != 0:
        model.add(InputLayer(input_shape=(in_n, )))
        for idx, layer_n in enumerate(hidden):
            title = "hidden" + str(idx)
            # Check for either l1 or l2 regularization
            # And add hidden layers
            if l1 != 0:
                model.add(
                    Dense(layer_n,
                          use_bias=True,
                          name=title,
                          activation=hidden_activation,
                          kernel_initializer="he_normal",
                          kernel_regularizer=keras.regularizers.l1(l1)))
            elif l2 != 0:
                model.add(
                    Dense(layer_n,
                          use_bias=True,
                          name=title,
                          activation=hidden_activation,
                          kernel_initializer="he_normal",
                          kernel_regularizer=keras.regularizers.l2(l2)))
        model.add(
            Dense(out_n,
                  use_bias=True,
                  name="output",
                  activation=output_activation))
    # If no LxReg, then build model without it
    elif dropout == 0:
        # construct model without dropout
        model.add(InputLayer(input_shape=(in_n, )))
        for idx, layer_n in enumerate(hidden):
            title = "hidden_" + str(idx)
            model.add(
                Dense(layer_n,
                      use_bias=True,
                      name=title,
                      activation=hidden_activation))
        model.add(
            Dense(out_n,
                  use_bias=True,
                  name="output",
                  activation=output_activation))
    elif dropout != 0:
        model.add(Dropout(dropout, input_shape=(in_n, )))
        # Add dropout in the input layer and in the hidden layers
        for idx, layer_n in enumerate(hidden):
            title = "hidden" + str(idx)
            model.add(
                Dense(layer_n,
                      use_bias=True,
                      name=title,
                      activation=hidden_activation))
            model.add(Dropout(dropout))
        model.add(
            Dense(out_n,
                  use_bias=True,
                  name="output",
                  activation=output_activation))

    # other method
    # Lx regularization
    # if other == true:
    #     RegularizedDense = partial(keras.layers.Dense,
    #         activation="elu",
    #         kernel_initializer=keras.regularizers.l2(0.01))
    #     model = keras.models.Sequential([
    #         keras.layers.Flatten(input_shape=(in_n,),
    #         RegularizedDense(1000),
    #         RegularizedDense(100),
    #         RegularizedDense(10, activation="elu",
    #         kernel_initalizer="glorot_uniform")))
    #     ])

    # Optimizer
    opt = tf.keras.optimizers.Adam(learning_rate=lrate,
                                   beta_1=0.9,
                                   beta_2=0.999,
                                   epsilon=1e-7,
                                   amsgrad=False)

    # Bind the optimizer and the loss function to the model
    model.compile(loss='mse', optimizer=opt, metrics=metrics_)

    # Generate an ASCII representation of the architecture
    model.summary()
    return model
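
A hypothetical call (all argument values are assumptions): a 10-input regression network with two hidden layers and L2 regularization.

model = deep_network_basic(in_n=10,
                           hidden=[64, 32],
                           out_n=1,
                           hidden_activation='elu',
                           output_activation=None,
                           lrate=1e-3,
                           dropout=None,
                           l1=0,
                           l2=1e-4,
                           metrics_=['mse'])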
Code Example #11
File: test_ks.py  Project: yt114/alibi-detect
def test_ksdrift(ksdrift_params):
    n_features, n_enc, preprocess, alternative, correction, update_X_ref = ksdrift_params
    np.random.seed(0)
    X_ref = np.random.randn(n * n_features).reshape(
        n, n_features).astype('float32')
    n_infer = 2
    preprocess_fn, preprocess_kwargs = preprocess
    if isinstance(preprocess_fn, Callable):
        if preprocess_fn.__name__ == 'uae' and n_features > 1 and isinstance(
                n_enc, int):
            tf.random.set_seed(0)
            encoder_net = tf.keras.Sequential(
                [InputLayer(input_shape=(n_features, )),
                 Dense(n_enc)])
            preprocess_kwargs['encoder_net'] = encoder_net
        elif preprocess_fn.__name__ == 'hidden_output':
            model = mymodel((n_features, ))
            preprocess_kwargs['model'] = model
        elif preprocess_fn.__name__ == 'pca' and isinstance(n_enc, int):
            if n_enc < n_features:
                preprocess_kwargs['n_components'] = n_enc
                n_infer = n_enc
            else:
                preprocess_fn, preprocess_kwargs = None, None
        else:
            preprocess_fn, preprocess_kwargs = None, None
    else:
        preprocess_fn, preprocess_kwargs = None, None

    cd = KSDrift(p_val=.05,
                 X_ref=X_ref,
                 update_X_ref=update_X_ref,
                 preprocess_fn=preprocess_fn,
                 preprocess_kwargs=preprocess_kwargs,
                 correction=correction,
                 alternative=alternative,
                 n_infer=n_infer)
    X = X_ref.copy()
    preds_batch = cd.predict(X, drift_type='batch', return_p_val=True)
    assert preds_batch['data']['is_drift'] == 0
    k = list(update_X_ref.keys())[0]
    assert cd.n == X.shape[0] + X_ref.shape[0]
    assert cd.X_ref.shape[0] == min(update_X_ref[k],
                                    X.shape[0] + X_ref.shape[0])

    preds_feature = cd.predict(X, drift_type='feature', return_p_val=True)
    assert preds_feature['data']['is_drift'].shape[0] == cd.n_features
    preds_by_feature = (preds_feature['data']['p_val'] < cd.p_val).astype(int)
    assert (preds_feature['data']['is_drift'] == preds_by_feature).all()

    np.random.seed(0)
    X_randn = np.random.randn(n * n_features).reshape(
        n, n_features).astype('float32')
    mu, sigma = 5, 5
    X_low = sigma * X_randn - mu
    X_high = sigma * X_randn + mu
    preds_batch = cd.predict(X_high, drift_type='batch')
    if alternative != 'less':
        assert preds_batch['data']['is_drift'] == 1
    preds_batch = cd.predict(X_low, drift_type='batch')
    if alternative != 'greater':
        assert preds_batch['data']['is_drift'] == 1
Code Example #12
def test_lsdd(lsdd_params):
    n_features, n_enc, preprocess, n_permutations, update_x_ref, preprocess_x_ref = lsdd_params

    np.random.seed(0)

    x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(
        np.float32)
    preprocess_fn, preprocess_kwargs = preprocess
    to_list = False
    if hasattr(preprocess_fn,
               '__name__') and preprocess_fn.__name__ == 'preprocess_list':
        if not preprocess_x_ref:
            return
        to_list = True
        x_ref = [_[None, :] for _ in x_ref]
    elif isinstance(preprocess_fn, Callable):
        if 'layer' in list(preprocess_kwargs.keys()) \
                and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
            model = mymodel((n_features, ))
            layer = preprocess_kwargs['layer']
            preprocess_fn = partial(preprocess_fn,
                                    model=HiddenOutput(model=model,
                                                       layer=layer))
        elif preprocess_kwargs['model'].__name__ == 'UAE' \
                and n_features > 1 and isinstance(n_enc, int):
            tf.random.set_seed(0)
            encoder_net = tf.keras.Sequential(
                [InputLayer(input_shape=(n_features, )),
                 Dense(n_enc)])
            preprocess_fn = partial(preprocess_fn,
                                    model=UAE(encoder_net=encoder_net))
        else:
            preprocess_fn = None
    else:
        preprocess_fn = None

    cd = LSDDDriftTF(x_ref=x_ref,
                     p_val=.05,
                     preprocess_x_ref=preprocess_x_ref if isinstance(
                         preprocess_fn, Callable) else False,
                     update_x_ref=update_x_ref,
                     preprocess_fn=preprocess_fn,
                     n_permutations=n_permutations)

    perturbation = np.random.normal(
        size=(n, n_features)) / 100  # LSDD struggles with copies/repeats
    x = x_ref.copy() + perturbation.astype(np.float32)
    preds = cd.predict(x, return_p_val=True)
    assert preds['data']['is_drift'] == 0 and preds['data']['p_val'] >= cd.p_val
    if isinstance(update_x_ref, dict):
        k = list(update_x_ref.keys())[0]
        assert cd.n == len(x) + len(x_ref)
        assert cd.x_ref.shape[0] == min(update_x_ref[k], len(x) + len(x_ref))

    x_h1 = np.random.randn(n * n_features).reshape(n, n_features).astype(
        np.float32)
    if to_list:
        x_h1 = [_[None, :] for _ in x_h1]
    preds = cd.predict(x_h1, return_p_val=True)
    if preds['data']['is_drift'] == 1:
        assert preds['data']['p_val'] < preds['data']['threshold'] == cd.p_val
        assert preds['data']['distance'] > preds['data']['distance_threshold']
    else:
        assert preds['data']['p_val'] >= preds['data']['threshold'] == cd.p_val
        assert preds['data']['distance'] <= preds['data']['distance_threshold']
Code Example #13
def conv3d(filters,
           latentDim,
           path,
           batch=False,
           dropout=False,
           filter_size=3):

    checkpoint_dir = os.path.dirname(path)
    model = Sequential()
    #encoder
    #input volume: (NUM_CHANNELS, IMG_HEIGHT, IMG_WIDTH, 1)
    model.add(
        InputLayer(input_shape=(config.NUM_CHANNELS, config.IMG_HEIGHT,
                                config.IMG_WIDTH, 1)))

    for f in filters:
        model.add(
            Conv3D(f,
                   filter_size,
                   strides=2,
                   activation='relu',
                   padding="same"))
        if (dropout): model.add(Dropout(0.2))
        if (batch): model.add(BatchNormalization())
        # model.add(MaxPool3D(pool_size=(1,2,2)))
    if (latentDim is not None):
        # model.add(Flatten())
        model.add(
            Conv3D(latentDim, 1, strides=1, activation='relu', padding="same"))
        if (batch): model.add(BatchNormalization())
    # model.add(Flatten())
    # model.add(Dense(latentDim))
    for f in reversed(filters):
        # apply a CONV_TRANSPOSE => RELU => BN operation
        model.add(
            Conv3DTranspose(f,
                            filter_size,
                            activation='relu',
                            strides=2,
                            padding="same"))
        if (dropout): model.add(Dropout(0.2))
        if (batch): model.add(BatchNormalization())

    model.add(Conv3D(1, filter_size, activation='sigmoid', padding='same'))
    if (config.NUM_CHANNELS % (2**len(filters)) != 0):
        dim = config.NUM_CHANNELS
        for i in range(len(filters)):
            if (dim % 2 != 0):
                dim = int(dim / 2)
                dim += 1
            else:
                dim = int(dim / 2)
        print(dim)
        croppingFactor = int(
            (dim * (2**len(filters)) - config.NUM_CHANNELS) / 2)
        model.add(
            Cropping3D(cropping=((croppingFactor, croppingFactor), (0, 0),
                                 (0, 0))))

    model.summary()

    model.compile(loss='mean_squared_error', optimizer=Adam())
    cp_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=path,
        # monitor='val_loss',
        save_weights_only=True,
        # save_best_only=True,
        verbose=1,
        save_freq='epoch')

    latest = tf.train.latest_checkpoint(checkpoint_dir)
    print(latest)
    if latest is not None:
        model.load_weights(latest)
        print('weights loaded')

    return model, cp_callback
Code Example #14
def modular2d3dims(filters,
                   latentDim,
                   path,
                   batch=False,
                   dropout=False,
                   filter_size=3):

    checkpoint_dir = os.path.dirname(path)
    model = Sequential()
    #encoder
    #input volume: (NUM_CHANNELS, IMG_HEIGHT, IMG_WIDTH), channels-first
    model.add(
        InputLayer(input_shape=(config.NUM_CHANNELS, config.IMG_HEIGHT,
                                config.IMG_WIDTH)))
    for f in filters:
        model.add(
            Conv2D(f,
                   filter_size,
                   strides=2,
                   activation='relu',
                   padding="same",
                   data_format="channels_first"))
        if (dropout): model.add(Dropout(0.2))
        if (batch): model.add(BatchNormalization())
        # model.add(MaxPooling2D(pool_size=(2, 2)))
    if (latentDim is not None):
        # model.add(Flatten())
        model.add(
            Conv2D(latentDim, (1, 1),
                   strides=1,
                   activation='relu',
                   padding="same",
                   data_format="channels_first"))
        if (batch): model.add(BatchNormalization())
    for f in reversed(filters):
        # apply a CONV_TRANSPOSE => RELU => BN operation
        model.add(
            Conv2DTranspose(f,
                            filter_size,
                            activation='relu',
                            strides=2,
                            padding="same",
                            data_format="channels_first"))
        if (dropout): model.add(Dropout(0.2))
        if (batch): model.add(BatchNormalization())

    # model.add(Reshape((20, config.IMG_HEIGHT, config.IMG_WIDTH)))
    model.add(
        Conv2D(config.NUM_CHANNELS,
               filter_size,
               activation='sigmoid',
               padding='same',
               data_format="channels_first"))
    # model.add(Cropping2D((1)))

    model.summary()

    model.compile(loss='mean_squared_error', optimizer=Adam())
    cp_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=path,
        # monitor='val_loss',
        save_weights_only=True,
        # save_best_only=True,
        verbose=1,
        save_freq='epoch')

    latest = tf.train.latest_checkpoint(checkpoint_dir)
    print(latest)
    if latest is not None:
        model.load_weights(latest)
        print('weights loaded')

    return model, cp_callback
Code Example #15
import tensorflow.keras
import pygad.kerasga
from pygad.kerasga import KerasGA
import numpy
import pygad
from tensorflow.keras.initializers import Ones, ones
from tensorflow.keras.losses import MeanSquaredError, BinaryCrossentropy, MeanAbsoluteError
from tensorflow.keras import Model
from tensorflow.keras.utils import plot_model, pack_x_y_sample_weight
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import InputLayer, Dense
from tensorflow.keras.activations import sigmoid
from pygad.kerasga import predict

# Create a Keras model
model = Sequential([
    InputLayer(input_shape=(2, )),
    Dense(units=2, use_bias=True, bias_initializer=Ones(), activation=sigmoid),
    Dense(units=1, use_bias=True, bias_initializer=Ones(), activation=sigmoid)
])

# Create an instance of the pygad.kerasga.KerasGA class.
kerasGA = KerasGA(model=model, num_solutions=9)

# Prepare the Training Data
# XOR problem inputs
data_inputs = numpy.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])

# XOR problem outputs
data_outputs = numpy.array([[0.0], [1.0], [1.0], [0.0]])

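The excerpt stops before the genetic-algorithm run; a sketch of the usual pygad.kerasga loop (hyperparameters are assumptions, and in PyGAD versions before 3.x the fitness function takes only (solution, sol_idx)):

def fitness_func(ga_instance, solution, sol_idx):
    # higher fitness for lower binary cross-entropy on the XOR data
    predictions = predict(model=model, solution=solution, data=data_inputs)
    bce = BinaryCrossentropy()
    return 1.0 / (bce(data_outputs, predictions).numpy() + 1e-8)

ga_instance = pygad.GA(num_generations=250,
                       num_parents_mating=5,
                       initial_population=kerasGA.population_weights,
                       fitness_func=fitness_func)
ga_instance.run()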
Code Example #16
#Fit the encoder instance to training label
Y_train = le.fit_transform(Y_train.astype(str))

#Encode the test labels
Y_test = le.transform(Y_test.astype(str))

#Transform Y_ into a binary vector
Y_train = to_categorical(Y_train)
Y_test = to_categorical(Y_test)

####......Design the model....########
#Initialize the model
model = Sequential()

#Create an input layer instance for model
model.add(InputLayer(input_shape=(X_train.shape[1],)))
#create a hidden layer
model.add(Dense(12, activation='relu'))
#create output layer
model.add(Dense(2, activation='softmax'))

#Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

####.....Train and Evaluate the Model.....######

model.fit(X_train, Y_train, epochs=100, batch_size=16, verbose=1)

#model evaluate
loss, acc = model.evaluate(X_test, Y_test, verbose=0)
print("Loss", loss, "Accuracy", acc)
Code Example #17
            tag = m.group(2)
            sent.append(word_to_int[word])
            if n == focus_position:
                int_tag = tag_to_int[tag]
        n += 1
    X.append(sent)
    y.append(int_tag)

X = pad_sequences(X, maxlen=maxlen, padding='post')
#y = pad_sequences(y, maxlen=maxlen, padding='post')

#X=np.array(X[:300])
#y=np.array(y[:300])

model = Sequential()
model.add(InputLayer(input_shape=(maxlen, )))
model.add(Embedding(vocab_len, 128))
model.add(Bidirectional(LSTM(256, return_sequences=True)))
model.add(Attention())
model.add(Dense(len(Tags) + 1))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer=Adam(0.001),
              metrics=['accuracy'])

model.summary()

#plot_model(model, to_file='LSTM_pos_model.png')

X_train, X_test, y_train, y_test = train_test_split(X,
Code Example #18
File: Hi_Lo_Fz.py  Project: ahmerali94/MUSP
def model_train(model_add, x_train, y_train, x_test, y_test):

    # build a sequential model
    model = Sequential()
    model.add(InputLayer(input_shape=(512, 512, 1)))

    # 1st conv block
    model.add(
        Conv2D(25, (5, 5), activation='relu', strides=(1, 1), padding='same'))
    model.add(MaxPool2D(pool_size=(2, 2), padding='same'))
    # 2nd conv block
    model.add(
        Conv2D(50, (5, 5), activation='relu', strides=(2, 2), padding='same'))
    model.add(MaxPool2D(pool_size=(2, 2), padding='same'))
    model.add(BatchNormalization())
    # 3rd conv block
    model.add(
        Conv2D(70, (3, 3), activation='relu', strides=(2, 2), padding='same'))
    model.add(MaxPool2D(pool_size=(2, 2), padding='valid'))
    model.add(BatchNormalization())
    # ANN block
    model.add(Flatten())
    model.add(Dense(units=100, activation='relu'))
    model.add(Dense(units=100, activation='relu'))
    model.add(Dropout(0.25))
    # output layer
    model.add(Dense(units=2, activation='softmax'))

    #optimizer = keras.optimizers.Adam(lr=0.01)
    #opt = SGD(lr=1)
    if os.path.isfile(model_add):
        print('Found saved model, loading now')
        user_choice = messagebox.askyesno('Found saved model',
                                          'You want to train again?')
        if user_choice:
            model.summary()
            model.compile(loss='categorical_crossentropy',
                          optimizer='adamax',
                          metrics=['accuracy'])
            # fit until the accuracy-threshold callback stops training (up to 100 epochs)
            es = MyThresholdCallback(threshold=0.89)
            history = model.fit(x_train,
                                y_train,
                                validation_data=(x_test, y_test),
                                epochs=100,
                                batch_size=1,
                                callbacks=[es])
            plot_acc(history)
            plot_loss(history)
            model.save(model_add)
        else:
            model = tensorflow.keras.models.load_model(model_add)
            model.summary()

    else:
        print('No saved model found, fitting new model with the data')
        print('Saving to ' + model_add)
        model.summary()
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        es = MyThresholdCallback(threshold=0.89)
        history = model.fit(x_train,
                            y_train,
                            validation_data=(x_test, y_test),
                            epochs=100,
                            batch_size=1,
                            callbacks=[es])
        plot_acc(history)
        plot_loss(history)
        model.save(model_add)
    return model
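
MyThresholdCallback is referenced above but never shown; a minimal sketch of such a callback, assuming it stops training once validation accuracy reaches the threshold:

import tensorflow as tf

class MyThresholdCallback(tf.keras.callbacks.Callback):
    """Stop training when validation accuracy reaches the threshold (sketch)."""

    def __init__(self, threshold):
        super().__init__()
        self.threshold = threshold

    def on_epoch_end(self, epoch, logs=None):
        if logs and logs.get('val_accuracy', 0.0) >= self.threshold:
            self.model.stop_training = True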
Code Example #19
def pretrain_dae_nn(train_data,
                    validation_data,
                    hidden_shape_list,
                    n_output,
                    lrate,
                    activation,
                    epochs,
                    callbacks,
                    out_activation=None,
                    metrics=None,
                    loss='mse',
                    pretrain_loss='mse',
                    dropout=None,
                    L2_reg=None,
                    verbose=False,
                    return_params=False):

    if out_activation is None:
        out_activation = activation

    # start by building a nn with the first hidden layer
    # This probably won't work for 2-D inputs without some modification
    print('constructing initial model:', train_data.X.shape[1],
          hidden_shape_list[0], train_data.X.shape[1])
    base_model, loss, opt, metrics = build_nn(train_data.X.shape[1],
                                              [hidden_shape_list[0]],
                                              train_data.X.shape[1],
                                              lrate=lrate,
                                              activation=activation,
                                              out_activation=out_activation,
                                              metrics=metrics,
                                              loss=pretrain_loss,
                                              dropout=dropout,
                                              L2_reg=L2_reg,
                                              return_params=True)

    if verbose:
        base_model.summary()
        print('training on :')
        print(train_data.X.shape, train_data.X)
        print(train_data.y.shape, train_data.y)

    # now, fit to train data
    base_model.fit(train_data.X,
                   train_data.y,
                   validation_data=validation_data,
                   epochs=epochs,
                   callbacks=callbacks)

    # once we're fit, train the next layer
    for i, h in enumerate(hidden_shape_list[1:]):
        # freeze the hidden layer
        base_model.trainable = False
        # remove the output layer
        # normally, could just call model.layers.pop() , but this isn't working
        # construct a new model with just the layers we need
        model = Sequential()
        model.add(InputLayer(input_shape=(train_data.X.shape[1],)))
        for layer in base_model.layers[:-1]:  # up to, but not including, the last layer
            model.add(layer)

        # add a new hidden layer
        if L2_reg is not None:
            model.add(
                Dense(h,
                      activation=activation,
                      use_bias=True,
                      kernel_regularizer=L2(L2_reg)))
        else:
            model.add(Dense(h, activation=activation, use_bias=True))
        if dropout is not None:
            model.add(Dropout(dropout))

        # add the output layer back
        if L2_reg is not None:
            model.add(
                Dense(train_data.X.shape[1],
                      name='output' + str(i + 1),
                      activation=out_activation,
                      use_bias=True,
                      kernel_regularizer=L2(L2_reg)))
        else:
            model.add(
                Dense(train_data.X.shape[1],
                      name='output' + str(i + 1),
                      activation=out_activation,
                      use_bias=True))

        # recompile the model
        model.compile(loss=pretrain_loss, metrics=metrics, optimizer=opt)
        if verbose:
            model.summary()
        # train again
        model.fit(train_data.X,
                  train_data.y,
                  validation_data=validation_data,
                  epochs=epochs,
                  callbacks=callbacks)

        # reset for next iteration
        base_model = model

    # now, add the real output
    # remove the output layer
    model = Sequential()
    model.add(InputLayer(input_shape=(train_data.X.shape[1],)))
    # TODO: dropout for the input layer ? Probably not

    for layer in base_model.layers[:-1]:  # up to, but not including, the last layer
        model.add(layer)
        # add dropout between each layers
        if dropout is not None:
            model.add(Dropout(dropout))

    # the real (supervised) output has n_output units
    if L2_reg is not None:
        model.add(
            Dense(n_output,
                  name='output',
                  activation=out_activation,
                  use_bias=True,
                  kernel_regularizer=L2(L2_reg)))
    else:
        model.add(
            Dense(n_output,
                  name='output',
                  activation=out_activation,
                  use_bias=True))

    # unfreeze everything
    model.trainable = True

    model.compile(loss=loss, metrics=metrics, optimizer=opt)

    # additional returns are for use in modifying and recompiling
    if return_params:
        return model, loss, opt, metrics
    else:
        return model
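
A hypothetical driver (the data container and shapes are assumptions; build_nn must also be in scope): pretraining a two-hidden-layer DAE on 20-feature inputs.

from types import SimpleNamespace
import numpy as np

X = np.random.rand(256, 20).astype('float32')
train_data = SimpleNamespace(X=X, y=X)  # DAE: the target is the input itself
model = pretrain_dae_nn(train_data,
                        validation_data=(X, X),
                        hidden_shape_list=[64, 32],
                        n_output=20,
                        lrate=1e-3,
                        activation='relu',
                        epochs=5,
                        callbacks=[])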
Code Example #20
File: Aphina_text.py  Project: MagzhanTr/AI-ChatBot
        output_row[labels.index(docs_y[x])] = 1

        training.append(bag)
        output.append(output_row)

    training = numpy.array(training)
    output = numpy.array(output)

    with open("data.pickle", "wb") as f:
        pickle.dump((words, labels, training, output), f)

ops.reset_default_graph()

model = Sequential()
model.add(InputLayer(input_shape=(len(training[0]),)))  # input layer must come first
model.add(Flatten())
model.add(Dense(8, activation="relu"))
model.add(Dense(8, activation="relu"))
model.add(Dense(len(output[0]), activation="softmax"))

model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=["accuracy"])

if os.path.exists("Aphina_model/saved_model" + ".pb"):
    keras.models.load_model("Aphina_model")
else:
    model.fit(training, output, epochs=1000, batch_size=8)
    model.save("Aphina_model")

Code Example #21
def predict(filename):
    image_url = url_for('images', filename=filename)
    image_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)

    image_data = load_and_prepare(image_path)
    print(hasattr(image_data, "_getexif"))

    # keras imports
    from tensorflow.keras.preprocessing.image import load_img, img_to_array
    from tensorflow.keras import Sequential
    from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, GlobalAveragePooling2D
    from tensorflow.keras.layers import Dropout, InputLayer, BatchNormalization

    # instantiate the model
    NN = Sequential()
    NN.add(InputLayer(input_shape=(150, 150, 3)))
    # Conv block 1.
    NN.add(Conv2D(filters=2, kernel_size=3, activation='elu', padding='same'))
    NN.add(MaxPooling2D(2))
    NN.add(BatchNormalization())
    NN.add(Dropout(0.2))
    # Conv block 2
    NN.add(Conv2D(filters=4, kernel_size=3, activation='elu', padding='same'))
    NN.add(MaxPooling2D(2))
    NN.add(BatchNormalization())
    # Conv block 3
    NN.add(Conv2D(filters=8, kernel_size=3, activation='elu', padding='same'))
    NN.add(MaxPooling2D(2))
    NN.add(BatchNormalization())
    NN.add(Dropout(0.5))
    # Conv block 4
    NN.add(Conv2D(filters=12, kernel_size=3, activation='elu', padding='same'))
    NN.add(MaxPooling2D(2))
    NN.add(BatchNormalization())
    # Conv block 5
    NN.add(Conv2D(filters=24, kernel_size=3, activation='elu', padding='same'))
    NN.add(MaxPooling2D(2))
    NN.add(BatchNormalization())
    # Fully connected block - flattening followed by dense and output layers
    NN.add(Flatten())
    NN.add(Dense(4, activation='elu'))
    NN.add(BatchNormalization())
    NN.add(Dropout(0.5))
    NN.add(Dense(2, activation='sigmoid'))  # 2 target classes, output layer

    # define the model and load the weights
    NEURAL_NET_MODEL_PATH = os.environ['NEURAL_NET_MODEL_PATH']
    NN.load_weights(NEURAL_NET_MODEL_PATH)

    # predict and generate bar graph
    predictions = NN.predict(image_data)[0]

    # imports for emotion detection
    from fer import FER
    import cv2

    detector = FER()
    image_data = image_data * 255
    image_data = image_data[0]
    image_data = image_data.astype('uint8')
    emotions = detector.detect_emotions(image_data)
    emotions_dict = emotions[0]['emotions']
    script, div = generate_barplot(emotions_dict)

    try:
        return render_template('predict.html',
                               plot_script=script,
                               plot_div=div,
                               image_url=image_url,
                               message=predictions[1])

    except IndexError:
        return render_template(
            'predict_no_emotion.html',
            plot_script=script,
            plot_div=div,
            image_url=image_url,
            message=
            'Oops! We could not capture your emotions with that photo. Please try again and make sure your face is clearly visible!'
        )
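
load_and_prepare is referenced above but not shown; a plausible sketch, assuming it resizes to the network's 150x150 RGB input, scales to [0, 1], and adds a batch axis:

import numpy as np
from tensorflow.keras.preprocessing.image import load_img, img_to_array

def load_and_prepare(image_path, target_size=(150, 150)):
    img = load_img(image_path, target_size=target_size)
    arr = img_to_array(img) / 255.0      # scale pixels to [0, 1]
    return np.expand_dims(arr, axis=0)   # add the batch dimension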
Code Example #22
File: 08_NN_opt.py  Project: shryu8902/covid19
T_scaler = MinMaxScaler()
temp = C_scaler.fit_transform(CORE_DATA[['CC_', 'TT_']])
CORE_DATA['CC_s'] = C_scaler.fit_transform(CORE_DATA[['CC_']])
CORE_DATA['TT_s'] = T_scaler.fit_transform(CORE_DATA[['TT_']])
#%%
state_list = list(set(DATA.state))
state_list.sort()
state_list = state_list + ['US']
#%%

test = np.arange(40).reshape(-1, 10, 2)


#%%
def non_adder(x):
    y = x[0] + x[1]
    return y


lam = Lambda(non_adder)

K.clear_session()
model = Sequential()
model.add(InputLayer(input_shape=(time_len, 2)))
model.add(Lambda(non_adder, output_shape=[1]))
model.compile(loss='mean_squared_error')

model.predict(test)

# %%
print(dataset.describe())
# Luckily, there are no categorical variables in this dataset, so you do not have to perform one-hot encoding. You should always check this, however, before diving into analysis!
# dataset = pd.get_dummies(dataset)
features = dataset.iloc[:, 0:-1]
labels = dataset.iloc[:, -1]
x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.2, random_state=1)

# Standardize
#numerical_features = features_train.select_dtypes(include=['float64', 'int64']) #only select numerical features types automatically
ct = ColumnTransformer([("only numeric", StandardScaler(), features.columns)], remainder='passthrough')  
x_train_scaled = ct.fit_transform(x_train)
x_test_scaled = ct.transform(x_test)

# Do extensions code below
my_model = Sequential()
my_model.add(InputLayer(input_shape=(x_train_scaled.shape[1],)))
my_model.add(Dense(16, activation='relu'))
my_model.add(Dense(1))
my_model.summary()
# optimizer
opt = Adam(learning_rate=0.01)
my_model.compile(loss='mse', metrics=['mae'], optimizer=opt)
#train
history = my_model.fit(x_train_scaled, y_train, epochs=40, batch_size=1, verbose=1, validation_split = 0.3)   #without validation_split, cannot extract val_mae
my_model.evaluate(x_test_scaled, y_test)

# PLOT the model loss per epoch as well as the mean-average error per epoch for both training and validation data. This will give you an insight into how the model performs better over time and can also help you figure out better ways to tune your hyperparameters.
fig = plt.figure()
ax1 = fig.add_subplot(2, 1, 1)
ax1.plot(history.history['mae'])
ax1.plot(history.history['val_mae'])
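# --- sketch of how the figure described above would typically be finished ---
ax1.set_title('model mae')
ax1.legend(['train', 'validation'])
ax2 = fig.add_subplot(2, 1, 2)
ax2.plot(history.history['loss'])
ax2.plot(history.history['val_loss'])
ax2.set_title('model loss')
ax2.legend(['train', 'validation'])
plt.show()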
Code Example #24
File: test_prediction.py  Project: yt114/alibi-detect
X = np.zeros((n, n_features))


class MyModel(tf.keras.Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.dense = Dense(n_classes, activation='softmax')

    def call(self, x: np.ndarray) -> tf.Tensor:
        return self.dense(x)


model = MyModel()

encoder_net = tf.keras.Sequential(
    [InputLayer(input_shape=(n_features, )),
     Dense(latent_dim)])
decoder_net = tf.keras.Sequential(
    [InputLayer(input_shape=(latent_dim, )),
     Dense(n_features)])
AutoEncoder = AE(encoder_net, decoder_net)

# model, proba, return_class, shape
tests_predict = [(model, True, False, None), (model, False, True, None),
                 (model, False, False, (n, n_classes)),
                 (AutoEncoder, False, False, None),
                 (AutoEncoder, True, False, None)]
n_tests = len(tests_predict)


@pytest.fixture
Code Example #25
def test_lsdd_online(lsdd_online_params):
    n_features, n_enc, ert, window_size, preprocess, n_bootstraps = lsdd_online_params

    np.random.seed(0)

    x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(
        np.float32)
    preprocess_fn, preprocess_kwargs = preprocess
    to_list = False
    if hasattr(preprocess_fn,
               '__name__') and preprocess_fn.__name__ == 'preprocess_list':
        to_list = True
        x_ref = [_[None, :] for _ in x_ref]
    elif isinstance(preprocess_fn, Callable):
        if 'layer' in list(preprocess_kwargs.keys()) \
                and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
            model = mymodel((n_features, ))
            layer = preprocess_kwargs['layer']
            preprocess_fn = partial(preprocess_fn,
                                    model=HiddenOutput(model=model,
                                                       layer=layer))
        elif preprocess_kwargs['model'].__name__ == 'UAE' \
                and n_features > 1 and isinstance(n_enc, int):
            tf.random.set_seed(0)
            encoder_net = tf.keras.Sequential(
                [InputLayer(input_shape=(n_features, )),
                 Dense(n_enc)])
            preprocess_fn = partial(preprocess_fn,
                                    model=UAE(encoder_net=encoder_net))
        else:
            preprocess_fn = None
    else:
        preprocess_fn = None

    cd = LSDDDriftOnlineTF(x_ref=x_ref,
                           ert=ert,
                           window_size=window_size,
                           preprocess_fn=preprocess_fn,
                           n_bootstraps=n_bootstraps)

    x_h0 = np.random.randn(n * n_features).reshape(n, n_features).astype(
        np.float32)
    detection_times_h0 = []
    test_stats_h0 = []
    for x_t in x_h0:
        if to_list:
            x_t = [x_t]
        pred_t = cd.predict(x_t, return_test_stat=True)
        test_stats_h0.append(pred_t['data']['test_stat'])
        if pred_t['data']['is_drift']:
            detection_times_h0.append(pred_t['data']['time'])
            cd.reset()
    average_delay_h0 = np.array(detection_times_h0).mean()
    test_stats_h0 = [ts for ts in test_stats_h0 if ts is not None]
    assert ert / 3 < average_delay_h0 < 3 * ert

    cd.reset()

    x_h1 = 1 + np.random.randn(n * n_features).reshape(n, n_features).astype(
        np.float32)
    detection_times_h1 = []
    test_stats_h1 = []
    for x_t in x_h1:
        if to_list:
            x_t = [x_t]
        pred_t = cd.predict(x_t, return_test_stat=True)
        test_stats_h1.append(pred_t['data']['test_stat'])
        if pred_t['data']['is_drift']:
            detection_times_h1.append(pred_t['data']['time'])
            cd.reset()
    average_delay_h1 = np.array(detection_times_h1).mean()
    test_stats_h1 = [ts for ts in test_stats_h1 if ts is not None]
    assert np.abs(average_delay_h1) < ert / 2

    assert np.mean(test_stats_h1) > np.mean(test_stats_h0)
Code Example #26
    train_loss(mask_loss)
    train_accuracy(labels, predictions)


#@tf.function
def test_step(images, labels, use_mask=True):
    predictions = model(images, training=False, use_mask=use_mask)
    t_loss = cross_entropy(labels, predictions)

    test_loss(t_loss)
    test_accuracy(labels, predictions)


layers = [
    InputLayer(input_shape=(28, 28, 1)),
    BinaryLotteryConv2D(16,
                        kernel_size=3,
                        strides=2,
                        trainable_M=False,
                        const_init_M=20),
    ReLU(),
    Conv2D(32, kernel_size=4, strides=2),
    ReLU(),
    #BinaryLotteryConv2D(16, kernel_size=3, strides=2, trainable_M=False, const_init_M=20),
    #ReLU(),
    Flatten(),
    Dense(32),
    ReLU(),
    Dense(10),
    Activation('softmax')
Code Example #27
Out = TimeDistributed(Lambda(part))(mer)

layer_1 = -main_input

Out = TimeDistributed(Lambda(part2))([main_input, layer_1])

model = Model(inputs=[main_input, aux_input], outputs=Out)
model.compile(loss='mean_squared_error', optimizer='RMSprop')
model.predict([DATA, DATA2])
#%%
from tensorflow.keras.utils import plot_model
plot_model(model)

model = Sequential()
model.add(InputLayer(input_shape=(3, 2)))
model.add(TimeDistributed(Lambda(lambda x: x[:, 0])))
model.compile(loss='mean_squared_error', optimizer='RMSprop')
out = model.predict(DATA)

#    model.add(BatchNormalization())
for i in range(n_layer):
    if i == n_layer - 1:
        model.add(
            LSTM(n_hidden,
                 dropout=dropout,
                 kernel_initializer=init,
                 return_sequences=False))
    else:
        model.add(
Code Example #28
        test_MSE.update_state(y_test, y_pred)
    test_MSE_hist.append(test_MSE.result())


def print_dots(epoch):
    if (epoch % 1 == 0): print('.', end='')
    if (epoch % 10 == 0): print('.')


#########################################################################
##  build and train the model
#########################################################################

simple_lstm_model = Sequential([
    InputLayer(input_shape=x_train.shape[1:]),
    Reshape(
        (x_train.shape[1], 4)),  ## collapse the derivative and high/low axes
    LSTM(int(10), activation='tanh'),
    Dense(int(2 * TARG_SIZE), activation=None),
    Reshape((TARG_SIZE, 2))
])

simple_lstm_model.compile(optimizer=tf.keras.optimizers.Adam(),
                          loss=tf.keras.losses.MSE,
                          metrics=[tf.keras.metrics.MSE])
simple_lstm_model.summary()

VALIDATION_STEPS = STEPS_PER_EPOCH
for epoch in range(EPOCHS):
    train_MSE.reset_states()
Code Example #29
File: tf_layers.py  Project: jatropj/phygnn
    def __init__(self, n_features, n_labels=1, hidden_layers=None,
                 input_layer=None, output_layer=None):
        """
        Parameters
        ----------
        n_features : int
            Number of features (inputs) to train the model on
        n_labels : int, optional
            Number of labels (outputs) to the model, by default 1
        hidden_layers : list | None, optional
            List of dictionaries of key word arguments for each hidden
            layer in the NN. Dense linear layers can be input with their
            activations or separately for more explicit control over the layer
            ordering. For example, this is a valid input for hidden_layers that
            will yield 8 hidden layers (10 layers including input+output):
                [{'units': 64, 'activation': 'relu', 'dropout': 0.01},
                 {'units': 64},
                 {'batch_normalization': {'axis': -1}},
                 {'activation': 'relu'},
                 {'dropout': 0.01},
                 {'class': 'Flatten'},
                 ]
            by default None which will lead to a single linear layer
        input_layer : None | dict
            Input layer specification. Can be a dictionary similar to
            hidden_layers specifying a dense / conv / lstm layer. Will
            default to a keras InputLayer with input shape = n_features.
        output_layer : None | list | dict
            Output layer specification. Can be a list/dict similar to
            hidden_layers input specifying a dense layer with activation.
            For example, for a classification problem with a single output,
            output_layer should be [{'units': 1}, {'activation': 'sigmoid'}].
            This defaults to a single dense layer with no activation
            (best for regression problems).
        """

        self._i = 0
        self._layers = []
        self._hidden_layers_kwargs = copy.deepcopy(hidden_layers)
        self._input_layer_kwargs = copy.deepcopy(input_layer)
        self._output_layer_kwargs = copy.deepcopy(output_layer)

        if input_layer is None:
            self._layers = [InputLayer(input_shape=[n_features])]
        else:
            if not isinstance(input_layer, dict):
                msg = ('Input layer spec needs to be a dict but received: {}'
                       .format(type(input_layer)))
                raise TypeError(msg)
            else:
                self.add_layer(input_layer)

        if hidden_layers:
            for layer in hidden_layers:
                self.add_layer(layer)

        if output_layer is None:
            self._layers.append(Dense(n_labels))
        else:
            if isinstance(output_layer, dict):
                output_layer = [output_layer]
            if not isinstance(output_layer, list):
                msg = ('Output layer spec needs to be a dict or list but '
                       'received: {}'.format(type(output_layer)))
                raise TypeError(msg)
            for layer in output_layer:
                self.add_layer(layer)
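
A hypothetical instantiation (the enclosing class name is not shown in the excerpt and is assumed here to be Layers): two relu hidden layers with dropout feeding a single regression output.

net = Layers(n_features=8,
             n_labels=1,
             hidden_layers=[{'units': 64, 'activation': 'relu', 'dropout': 0.01},
                            {'units': 64, 'activation': 'relu'}])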
Code Example #30
def final2DStacked(path):
    checkpoint_dir = os.path.dirname(path)
    model = Sequential()
    model.add(
        InputLayer(input_shape=(config.NUM_CHANNELS, config.IMG_HEIGHT,
                                config.IMG_WIDTH)))

    model.add(
        Conv2D(128, (3, 3),
               padding="same",
               kernel_regularizer=tf.keras.regularizers.l2(1e-10),
               data_format="channels_first"))
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(
        Conv2D(64, (3, 3),
               padding="same",
               kernel_regularizer=tf.keras.regularizers.l2(1e-10),
               data_format="channels_first"))
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(
        Conv2D(16, (1, 1),
               padding="same",
               kernel_regularizer=tf.keras.regularizers.l2(1e-10),
               data_format="channels_first"))

    model.add(
        Conv2DTranspose(64, (3, 3),
                        strides=2,
                        padding="same",
                        kernel_regularizer=tf.keras.regularizers.l2(1e-10),
                        data_format="channels_first"))
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(
        Conv2DTranspose(128, (3, 3),
                        strides=2,
                        padding="same",
                        kernel_regularizer=tf.keras.regularizers.l2(1e-10),
                        data_format="channels_first"))
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(
        Conv2D(config.NUM_CHANNELS, (3, 3),
               activation='sigmoid',
               padding='same',
               data_format="channels_first"))
    model.summary()

    model.compile(loss='mean_squared_error', optimizer=Adam())
    cp_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=path,
        # monitor='val_loss',
        save_weights_only=True,
        # save_best_only=True,
        verbose=1,
        save_freq='epoch')

    latest = tf.train.latest_checkpoint(checkpoint_dir)
    print(latest)
    if latest is not None:
        model.load_weights(latest)
        print('weights loaded')
    model.save(path + 'model.h5')

    return model, cp_callback